/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send that is and will be sending it .. for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read from.
 * This will cause sctp_service_queues() to get called on the top entry in
 * the list.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
77 if (stcb->sctp_socket == NULL)
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even it is 0. SWS engaged
109 if (calc < stcb->asoc.my_rwnd_control_len) {
118 * Build out our readq entry based on the incoming packet.
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
128 struct sctp_queued_to_read *read_queue_e = NULL;
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
157 return (read_queue_e);
162 * Build out our readq entry based on the incoming packet.
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
168 struct sctp_queued_to_read *read_queue_e = NULL;
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
186 atomic_add_int(&chk->whoTo->ref_count, 1);
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
197 return (read_queue_e);
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
204 struct sctp_extrcvinfo *seinfo;
205 struct sctp_sndrcvinfo *outinfo;
206 struct sctp_rcvinfo *rcvinfo;
207 struct sctp_nxtinfo *nxtinfo;
214 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 /* user does not want any ancillary data */
221 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
224 seinfo = (struct sctp_extrcvinfo *)sinfo;
225 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
228 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
235 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
238 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
244 ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
249 SCTP_BUF_LEN(ret) = 0;
251 /* We need a CMSG header followed by the struct */
252 cmh = mtod(ret, struct cmsghdr *);
254 * Make sure that there is no un-initialized padding between the
255 * cmsg header and cmsg data and after the cmsg data.
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
261 cmh->cmsg_type = SCTP_RCVINFO;
262 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
263 rcvinfo->rcv_sid = sinfo->sinfo_stream;
264 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
265 rcvinfo->rcv_flags = sinfo->sinfo_flags;
266 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
267 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
268 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
269 rcvinfo->rcv_context = sinfo->sinfo_context;
270 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
271 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
272 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
275 cmh->cmsg_level = IPPROTO_SCTP;
276 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
277 cmh->cmsg_type = SCTP_NXTINFO;
278 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
279 nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
280 nxtinfo->nxt_flags = 0;
281 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
282 nxtinfo->nxt_flags |= SCTP_UNORDERED;
284 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
285 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
287 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
288 nxtinfo->nxt_flags |= SCTP_COMPLETE;
290 nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
291 nxtinfo->nxt_length = seinfo->sreinfo_next_length;
292 nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
293 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
294 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
296 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
297 cmh->cmsg_level = IPPROTO_SCTP;
298 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
300 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
301 cmh->cmsg_type = SCTP_EXTRCV;
302 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
303 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
305 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
306 cmh->cmsg_type = SCTP_SNDRCV;
308 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
316 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
318 uint32_t gap, i, cumackp1;
321 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
324 cumackp1 = asoc->cumulative_tsn + 1;
325 if (SCTP_TSN_GT(cumackp1, tsn)) {
327 * this tsn is behind the cum ack and thus we don't need to
328 * worry about it being moved from one to the other.
332 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
333 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
334 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
335 sctp_print_mapping_array(asoc);
337 panic("Things are really messed up now!!");
340 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
341 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
342 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
343 asoc->highest_tsn_inside_nr_map = tsn;
345 if (tsn == asoc->highest_tsn_inside_map) {
346 /* We must back down to see what the new highest is */
347 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
348 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
349 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
350 asoc->highest_tsn_inside_map = i;
356 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
/*
 * NOTE(review): this region is a line-numbered extract with many source
 * lines dropped (braces, declarations, #ifdef INVARIANTS, labels).  The
 * code below is kept byte-identical; restore the missing structure from
 * upstream sctp_indata.c before attempting to compile.
 */
363 * We are delivering currently from the reassembly queue. We must continue to
364 * deliver until we either: 1) run out of space. 2) run out of sequential
365 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/* Deliver reassembled fragments (PD-API) into the socket receive buffer. */
368 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
370 struct sctp_tmit_chunk *chk, *nchk;
375 struct sctp_queued_to_read *control, *ctl, *nctl;
380 cntDel = stream_no = 0;
/* Socket gone/closing: abandon — flush the reassembly queue and free chunks. */
381 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
382 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
383 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
384 /* socket above is long gone or going.. */
386 asoc->fragmented_delivery_inprogress = 0;
387 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
388 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
389 asoc->size_on_reasm_queue -= chk->send_size;
390 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
392 * Lose the data pointer, since its in the socket
396 sctp_m_freem(chk->data);
399 /* Now free the address and data */
400 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
401 /* sa_ignore FREED_MEMORY */
405 SCTP_TCB_LOCK_ASSERT(stcb);
/* Walk the reassembly queue, delivering strictly sequential TSNs only. */
406 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
407 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
408 /* Can't deliver more :< */
411 stream_no = chk->rec.data.stream_number;
412 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
413 if (nxt_todel != chk->rec.data.stream_seq &&
414 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
416 * Not the next sequence to deliver in its stream OR
/* FIRST fragment: build a fresh readq entry and start the PD-API. */
421 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
423 control = sctp_build_readq_entry_chk(stcb, chk);
424 if (control == NULL) {
428 /* save it off for our future deliveries */
429 stcb->asoc.control_pdapi = control;
430 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
434 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
435 sctp_add_to_readq(stcb->sctp_ep,
436 stcb, control, &stcb->sctp_socket->so_rcv, end,
437 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* MIDDLE/LAST fragment: append to the in-progress control_pdapi entry. */
440 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
444 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
445 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
446 stcb->asoc.control_pdapi,
447 chk->data, end, chk->rec.data.TSN_seq,
448 &stcb->sctp_socket->so_rcv)) {
450 * something is very wrong, either
451 * control_pdapi is NULL, or the tail_mbuf
452 * is corrupt, or there is a EOM already on
455 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
/* NOTE(review): panic branch presumably under #ifdef INVARIANTS upstream. */
459 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
460 panic("This should not happen control_pdapi NULL?");
462 /* if we did not panic, it was a EOM */
463 panic("Bad chunking ??");
465 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
466 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
468 SCTP_PRINTF("Bad chunking ??\n");
469 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
477 /* pull it we did it */
478 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
479 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
480 asoc->fragmented_delivery_inprogress = 0;
481 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
482 asoc->strmin[stream_no].last_sequence_delivered++;
484 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
485 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
487 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
489 * turn the flag back on since we just delivered
492 asoc->fragmented_delivery_inprogress = 1;
/* Record last-delivered bookkeeping, then release the chunk. */
494 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
495 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
496 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
497 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
499 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
500 asoc->size_on_reasm_queue -= chk->send_size;
501 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
502 /* free up the chk */
504 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* PD-API complete: drain any now-in-order ordered messages on this stream. */
506 if (asoc->fragmented_delivery_inprogress == 0) {
508 * Now lets see if we can deliver the next one on
511 struct sctp_stream_in *strm;
513 strm = &asoc->strmin[stream_no];
514 nxt_todel = strm->last_sequence_delivered + 1;
515 TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
516 /* Deliver more if we can. */
517 if (nxt_todel == ctl->sinfo_ssn) {
518 TAILQ_REMOVE(&strm->inqueue, ctl, next);
519 asoc->size_on_all_streams -= ctl->length;
520 sctp_ucount_decr(asoc->cnt_on_all_streams);
521 strm->last_sequence_delivered++;
522 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
523 sctp_add_to_readq(stcb->sctp_ep, stcb,
525 &stcb->sctp_socket->so_rcv, 1,
526 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
530 nxt_todel = strm->last_sequence_delivered + 1;
/*
 * NOTE(review): this region is a line-numbered extract with many source
 * lines dropped (braces, declarations, else-arms, the function tail).
 * Code kept byte-identical; restore the missing structure from upstream
 * sctp_indata.c before attempting to compile.
 */
538 * Queue the chunk either right into the socket buffer if it is the next one
539 * to go OR put it in the correct place in the delivery queue. If we do
540 * append to the so_buf, keep doing so until we are out of order. One big
541 * question still remains, what to do when the socket buffer is FULL??
/* Deliver an ordered message now if in sequence, else insert SSN-sorted. */
544 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
545 struct sctp_queued_to_read *control, int *abort_flag)
548 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
549 * all the data in one stream this could happen quite rapidly. One
550 * could use the TSN to keep track of things, but this scheme breaks
551 * down in the other type of stream useage that could occur. Send a
552 * single msg to stream 0, send 4Billion messages to stream 1, now
553 * send a message to stream 0. You have a situation where the TSN
554 * has wrapped but not in the stream. Is this worth worrying about
555 * or should we just change our queue sort at the bottom to be by
558 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
559 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
560 * assignment this could happen... and I don't see how this would be
561 * a violation. So for now I am undecided an will leave the sort by
562 * SSN alone. Maybe a hybred approach is the answer
565 struct sctp_stream_in *strm;
566 struct sctp_queued_to_read *at;
572 asoc->size_on_all_streams += control->length;
573 sctp_ucount_incr(asoc->cnt_on_all_streams);
574 strm = &asoc->strmin[control->sinfo_stream];
575 nxt_todel = strm->last_sequence_delivered + 1;
576 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
577 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
579 SCTPDBG(SCTP_DEBUG_INDATA1,
580 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
581 (uint32_t) control->sinfo_stream,
582 (uint32_t) strm->last_sequence_delivered,
583 (uint32_t) nxt_todel);
/* SSN at or behind last delivered: protocol violation — abort association. */
584 if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
585 /* The incoming sseq is behind where we last delivered? */
586 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
587 control->sinfo_ssn, strm->last_sequence_delivered);
590 * throw it in the stream so it gets cleaned up in
591 * association destruction
593 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
594 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
595 0, M_DONTWAIT, 1, MT_DATA);
597 struct sctp_paramhdr *ph;
600 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
601 (sizeof(uint32_t) * 3);
602 ph = mtod(oper, struct sctp_paramhdr *);
603 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
604 ph->param_length = htons(SCTP_BUF_LEN(oper));
605 ippp = (uint32_t *) (ph + 1);
606 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
608 *ippp = control->sinfo_tsn;
610 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
612 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
613 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/* In order: deliver immediately, then drain any following in-order msgs. */
618 if (nxt_todel == control->sinfo_ssn) {
619 /* can be delivered right away? */
620 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
621 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
623 /* EY it wont be queued if it could be delivered directly */
625 asoc->size_on_all_streams -= control->length;
626 sctp_ucount_decr(asoc->cnt_on_all_streams);
627 strm->last_sequence_delivered++;
629 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
630 sctp_add_to_readq(stcb->sctp_ep, stcb,
632 &stcb->sctp_socket->so_rcv, 1,
633 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
634 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
636 nxt_todel = strm->last_sequence_delivered + 1;
637 if (nxt_todel == control->sinfo_ssn) {
638 TAILQ_REMOVE(&strm->inqueue, control, next);
639 asoc->size_on_all_streams -= control->length;
640 sctp_ucount_decr(asoc->cnt_on_all_streams);
641 strm->last_sequence_delivered++;
643 * We ignore the return of deliver_data here
644 * since we always can hold the chunk on the
645 * d-queue. And we have a finite number that
646 * can be delivered from the strq.
648 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
649 sctp_log_strm_del(control, NULL,
650 SCTP_STR_LOG_FROM_IMMED_DEL);
652 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
653 sctp_add_to_readq(stcb->sctp_ep, stcb,
655 &stcb->sctp_socket->so_rcv, 1,
656 SCTP_READ_LOCK_NOT_HELD,
665 * Ok, we did not deliver this guy, find the correct place
666 * to put it on the queue.
668 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
671 if (TAILQ_EMPTY(&strm->inqueue)) {
673 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
674 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
676 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Out of order: insert into the per-stream queue, kept sorted by SSN. */
678 TAILQ_FOREACH(at, &strm->inqueue, next) {
679 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
681 * one in queue is bigger than the
682 * new one, insert before this one
684 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
685 sctp_log_strm_del(control, at,
686 SCTP_STR_LOG_FROM_INSERT_MD);
688 TAILQ_INSERT_BEFORE(at, control, next);
690 } else if (at->sinfo_ssn == control->sinfo_ssn) {
692 * Gak, He sent me a duplicate str
696 * foo bar, I guess I will just free
697 * this new guy, should we abort
698 * too? FIX ME MAYBE? Or it COULD be
699 * that the SSN's have wrapped.
700 * Maybe I should compare to TSN
701 * somehow... sigh for now just blow
706 sctp_m_freem(control->data);
707 control->data = NULL;
708 asoc->size_on_all_streams -= control->length;
709 sctp_ucount_decr(asoc->cnt_on_all_streams);
710 if (control->whoFrom) {
711 sctp_free_remote_addr(control->whoFrom);
712 control->whoFrom = NULL;
714 sctp_free_a_readq(stcb, control);
717 if (TAILQ_NEXT(at, next) == NULL) {
719 * We are at the end, insert
722 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
723 sctp_log_strm_del(control, at,
724 SCTP_STR_LOG_FROM_INSERT_TL);
726 TAILQ_INSERT_AFTER(&strm->inqueue,
737 * Returns two things: You get the total size of the deliverable parts of the
738 * first fragmented message on the reassembly queue. And you get a 1 back if
739 * all of the message is ready or a 0 back if the message is still incomplete
742 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
744 struct sctp_tmit_chunk *chk;
748 chk = TAILQ_FIRST(&asoc->reasmqueue);
750 /* nothing on the queue */
753 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
754 /* Not a first on the queue */
757 tsn = chk->rec.data.TSN_seq;
758 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
759 if (tsn != chk->rec.data.TSN_seq) {
762 *t_size += chk->send_size;
763 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
772 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
774 struct sctp_tmit_chunk *chk;
776 uint32_t tsize, pd_point;
779 chk = TAILQ_FIRST(&asoc->reasmqueue);
782 asoc->size_on_reasm_queue = 0;
783 asoc->cnt_on_reasm_queue = 0;
786 if (asoc->fragmented_delivery_inprogress == 0) {
788 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
789 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
790 (nxt_todel == chk->rec.data.stream_seq ||
791 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
793 * Yep the first one is here and its ok to deliver
796 if (stcb->sctp_socket) {
797 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
798 stcb->sctp_ep->partial_delivery_point);
800 pd_point = stcb->sctp_ep->partial_delivery_point;
802 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
805 * Yes, we setup to start reception, by
806 * backing down the TSN just in case we
807 * can't deliver. If we
809 asoc->fragmented_delivery_inprogress = 1;
810 asoc->tsn_last_delivered =
811 chk->rec.data.TSN_seq - 1;
813 chk->rec.data.stream_number;
814 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
815 asoc->pdapi_ppid = chk->rec.data.payloadtype;
816 asoc->fragment_flags = chk->rec.data.rcv_flags;
817 sctp_service_reassembly(stcb, asoc);
822 * Service re-assembly will deliver stream data queued at
823 * the end of fragmented delivery.. but it wont know to go
824 * back and call itself again... we do that here with the
827 sctp_service_reassembly(stcb, asoc);
828 if (asoc->fragmented_delivery_inprogress == 0) {
830 * finished our Fragmented delivery, could be more
839 * Dump onto the re-assembly queue, in its proper place. After dumping on the
840 * queue, see if anthing can be delivered. If so pull it off (or as much as
841 * we can. If we run out of space then we must dump what we can and set the
842 * appropriate flag to say we queued what we could.
845 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
846 struct sctp_tmit_chunk *chk, int *abort_flag)
849 uint32_t cum_ackp1, prev_tsn, post_tsn;
850 struct sctp_tmit_chunk *at, *prev, *next;
853 cum_ackp1 = asoc->tsn_last_delivered + 1;
854 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
855 /* This is the first one on the queue */
856 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
858 * we do not check for delivery of anything when only one
861 asoc->size_on_reasm_queue = chk->send_size;
862 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
863 if (chk->rec.data.TSN_seq == cum_ackp1) {
864 if (asoc->fragmented_delivery_inprogress == 0 &&
865 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
866 SCTP_DATA_FIRST_FRAG) {
868 * An empty queue, no delivery inprogress,
869 * we hit the next one and it does NOT have
870 * a FIRST fragment mark.
872 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
873 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
874 0, M_DONTWAIT, 1, MT_DATA);
877 struct sctp_paramhdr *ph;
881 sizeof(struct sctp_paramhdr) +
882 (sizeof(uint32_t) * 3);
883 ph = mtod(oper, struct sctp_paramhdr *);
885 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
886 ph->param_length = htons(SCTP_BUF_LEN(oper));
887 ippp = (uint32_t *) (ph + 1);
888 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
890 *ippp = chk->rec.data.TSN_seq;
892 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
895 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
896 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
898 } else if (asoc->fragmented_delivery_inprogress &&
899 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
901 * We are doing a partial delivery and the
902 * NEXT chunk MUST be either the LAST or
903 * MIDDLE fragment NOT a FIRST
905 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
906 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
907 0, M_DONTWAIT, 1, MT_DATA);
909 struct sctp_paramhdr *ph;
913 sizeof(struct sctp_paramhdr) +
914 (3 * sizeof(uint32_t));
915 ph = mtod(oper, struct sctp_paramhdr *);
917 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
918 ph->param_length = htons(SCTP_BUF_LEN(oper));
919 ippp = (uint32_t *) (ph + 1);
920 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
922 *ippp = chk->rec.data.TSN_seq;
924 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
926 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
927 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
929 } else if (asoc->fragmented_delivery_inprogress) {
931 * Here we are ok with a MIDDLE or LAST
934 if (chk->rec.data.stream_number !=
935 asoc->str_of_pdapi) {
936 /* Got to be the right STR No */
937 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
938 chk->rec.data.stream_number,
940 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
941 0, M_DONTWAIT, 1, MT_DATA);
943 struct sctp_paramhdr *ph;
947 sizeof(struct sctp_paramhdr) +
948 (sizeof(uint32_t) * 3);
950 struct sctp_paramhdr *);
952 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
954 htons(SCTP_BUF_LEN(oper));
955 ippp = (uint32_t *) (ph + 1);
956 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
958 *ippp = chk->rec.data.TSN_seq;
960 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
962 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
963 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
965 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
966 SCTP_DATA_UNORDERED &&
967 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
968 /* Got to be the right STR Seq */
969 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
970 chk->rec.data.stream_seq,
972 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
973 0, M_DONTWAIT, 1, MT_DATA);
975 struct sctp_paramhdr *ph;
979 sizeof(struct sctp_paramhdr) +
980 (3 * sizeof(uint32_t));
982 struct sctp_paramhdr *);
984 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
986 htons(SCTP_BUF_LEN(oper));
987 ippp = (uint32_t *) (ph + 1);
988 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
990 *ippp = chk->rec.data.TSN_seq;
992 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
995 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
996 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1003 /* Find its place */
1004 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1005 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
1007 * one in queue is bigger than the new one, insert
1011 asoc->size_on_reasm_queue += chk->send_size;
1012 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1014 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1016 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1017 /* Gak, He sent me a duplicate str seq number */
1019 * foo bar, I guess I will just free this new guy,
1020 * should we abort too? FIX ME MAYBE? Or it COULD be
1021 * that the SSN's have wrapped. Maybe I should
1022 * compare to TSN somehow... sigh for now just blow
1026 sctp_m_freem(chk->data);
1029 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1033 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1035 * We are at the end, insert it after this
1038 /* check it first */
1039 asoc->size_on_reasm_queue += chk->send_size;
1040 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1041 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1046 /* Now the audits */
1048 prev_tsn = chk->rec.data.TSN_seq - 1;
1049 if (prev_tsn == prev->rec.data.TSN_seq) {
1051 * Ok the one I am dropping onto the end is the
1052 * NEXT. A bit of valdiation here.
1054 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1055 SCTP_DATA_FIRST_FRAG ||
1056 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1057 SCTP_DATA_MIDDLE_FRAG) {
1059 * Insert chk MUST be a MIDDLE or LAST
1062 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1063 SCTP_DATA_FIRST_FRAG) {
1064 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1065 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1066 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1067 0, M_DONTWAIT, 1, MT_DATA);
1069 struct sctp_paramhdr *ph;
1072 SCTP_BUF_LEN(oper) =
1073 sizeof(struct sctp_paramhdr) +
1074 (3 * sizeof(uint32_t));
1076 struct sctp_paramhdr *);
1078 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1080 htons(SCTP_BUF_LEN(oper));
1081 ippp = (uint32_t *) (ph + 1);
1082 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1084 *ippp = chk->rec.data.TSN_seq;
1086 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1089 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1090 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1094 if (chk->rec.data.stream_number !=
1095 prev->rec.data.stream_number) {
1097 * Huh, need the correct STR here,
1098 * they must be the same.
1100 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1101 chk->rec.data.stream_number,
1102 prev->rec.data.stream_number);
1103 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1104 0, M_DONTWAIT, 1, MT_DATA);
1106 struct sctp_paramhdr *ph;
1109 SCTP_BUF_LEN(oper) =
1110 sizeof(struct sctp_paramhdr) +
1111 (3 * sizeof(uint32_t));
1113 struct sctp_paramhdr *);
1115 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1117 htons(SCTP_BUF_LEN(oper));
1118 ippp = (uint32_t *) (ph + 1);
1119 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1121 *ippp = chk->rec.data.TSN_seq;
1123 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1125 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1126 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1130 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1131 chk->rec.data.stream_seq !=
1132 prev->rec.data.stream_seq) {
1134 * Huh, need the correct STR here,
1135 * they must be the same.
1137 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1138 chk->rec.data.stream_seq,
1139 prev->rec.data.stream_seq);
1140 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1141 0, M_DONTWAIT, 1, MT_DATA);
1143 struct sctp_paramhdr *ph;
1146 SCTP_BUF_LEN(oper) =
1147 sizeof(struct sctp_paramhdr) +
1148 (3 * sizeof(uint32_t));
1150 struct sctp_paramhdr *);
1152 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1154 htons(SCTP_BUF_LEN(oper));
1155 ippp = (uint32_t *) (ph + 1);
1156 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1158 *ippp = chk->rec.data.TSN_seq;
1160 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1162 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1163 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1167 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1168 SCTP_DATA_LAST_FRAG) {
1169 /* Insert chk MUST be a FIRST */
1170 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1171 SCTP_DATA_FIRST_FRAG) {
1172 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1173 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1174 0, M_DONTWAIT, 1, MT_DATA);
1176 struct sctp_paramhdr *ph;
1179 SCTP_BUF_LEN(oper) =
1180 sizeof(struct sctp_paramhdr) +
1181 (3 * sizeof(uint32_t));
1183 struct sctp_paramhdr *);
1185 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1187 htons(SCTP_BUF_LEN(oper));
1188 ippp = (uint32_t *) (ph + 1);
1189 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1191 *ippp = chk->rec.data.TSN_seq;
1193 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1196 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1197 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1205 post_tsn = chk->rec.data.TSN_seq + 1;
1206 if (post_tsn == next->rec.data.TSN_seq) {
1208 * Ok the one I am inserting ahead of is my NEXT
1209 * one. A bit of valdiation here.
1211 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1212 /* Insert chk MUST be a last fragment */
1213 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1214 != SCTP_DATA_LAST_FRAG) {
1215 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1216 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1217 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1218 0, M_DONTWAIT, 1, MT_DATA);
1220 struct sctp_paramhdr *ph;
1223 SCTP_BUF_LEN(oper) =
1224 sizeof(struct sctp_paramhdr) +
1225 (3 * sizeof(uint32_t));
1227 struct sctp_paramhdr *);
1229 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1231 htons(SCTP_BUF_LEN(oper));
1232 ippp = (uint32_t *) (ph + 1);
1233 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1235 *ippp = chk->rec.data.TSN_seq;
1237 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1239 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1240 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1244 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1245 SCTP_DATA_MIDDLE_FRAG ||
1246 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1247 SCTP_DATA_LAST_FRAG) {
1249 * Insert chk CAN be MIDDLE or FIRST NOT
1252 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1253 SCTP_DATA_LAST_FRAG) {
1254 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1255 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1256 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1257 0, M_DONTWAIT, 1, MT_DATA);
1259 struct sctp_paramhdr *ph;
1262 SCTP_BUF_LEN(oper) =
1263 sizeof(struct sctp_paramhdr) +
1264 (3 * sizeof(uint32_t));
1266 struct sctp_paramhdr *);
1268 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1270 htons(SCTP_BUF_LEN(oper));
1271 ippp = (uint32_t *) (ph + 1);
1272 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1274 *ippp = chk->rec.data.TSN_seq;
1276 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1279 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1280 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1284 if (chk->rec.data.stream_number !=
1285 next->rec.data.stream_number) {
1287 * Huh, need the correct STR here,
1288 * they must be the same.
1290 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1291 chk->rec.data.stream_number,
1292 next->rec.data.stream_number);
1293 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1294 0, M_DONTWAIT, 1, MT_DATA);
1296 struct sctp_paramhdr *ph;
1299 SCTP_BUF_LEN(oper) =
1300 sizeof(struct sctp_paramhdr) +
1301 (3 * sizeof(uint32_t));
1303 struct sctp_paramhdr *);
1305 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1307 htons(SCTP_BUF_LEN(oper));
1308 ippp = (uint32_t *) (ph + 1);
1309 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1311 *ippp = chk->rec.data.TSN_seq;
1313 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1316 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1317 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1321 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1322 chk->rec.data.stream_seq !=
1323 next->rec.data.stream_seq) {
1325 * Huh, need the correct STR here,
1326 * they must be the same.
1328 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1329 chk->rec.data.stream_seq,
1330 next->rec.data.stream_seq);
1331 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1332 0, M_DONTWAIT, 1, MT_DATA);
1334 struct sctp_paramhdr *ph;
1337 SCTP_BUF_LEN(oper) =
1338 sizeof(struct sctp_paramhdr) +
1339 (3 * sizeof(uint32_t));
1341 struct sctp_paramhdr *);
1343 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1345 htons(SCTP_BUF_LEN(oper));
1346 ippp = (uint32_t *) (ph + 1);
1347 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1349 *ippp = chk->rec.data.TSN_seq;
1351 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1353 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1354 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1361 /* Do we need to do some delivery? check */
1362 sctp_deliver_reasm_check(stcb, asoc);
1366 * This is an unfortunate routine. It checks to make sure an evil guy is not
1367 * stuffing us full of bad packet fragments. A broken peer could also do this,
1368 * but this is doubtful. It is too bad I must worry about evil crackers; sigh.
1372 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1375 struct sctp_tmit_chunk *at;
1378 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1379 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1380 /* is it one bigger? */
1381 tsn_est = at->rec.data.TSN_seq + 1;
1382 if (tsn_est == TSN_seq) {
1383 /* yep. It better be a last then */
1384 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1385 SCTP_DATA_LAST_FRAG) {
1387 * Ok this guy belongs next to a guy
1388 * that is NOT last, it should be a
1389 * middle/last, not a complete
1395 * This guy is ok since its a LAST
1396 * and the new chunk is a fully
1397 * self- contained one.
1402 } else if (TSN_seq == at->rec.data.TSN_seq) {
1403 /* Software error since I have a dup? */
1407 * Ok, 'at' is larger than new chunk but does it
1408 * need to be right before it.
1410 tsn_est = TSN_seq + 1;
1411 if (tsn_est == at->rec.data.TSN_seq) {
1412 /* Yep, It better be a first */
1413 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1414 SCTP_DATA_FIRST_FRAG) {
1427 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1428 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1429 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1430 int *break_flag, int last_chunk)
1432 /* Process a data chunk */
1433 /* struct sctp_tmit_chunk *chk; */
1434 struct sctp_tmit_chunk *chk;
1438 int need_reasm_check = 0;
1439 uint16_t strmno, strmseq;
1441 struct sctp_queued_to_read *control;
1443 uint32_t protocol_id;
1444 uint8_t chunk_flags;
1445 struct sctp_stream_reset_list *liste;
1448 tsn = ntohl(ch->dp.tsn);
1449 chunk_flags = ch->ch.chunk_flags;
1450 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1451 asoc->send_sack = 1;
1453 protocol_id = ch->dp.protocol_id;
1454 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1455 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1456 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1461 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1462 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1463 /* It is a duplicate */
1464 SCTP_STAT_INCR(sctps_recvdupdata);
1465 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1466 /* Record a dup for the next outbound sack */
1467 asoc->dup_tsns[asoc->numduptsns] = tsn;
1470 asoc->send_sack = 1;
1473 /* Calculate the number of TSN's between the base and this TSN */
1474 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1475 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1476 /* Can't hold the bit in the mapping at max array, toss it */
1479 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1480 SCTP_TCB_LOCK_ASSERT(stcb);
1481 if (sctp_expand_mapping_array(asoc, gap)) {
1482 /* Can't expand, drop it */
1486 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1489 /* See if we have received this one already */
1490 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1491 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1492 SCTP_STAT_INCR(sctps_recvdupdata);
1493 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1494 /* Record a dup for the next outbound sack */
1495 asoc->dup_tsns[asoc->numduptsns] = tsn;
1498 asoc->send_sack = 1;
1502 * Check to see about the GONE flag, duplicates would cause a sack
1503 * to be sent up above
1505 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1506 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1507 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1510 * wait a minute, this guy is gone, there is no longer a
1511 * receiver. Send peer an ABORT!
1513 struct mbuf *op_err;
1515 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1516 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1521 * Now before going further we see if there is room. If NOT then we
1522 * MAY let one through only IF this TSN is the one we are waiting
1523 * for on a partial delivery API.
1526 /* now do the tests */
1527 if (((asoc->cnt_on_all_streams +
1528 asoc->cnt_on_reasm_queue +
1529 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1530 (((int)asoc->my_rwnd) <= 0)) {
1532 * When we have NO room in the rwnd we check to make sure
1533 * the reader is doing its job...
1535 if (stcb->sctp_socket->so_rcv.sb_cc) {
1536 /* some to read, wake-up */
1537 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1540 so = SCTP_INP_SO(stcb->sctp_ep);
1541 atomic_add_int(&stcb->asoc.refcnt, 1);
1542 SCTP_TCB_UNLOCK(stcb);
1543 SCTP_SOCKET_LOCK(so, 1);
1544 SCTP_TCB_LOCK(stcb);
1545 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1546 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1547 /* assoc was freed while we were unlocked */
1548 SCTP_SOCKET_UNLOCK(so, 1);
1552 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1553 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1554 SCTP_SOCKET_UNLOCK(so, 1);
1557 /* now is it in the mapping array of what we have accepted? */
1558 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1559 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1560 /* Nope not in the valid range dump it */
1561 sctp_set_rwnd(stcb, asoc);
1562 if ((asoc->cnt_on_all_streams +
1563 asoc->cnt_on_reasm_queue +
1564 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1565 SCTP_STAT_INCR(sctps_datadropchklmt);
1567 SCTP_STAT_INCR(sctps_datadroprwnd);
1573 strmno = ntohs(ch->dp.stream_id);
1574 if (strmno >= asoc->streamincnt) {
1575 struct sctp_paramhdr *phdr;
1578 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1579 0, M_DONTWAIT, 1, MT_DATA);
1581 /* add some space up front so prepend will work well */
1582 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1583 phdr = mtod(mb, struct sctp_paramhdr *);
1585 * Error causes are just param's and this one has
1586 * two back to back phdr, one with the error type
1587 * and size, the other with the streamid and a rsvd
1589 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1590 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1591 phdr->param_length =
1592 htons(sizeof(struct sctp_paramhdr) * 2);
1594 /* We insert the stream in the type field */
1595 phdr->param_type = ch->dp.stream_id;
1596 /* And set the length to 0 for the rsvd field */
1597 phdr->param_length = 0;
1598 sctp_queue_op_err(stcb, mb);
1600 SCTP_STAT_INCR(sctps_badsid);
1601 SCTP_TCB_LOCK_ASSERT(stcb);
1602 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1603 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1604 asoc->highest_tsn_inside_nr_map = tsn;
1606 if (tsn == (asoc->cumulative_tsn + 1)) {
1607 /* Update cum-ack */
1608 asoc->cumulative_tsn = tsn;
1613 * Before we continue lets validate that we are not being fooled by
1614 * an evil attacker. We can only have 4k chunks based on our TSN
1615 * spread allowed by the mapping array 512 * 8 bits, so there is no
1616 * way our stream sequence numbers could have wrapped. We of course
1617 * only validate the FIRST fragment so the bit must be set.
1619 strmseq = ntohs(ch->dp.stream_sequence);
1620 #ifdef SCTP_ASOCLOG_OF_TSNS
1621 SCTP_TCB_LOCK_ASSERT(stcb);
1622 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1623 asoc->tsn_in_at = 0;
1624 asoc->tsn_in_wrapped = 1;
1626 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1627 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1628 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1629 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1630 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1631 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1632 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1633 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1636 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1637 (TAILQ_EMPTY(&asoc->resetHead)) &&
1638 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1639 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1640 /* The incoming sseq is behind where we last delivered? */
1641 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1642 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1643 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1644 0, M_DONTWAIT, 1, MT_DATA);
1646 struct sctp_paramhdr *ph;
1649 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1650 (3 * sizeof(uint32_t));
1651 ph = mtod(oper, struct sctp_paramhdr *);
1652 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1653 ph->param_length = htons(SCTP_BUF_LEN(oper));
1654 ippp = (uint32_t *) (ph + 1);
1655 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1659 *ippp = ((strmno << 16) | strmseq);
1662 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1663 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1667 /************************************
1668 * From here down we may find ch-> invalid
1669 * so its a good idea NOT to use it.
1670 *************************************/
1672 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1673 if (last_chunk == 0) {
1674 dmbuf = SCTP_M_COPYM(*m,
1675 (offset + sizeof(struct sctp_data_chunk)),
1676 the_len, M_DONTWAIT);
1677 #ifdef SCTP_MBUF_LOGGING
1678 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1681 for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1682 if (SCTP_BUF_IS_EXTENDED(mat)) {
1683 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1689 /* We can steal the last chunk */
1693 /* lop off the top part */
1694 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1695 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1696 l_len = SCTP_BUF_LEN(dmbuf);
1699 * need to count up the size hopefully does not hit
1705 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1706 l_len += SCTP_BUF_LEN(lat);
1709 if (l_len > the_len) {
1710 /* Trim the end round bytes off too */
1711 m_adj(dmbuf, -(l_len - the_len));
1714 if (dmbuf == NULL) {
1715 SCTP_STAT_INCR(sctps_nomem);
1718 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1719 asoc->fragmented_delivery_inprogress == 0 &&
1720 TAILQ_EMPTY(&asoc->resetHead) &&
1722 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1723 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1724 /* Candidate for express delivery */
1726 * Its not fragmented, No PD-API is up, Nothing in the
1727 * delivery queue, Its un-ordered OR ordered and the next to
1728 * deliver AND nothing else is stuck on the stream queue,
1729 * And there is room for it in the socket buffer. Lets just
1730 * stuff it up the buffer....
1733 /* It would be nice to avoid this copy if we could :< */
1734 sctp_alloc_a_readq(stcb, control);
1735 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1741 if (control == NULL) {
1742 goto failed_express_del;
1744 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1745 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1746 asoc->highest_tsn_inside_nr_map = tsn;
1748 sctp_add_to_readq(stcb->sctp_ep, stcb,
1749 control, &stcb->sctp_socket->so_rcv,
1750 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1752 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1753 /* for ordered, bump what we delivered */
1754 asoc->strmin[strmno].last_sequence_delivered++;
1756 SCTP_STAT_INCR(sctps_recvexpress);
1757 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1758 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1759 SCTP_STR_LOG_FROM_EXPRS_DEL);
1763 goto finish_express_del;
1766 /* If we reach here this is a new chunk */
1769 /* Express for fragmented delivery? */
1770 if ((asoc->fragmented_delivery_inprogress) &&
1771 (stcb->asoc.control_pdapi) &&
1772 (asoc->str_of_pdapi == strmno) &&
1773 (asoc->ssn_of_pdapi == strmseq)
1775 control = stcb->asoc.control_pdapi;
1776 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1777 /* Can't be another first? */
1778 goto failed_pdapi_express_del;
1780 if (tsn == (control->sinfo_tsn + 1)) {
1781 /* Yep, we can add it on */
1784 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1787 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1789 &stcb->sctp_socket->so_rcv)) {
1790 SCTP_PRINTF("Append fails end:%d\n", end);
1791 goto failed_pdapi_express_del;
1793 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1794 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1795 asoc->highest_tsn_inside_nr_map = tsn;
1797 SCTP_STAT_INCR(sctps_recvexpressm);
1798 control->sinfo_tsn = tsn;
1799 asoc->tsn_last_delivered = tsn;
1800 asoc->fragment_flags = chunk_flags;
1801 asoc->tsn_of_pdapi_last_delivered = tsn;
1802 asoc->last_flags_delivered = chunk_flags;
1803 asoc->last_strm_seq_delivered = strmseq;
1804 asoc->last_strm_no_delivered = strmno;
1806 /* clean up the flags and such */
1807 asoc->fragmented_delivery_inprogress = 0;
1808 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1809 asoc->strmin[strmno].last_sequence_delivered++;
1811 stcb->asoc.control_pdapi = NULL;
1812 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1814 * There could be another message
1817 need_reasm_check = 1;
1821 goto finish_express_del;
1824 failed_pdapi_express_del:
1826 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1827 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1828 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1829 asoc->highest_tsn_inside_nr_map = tsn;
1832 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1833 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1834 asoc->highest_tsn_inside_map = tsn;
1837 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1838 sctp_alloc_a_chunk(stcb, chk);
1840 /* No memory so we drop the chunk */
1841 SCTP_STAT_INCR(sctps_nomem);
1842 if (last_chunk == 0) {
1843 /* we copied it, free the copy */
1844 sctp_m_freem(dmbuf);
1848 chk->rec.data.TSN_seq = tsn;
1849 chk->no_fr_allowed = 0;
1850 chk->rec.data.stream_seq = strmseq;
1851 chk->rec.data.stream_number = strmno;
1852 chk->rec.data.payloadtype = protocol_id;
1853 chk->rec.data.context = stcb->asoc.context;
1854 chk->rec.data.doing_fast_retransmit = 0;
1855 chk->rec.data.rcv_flags = chunk_flags;
1857 chk->send_size = the_len;
1859 atomic_add_int(&net->ref_count, 1);
1862 sctp_alloc_a_readq(stcb, control);
1863 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1869 if (control == NULL) {
1870 /* No memory so we drop the chunk */
1871 SCTP_STAT_INCR(sctps_nomem);
1872 if (last_chunk == 0) {
1873 /* we copied it, free the copy */
1874 sctp_m_freem(dmbuf);
1878 control->length = the_len;
1881 /* Mark it as received */
1882 /* Now queue it where it belongs */
1883 if (control != NULL) {
1884 /* First a sanity check */
1885 if (asoc->fragmented_delivery_inprogress) {
1887 * Ok, we have a fragmented delivery in progress if
1888 * this chunk is next to deliver OR belongs in our
1889 * view to the reassembly, the peer is evil or
1892 uint32_t estimate_tsn;
1894 estimate_tsn = asoc->tsn_last_delivered + 1;
1895 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1896 (estimate_tsn == control->sinfo_tsn)) {
1897 /* Evil/Broke peer */
1898 sctp_m_freem(control->data);
1899 control->data = NULL;
1900 if (control->whoFrom) {
1901 sctp_free_remote_addr(control->whoFrom);
1902 control->whoFrom = NULL;
1904 sctp_free_a_readq(stcb, control);
1905 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1906 0, M_DONTWAIT, 1, MT_DATA);
1908 struct sctp_paramhdr *ph;
1911 SCTP_BUF_LEN(oper) =
1912 sizeof(struct sctp_paramhdr) +
1913 (3 * sizeof(uint32_t));
1914 ph = mtod(oper, struct sctp_paramhdr *);
1916 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1917 ph->param_length = htons(SCTP_BUF_LEN(oper));
1918 ippp = (uint32_t *) (ph + 1);
1919 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1923 *ippp = ((strmno << 16) | strmseq);
1925 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1926 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1930 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1931 sctp_m_freem(control->data);
1932 control->data = NULL;
1933 if (control->whoFrom) {
1934 sctp_free_remote_addr(control->whoFrom);
1935 control->whoFrom = NULL;
1937 sctp_free_a_readq(stcb, control);
1939 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1940 0, M_DONTWAIT, 1, MT_DATA);
1942 struct sctp_paramhdr *ph;
1945 SCTP_BUF_LEN(oper) =
1946 sizeof(struct sctp_paramhdr) +
1947 (3 * sizeof(uint32_t));
1949 struct sctp_paramhdr *);
1951 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1953 htons(SCTP_BUF_LEN(oper));
1954 ippp = (uint32_t *) (ph + 1);
1955 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1959 *ippp = ((strmno << 16) | strmseq);
1961 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1962 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1968 /* No PDAPI running */
1969 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1971 * Reassembly queue is NOT empty validate
1972 * that this tsn does not need to be in
1973 * reasembly queue. If it does then our peer
1974 * is broken or evil.
1976 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1977 sctp_m_freem(control->data);
1978 control->data = NULL;
1979 if (control->whoFrom) {
1980 sctp_free_remote_addr(control->whoFrom);
1981 control->whoFrom = NULL;
1983 sctp_free_a_readq(stcb, control);
1984 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1985 0, M_DONTWAIT, 1, MT_DATA);
1987 struct sctp_paramhdr *ph;
1990 SCTP_BUF_LEN(oper) =
1991 sizeof(struct sctp_paramhdr) +
1992 (3 * sizeof(uint32_t));
1994 struct sctp_paramhdr *);
1996 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1998 htons(SCTP_BUF_LEN(oper));
1999 ippp = (uint32_t *) (ph + 1);
2000 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
2004 *ippp = ((strmno << 16) | strmseq);
2006 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2007 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
2013 /* ok, if we reach here we have passed the sanity checks */
2014 if (chunk_flags & SCTP_DATA_UNORDERED) {
2015 /* queue directly into socket buffer */
2016 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2017 sctp_add_to_readq(stcb->sctp_ep, stcb,
2019 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2022 * Special check for when streams are resetting. We
2023 * could be more smart about this and check the
2024 * actual stream to see if it is not being reset..
2025 * that way we would not create a HOLB when amongst
2026 * streams being reset and those not being reset.
2028 * We take complete messages that have a stream reset
2029 * intervening (aka the TSN is after where our
2030 * cum-ack needs to be) off and put them on a
2031 * pending_reply_queue. The reassembly ones we do
2032 * not have to worry about since they are all sorted
2033 * and proceessed by TSN order. It is only the
2034 * singletons I must worry about.
2036 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2037 SCTP_TSN_GT(tsn, liste->tsn)) {
2039 * yep its past where we need to reset... go
2040 * ahead and queue it.
2042 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2044 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2046 struct sctp_queued_to_read *ctlOn,
2048 unsigned char inserted = 0;
2050 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2051 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2055 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2060 if (inserted == 0) {
2062 * must be put at end, use
2063 * prevP (all setup from
2064 * loop) to setup nextP.
2066 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2070 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2077 /* Into the re-assembly queue */
2078 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2081 * the assoc is now gone and chk was put onto the
2082 * reasm queue, which has all been freed.
2089 if (tsn == (asoc->cumulative_tsn + 1)) {
2090 /* Update cum-ack */
2091 asoc->cumulative_tsn = tsn;
2097 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2099 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2101 SCTP_STAT_INCR(sctps_recvdata);
2102 /* Set it present please */
2103 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2104 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2107 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2108 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2110 /* check the special flag for stream resets */
2111 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2112 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2114 * we have finished working through the backlogged TSN's now
2115 * time to reset streams. 1: call reset function. 2: free
2116 * pending_reply space 3: distribute any chunks in
2117 * pending_reply_queue.
2119 struct sctp_queued_to_read *ctl, *nctl;
2121 sctp_reset_in_stream(stcb, liste->number_entries, liste->req.list_of_streams);
2122 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2123 SCTP_FREE(liste, SCTP_M_STRESET);
2124 /* sa_ignore FREED_MEMORY */
2125 liste = TAILQ_FIRST(&asoc->resetHead);
2126 if (TAILQ_EMPTY(&asoc->resetHead)) {
2127 /* All can be removed */
2128 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2129 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2130 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2136 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2137 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2141 * if ctl->sinfo_tsn is <= liste->tsn we can
2142 * process it which is the NOT of
2143 * ctl->sinfo_tsn > liste->tsn
2145 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2146 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2153 * Now service re-assembly to pick up anything that has been
2154 * held on reassembly queue?
2156 sctp_deliver_reasm_check(stcb, asoc);
2157 need_reasm_check = 0;
2159 if (need_reasm_check) {
2160 /* Another one waits ? */
2161 sctp_deliver_reasm_check(stcb, asoc);
2166 int8_t sctp_map_lookup_tab[256] = {
2167 0, 1, 0, 2, 0, 1, 0, 3,
2168 0, 1, 0, 2, 0, 1, 0, 4,
2169 0, 1, 0, 2, 0, 1, 0, 3,
2170 0, 1, 0, 2, 0, 1, 0, 5,
2171 0, 1, 0, 2, 0, 1, 0, 3,
2172 0, 1, 0, 2, 0, 1, 0, 4,
2173 0, 1, 0, 2, 0, 1, 0, 3,
2174 0, 1, 0, 2, 0, 1, 0, 6,
2175 0, 1, 0, 2, 0, 1, 0, 3,
2176 0, 1, 0, 2, 0, 1, 0, 4,
2177 0, 1, 0, 2, 0, 1, 0, 3,
2178 0, 1, 0, 2, 0, 1, 0, 5,
2179 0, 1, 0, 2, 0, 1, 0, 3,
2180 0, 1, 0, 2, 0, 1, 0, 4,
2181 0, 1, 0, 2, 0, 1, 0, 3,
2182 0, 1, 0, 2, 0, 1, 0, 7,
2183 0, 1, 0, 2, 0, 1, 0, 3,
2184 0, 1, 0, 2, 0, 1, 0, 4,
2185 0, 1, 0, 2, 0, 1, 0, 3,
2186 0, 1, 0, 2, 0, 1, 0, 5,
2187 0, 1, 0, 2, 0, 1, 0, 3,
2188 0, 1, 0, 2, 0, 1, 0, 4,
2189 0, 1, 0, 2, 0, 1, 0, 3,
2190 0, 1, 0, 2, 0, 1, 0, 6,
2191 0, 1, 0, 2, 0, 1, 0, 3,
2192 0, 1, 0, 2, 0, 1, 0, 4,
2193 0, 1, 0, 2, 0, 1, 0, 3,
2194 0, 1, 0, 2, 0, 1, 0, 5,
2195 0, 1, 0, 2, 0, 1, 0, 3,
2196 0, 1, 0, 2, 0, 1, 0, 4,
2197 0, 1, 0, 2, 0, 1, 0, 3,
2198 0, 1, 0, 2, 0, 1, 0, 8
/*
 * NOTE(review): this span appears corrupted by an extraction step -- every
 * line carries a stale source line number and many lines (braces, else
 * branches, #ifdef/#endif guards, some declarations) are missing.  The code
 * is left byte-identical; only this comment is added.
 *
 * Purpose, from the visible logic: recompute the cumulative TSN by OR-ing
 * nr_mapping_array with mapping_array and scanning for the first 0 bit
 * (via sctp_map_lookup_tab), then either clear both maps entirely (when
 * the cum-ack reached the highest TSN) or slide both maps down by whole
 * bytes so that mapping_array_base_tsn advances by (slide_from << 3).
 */
2203 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2206 * Now we also need to check the mapping array in a couple of ways.
2207 * 1) Did we move the cum-ack point?
2209 * When you first glance at this you might think that all entries that
2210 * make up the postion of the cum-ack would be in the nr-mapping
2211 * array only.. i.e. things up to the cum-ack are always
2212 * deliverable. Thats true with one exception, when its a fragmented
2213 * message we may not deliver the data until some threshold (or all
2214 * of it) is in place. So we must OR the nr_mapping_array and
2215 * mapping_array to get a true picture of the cum-ack.
2217 struct sctp_association *asoc;
2220 int slide_from, slide_end, lgap, distance;
2221 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2225 old_cumack = asoc->cumulative_tsn;
2226 old_base = asoc->mapping_array_base_tsn;
2227 old_highest = asoc->highest_tsn_inside_map;
2229 * We could probably improve this a small bit by calculating the
2230 * offset of the current cum-ack as the starting point.
2233 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2234 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2238 /* there is a 0 bit */
2239 at += sctp_map_lookup_tab[val];
2243 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2245 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2246 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2248 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2249 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2251 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2252 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2253 sctp_print_mapping_array(asoc);
2254 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2255 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2257 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2258 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2261 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2262 highest_tsn = asoc->highest_tsn_inside_nr_map;
2264 highest_tsn = asoc->highest_tsn_inside_map;
2266 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2267 /* The complete array was completed by a single FR */
2268 /* highest becomes the cum-ack */
2276 /* clear the array */
2277 clr = ((at + 7) >> 3);
2278 if (clr > asoc->mapping_array_size) {
2279 clr = asoc->mapping_array_size;
2281 memset(asoc->mapping_array, 0, clr);
2282 memset(asoc->nr_mapping_array, 0, clr);
2284 for (i = 0; i < asoc->mapping_array_size; i++) {
2285 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2286 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2287 sctp_print_mapping_array(asoc);
2291 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2292 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2293 } else if (at >= 8) {
2294 /* we can slide the mapping array down */
2295 /* slide_from holds where we hit the first NON 0xff byte */
2298 * now calculate the ceiling of the move using our highest
2301 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2302 slide_end = (lgap >> 3);
2303 if (slide_end < slide_from) {
2304 sctp_print_mapping_array(asoc);
2306 panic("impossible slide");
2308 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2309 lgap, slide_end, slide_from, at);
2313 if (slide_end > asoc->mapping_array_size) {
2315 panic("would overrun buffer");
2317 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2318 asoc->mapping_array_size, slide_end);
2319 slide_end = asoc->mapping_array_size;
2322 distance = (slide_end - slide_from) + 1;
2323 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2324 sctp_log_map(old_base, old_cumack, old_highest,
2325 SCTP_MAP_PREPARE_SLIDE);
2326 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2327 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2329 if (distance + slide_from > asoc->mapping_array_size ||
2332 * Here we do NOT slide forward the array so that
2333 * hopefully when more data comes in to fill it up
2334 * we will be able to slide it forward. Really I
2335 * don't think this should happen :-0
2338 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2339 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2340 (uint32_t) asoc->mapping_array_size,
2341 SCTP_MAP_SLIDE_NONE);
2346 for (ii = 0; ii < distance; ii++) {
2347 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2348 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2351 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2352 asoc->mapping_array[ii] = 0;
2353 asoc->nr_mapping_array[ii] = 0;
2355 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2356 asoc->highest_tsn_inside_map += (slide_from << 3);
2358 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2359 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2361 asoc->mapping_array_base_tsn += (slide_from << 3);
2362 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2363 sctp_log_map(asoc->mapping_array_base_tsn,
2364 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2365 SCTP_MAP_SLIDE_RESULT);
/*
 * NOTE(review): corrupted span (stale line-number prefixes, dropped lines);
 * code left byte-identical, only this comment added.
 *
 * Visible logic: in SHUTDOWN-SENT state, stop the delayed-SACK timer and
 * immediately send SHUTDOWN plus a SACK.  Otherwise decide between sending
 * a SACK now (send_sack set, a gap just closed, duplicate TSNs seen, a gap
 * still present, delayed acks disabled, or the sack_freq packet limit hit)
 * and (re)starting the delayed-ack timer; a CMT-DAC special case delays
 * the ack even when a gap report would normally force one.
 */
2372 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2374 struct sctp_association *asoc;
2375 uint32_t highest_tsn;
2378 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2379 highest_tsn = asoc->highest_tsn_inside_nr_map;
2381 highest_tsn = asoc->highest_tsn_inside_map;
2385 * Now we need to see if we need to queue a sack or just start the
2386 * timer (if allowed).
2388 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2390 * Ok special case, in SHUTDOWN-SENT case. here we maker
2391 * sure SACK timer is off and instead send a SHUTDOWN and a
2394 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2395 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2396 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2398 sctp_send_shutdown(stcb,
2399 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2400 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2404 /* is there a gap now ? */
2405 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2408 * CMT DAC algorithm: increase number of packets received
2411 stcb->asoc.cmt_dac_pkts_rcvd++;
2413 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2415 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2417 (stcb->asoc.numduptsns) || /* we have dup's */
2418 (is_a_gap) || /* is still a gap */
2419 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2420 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2423 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2424 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2425 (stcb->asoc.send_sack == 0) &&
2426 (stcb->asoc.numduptsns == 0) &&
2427 (stcb->asoc.delayed_ack) &&
2428 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2431 * CMT DAC algorithm: With CMT, delay acks
2432 * even in the face of
2434 * reordering. Therefore, if acks that do not
2435 * have to be sent because of the above
2436 * reasons, will be delayed. That is, acks
2437 * that would have been sent due to gap
2438 * reports will be delayed with DAC. Start
2439 * the delayed ack timer.
2441 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2442 stcb->sctp_ep, stcb, NULL);
2445 * Ok we must build a SACK since the timer
2446 * is pending, we got our first packet OR
2447 * there are gaps or duplicates.
2449 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2450 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2453 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2454 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2455 stcb->sctp_ep, stcb, NULL);
/*
 * NOTE(review): corrupted span (stale line-number prefixes, dropped lines);
 * code left byte-identical, only this comment added.
 *
 * Visible logic: first finish any partial delivery (PD-API) in progress
 * via sctp_service_reassembly().  Then, if the head of the reassembly
 * queue is a FIRST_FRAG that is next-to-deliver on its stream (or is
 * unordered), and either the whole message is queued or its queued size
 * reaches the partial-delivery point (min of socket rcv limit and the
 * endpoint's partial_delivery_point), start a new fragmented delivery.
 */
2462 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2464 struct sctp_tmit_chunk *chk;
2465 uint32_t tsize, pd_point;
2468 if (asoc->fragmented_delivery_inprogress) {
2469 sctp_service_reassembly(stcb, asoc);
2471 /* Can we proceed further, i.e. the PD-API is complete */
2472 if (asoc->fragmented_delivery_inprogress) {
2477 * Now is there some other chunk I can deliver from the reassembly
2481 chk = TAILQ_FIRST(&asoc->reasmqueue);
2483 asoc->size_on_reasm_queue = 0;
2484 asoc->cnt_on_reasm_queue = 0;
2487 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2488 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2489 ((nxt_todel == chk->rec.data.stream_seq) ||
2490 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2492 * Yep the first one is here. We setup to start reception,
2493 * by backing down the TSN just in case we can't deliver.
2497 * Before we start though either all of the message should
2498 * be here or the socket buffer max or nothing on the
2499 * delivery queue and something can be delivered.
2501 if (stcb->sctp_socket) {
2502 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2503 stcb->sctp_ep->partial_delivery_point);
2505 pd_point = stcb->sctp_ep->partial_delivery_point;
2507 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2508 asoc->fragmented_delivery_inprogress = 1;
2509 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2510 asoc->str_of_pdapi = chk->rec.data.stream_number;
2511 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2512 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2513 asoc->fragment_flags = chk->rec.data.rcv_flags;
2514 sctp_service_reassembly(stcb, asoc);
2515 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * NOTE(review): corrupted span (stale line-number prefixes, dropped lines);
 * code left byte-identical, only this comment added.
 *
 * Visible logic: walk every chunk in the received packet.  DATA chunks go
 * to sctp_process_a_data_chunk(); a DATA chunk shorter than its minimum
 * size aborts the association with a PROTOCOL_VIOLATION cause.  Known
 * control chunk types found in the data region are ignored (or abort the
 * association under the sctp_strict_data_order sysctl); unknown types
 * follow the standard upper-two-bit rules (0x40: queue an UNRECOG_CHUNK
 * error report; 0x80 clear: stop processing the packet).  Afterwards it
 * reports rwnd-overrun drops, updates liveness bookkeeping, services the
 * reassembly queue, and runs the SACK decision via sctp_sack_check().
 * Before parsing it also copies a small singleton mbuf out of a full
 * cluster to fight cluster starvation.
 */
2523 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2524 struct sctphdr *sh, struct sctp_inpcb *inp,
2525 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2526 uint8_t use_mflowid, uint32_t mflowid,
2527 uint32_t vrf_id, uint16_t port)
2529 struct sctp_data_chunk *ch, chunk_buf;
2530 struct sctp_association *asoc;
2531 int num_chunks = 0; /* number of control chunks processed */
2533 int chk_length, break_flag, last_chunk;
2534 int abort_flag = 0, was_a_gap;
2536 uint32_t highest_tsn;
2539 sctp_set_rwnd(stcb, &stcb->asoc);
2542 SCTP_TCB_LOCK_ASSERT(stcb);
2544 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2545 highest_tsn = asoc->highest_tsn_inside_nr_map;
2547 highest_tsn = asoc->highest_tsn_inside_map;
2549 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2551 * setup where we got the last DATA packet from for any SACK that
2552 * may need to go out. Don't bump the net. This is done ONLY when a
2553 * chunk is assigned.
2555 asoc->last_data_chunk_from = net;
2558 * Now before we proceed we must figure out if this is a wasted
2559 * cluster... i.e. it is a small packet sent in and yet the driver
2560 * underneath allocated a full cluster for it. If so we must copy it
2561 * to a smaller mbuf and free up the cluster mbuf. This will help
2562 * with cluster starvation. Note for __Panda__ we don't do this
2563 * since it has clusters all the way down to 64 bytes.
2565 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2566 /* we only handle mbufs that are singletons.. not chains */
2567 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2569 /* ok lets see if we can copy the data up */
2572 /* get the pointers and copy */
2573 to = mtod(m, caddr_t *);
2574 from = mtod((*mm), caddr_t *);
2575 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2576 /* copy the length and free up the old */
2577 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2579 /* sucess, back copy */
2582 /* We are in trouble in the mbuf world .. yikes */
2586 /* get pointer to the first chunk header */
2587 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2588 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2593 * process all DATA chunks...
2595 *high_tsn = asoc->cumulative_tsn;
2597 asoc->data_pkts_seen++;
2598 while (stop_proc == 0) {
2599 /* validate chunk length */
2600 chk_length = ntohs(ch->ch.chunk_length);
2601 if (length - *offset < chk_length) {
2602 /* all done, mutulated chunk */
2606 if (ch->ch.chunk_type == SCTP_DATA) {
2607 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2609 * Need to send an abort since we had a
2610 * invalid data chunk.
2612 struct mbuf *op_err;
2614 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2615 0, M_DONTWAIT, 1, MT_DATA);
2618 struct sctp_paramhdr *ph;
2621 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2622 (2 * sizeof(uint32_t));
2623 ph = mtod(op_err, struct sctp_paramhdr *);
2625 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2626 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2627 ippp = (uint32_t *) (ph + 1);
2628 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2630 *ippp = asoc->cumulative_tsn;
2633 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2634 sctp_abort_association(inp, stcb, m, iphlen, sh,
2636 use_mflowid, mflowid,
2640 #ifdef SCTP_AUDITING_ENABLED
2641 sctp_audit_log(0xB1, 0);
2643 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2648 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2649 chk_length, net, high_tsn, &abort_flag, &break_flag,
2658 * Set because of out of rwnd space and no
2659 * drop rep space left.
2665 /* not a data chunk in the data region */
2666 switch (ch->ch.chunk_type) {
2667 case SCTP_INITIATION:
2668 case SCTP_INITIATION_ACK:
2669 case SCTP_SELECTIVE_ACK:
2670 case SCTP_NR_SELECTIVE_ACK:
2671 case SCTP_HEARTBEAT_REQUEST:
2672 case SCTP_HEARTBEAT_ACK:
2673 case SCTP_ABORT_ASSOCIATION:
2675 case SCTP_SHUTDOWN_ACK:
2676 case SCTP_OPERATION_ERROR:
2677 case SCTP_COOKIE_ECHO:
2678 case SCTP_COOKIE_ACK:
2681 case SCTP_SHUTDOWN_COMPLETE:
2682 case SCTP_AUTHENTICATION:
2683 case SCTP_ASCONF_ACK:
2684 case SCTP_PACKET_DROPPED:
2685 case SCTP_STREAM_RESET:
2686 case SCTP_FORWARD_CUM_TSN:
2689 * Now, what do we do with KNOWN chunks that
2690 * are NOT in the right place?
2692 * For now, I do nothing but ignore them. We
2693 * may later want to add sysctl stuff to
2694 * switch out and do either an ABORT() or
2695 * possibly process them.
2697 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2698 struct mbuf *op_err;
2700 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2701 sctp_abort_association(inp, stcb,
2704 use_mflowid, mflowid,
2710 /* unknown chunk type, use bit rules */
2711 if (ch->ch.chunk_type & 0x40) {
2712 /* Add a error report to the queue */
2714 struct sctp_paramhdr *phd;
2716 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2718 phd = mtod(merr, struct sctp_paramhdr *);
2720 * We cheat and use param
2721 * type since we did not
2722 * bother to define a error
2723 * cause struct. They are
2724 * the same basic format
2725 * with different names.
2728 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2730 htons(chk_length + sizeof(*phd));
2731 SCTP_BUF_LEN(merr) = sizeof(*phd);
2732 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT);
2733 if (SCTP_BUF_NEXT(merr)) {
2734 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2737 sctp_queue_op_err(stcb, merr);
2744 if ((ch->ch.chunk_type & 0x80) == 0) {
2745 /* discard the rest of this packet */
2747 } /* else skip this bad chunk and
2750 } /* switch of chunk type */
2752 *offset += SCTP_SIZE32(chk_length);
2753 if ((*offset >= length) || stop_proc) {
2754 /* no more data left in the mbuf chain */
2758 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2759 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2768 * we need to report rwnd overrun drops.
2770 sctp_send_packet_dropped(stcb, net, *mm, iphlen, 0);
2774 * Did we get data, if so update the time for auto-close and
2775 * give peer credit for being alive.
2777 SCTP_STAT_INCR(sctps_recvpktwithdata);
2778 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2779 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2780 stcb->asoc.overall_error_count,
2782 SCTP_FROM_SCTP_INDATA,
2785 stcb->asoc.overall_error_count = 0;
2786 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2788 /* now service all of the reassm queue if needed */
2789 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2790 sctp_service_queues(stcb, asoc);
2792 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2793 /* Assure that we ack right away */
2794 stcb->asoc.send_sack = 1;
2796 /* Start a sack timer or QUEUE a SACK for sending */
2797 sctp_sack_check(stcb, was_a_gap);
/*
 * NOTE(review): corrupted span (stale line-number prefixes, dropped lines);
 * code left byte-identical, only this comment added.
 *
 * Visible logic: for each TSN in one gap-ack block
 * [last_tsn + frag_strt .. last_tsn + frag_end], walk the sent_queue
 * (resuming from *p_tp1, circling back to the head once if needed) and for
 * a matching chunk: maintain CMT (rtx_)pseudo-cumack trackers and the SFR
 * saw_newack / this_sack_highest_newack state, update biggest_newly_acked
 * and this_sack_lowest_newack, decrease flight size and credit net_ack
 * (with an RTO sample on first-transmit chunks), and mark the chunk
 * SCTP_DATAGRAM_MARKED -- or, under nr_sacking, free its data.  Returns
 * wake_him for the nr-sack caller.
 */
2802 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2803 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2805 uint32_t * biggest_newly_acked_tsn,
2806 uint32_t * this_sack_lowest_newack,
2809 struct sctp_tmit_chunk *tp1;
2810 unsigned int theTSN;
2811 int j, wake_him = 0, circled = 0;
2813 /* Recover the tp1 we last saw */
2816 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2818 for (j = frag_strt; j <= frag_end; j++) {
2819 theTSN = j + last_tsn;
2821 if (tp1->rec.data.doing_fast_retransmit)
2825 * CMT: CUCv2 algorithm. For each TSN being
2826 * processed from the sent queue, track the
2827 * next expected pseudo-cumack, or
2828 * rtx_pseudo_cumack, if required. Separate
2829 * cumack trackers for first transmissions,
2830 * and retransmissions.
2832 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2833 (tp1->snd_count == 1)) {
2834 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2835 tp1->whoTo->find_pseudo_cumack = 0;
2837 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2838 (tp1->snd_count > 1)) {
2839 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2840 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2842 if (tp1->rec.data.TSN_seq == theTSN) {
2843 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2845 * must be held until
2848 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2850 * If it is less than RESEND, it is
2851 * now no-longer in flight.
2852 * Higher values may already be set
2853 * via previous Gap Ack Blocks...
2854 * i.e. ACKED or RESEND.
2856 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2857 *biggest_newly_acked_tsn)) {
2858 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2861 * CMT: SFR algo (and HTNA) - set
2862 * saw_newack to 1 for dest being
2863 * newly acked. update
2864 * this_sack_highest_newack if
2867 if (tp1->rec.data.chunk_was_revoked == 0)
2868 tp1->whoTo->saw_newack = 1;
2870 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2871 tp1->whoTo->this_sack_highest_newack)) {
2872 tp1->whoTo->this_sack_highest_newack =
2873 tp1->rec.data.TSN_seq;
2876 * CMT DAC algo: also update
2877 * this_sack_lowest_newack
2879 if (*this_sack_lowest_newack == 0) {
2880 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2881 sctp_log_sack(*this_sack_lowest_newack,
2883 tp1->rec.data.TSN_seq,
2886 SCTP_LOG_TSN_ACKED);
2888 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2891 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2892 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2893 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2894 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2895 * Separate pseudo_cumack trackers for first transmissions and
2898 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2899 if (tp1->rec.data.chunk_was_revoked == 0) {
2900 tp1->whoTo->new_pseudo_cumack = 1;
2902 tp1->whoTo->find_pseudo_cumack = 1;
2904 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2905 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2907 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2908 if (tp1->rec.data.chunk_was_revoked == 0) {
2909 tp1->whoTo->new_pseudo_cumack = 1;
2911 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2913 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2914 sctp_log_sack(*biggest_newly_acked_tsn,
2916 tp1->rec.data.TSN_seq,
2919 SCTP_LOG_TSN_ACKED);
2921 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2922 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2923 tp1->whoTo->flight_size,
2925 (uintptr_t) tp1->whoTo,
2926 tp1->rec.data.TSN_seq);
2928 sctp_flight_size_decrease(tp1);
2929 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2930 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2933 sctp_total_flight_decrease(stcb, tp1);
2935 tp1->whoTo->net_ack += tp1->send_size;
2936 if (tp1->snd_count < 2) {
2938 * True non-retransmited chunk
2940 tp1->whoTo->net_ack2 += tp1->send_size;
2948 sctp_calculate_rto(stcb,
2951 &tp1->sent_rcv_time,
2952 sctp_align_safe_nocopy,
2953 SCTP_RTT_FROM_DATA);
2956 if (tp1->whoTo->rto_needed == 0) {
2957 tp1->whoTo->rto_needed = 1;
2963 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2964 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2965 stcb->asoc.this_sack_highest_gap)) {
2966 stcb->asoc.this_sack_highest_gap =
2967 tp1->rec.data.TSN_seq;
2969 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2970 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2971 #ifdef SCTP_AUDITING_ENABLED
2972 sctp_audit_log(0xB2,
2973 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2978 * All chunks NOT UNSENT fall through here and are marked
2979 * (leave PR-SCTP ones that are to skip alone though)
2981 if (tp1->sent != SCTP_FORWARD_TSN_SKIP)
2982 tp1->sent = SCTP_DATAGRAM_MARKED;
2984 if (tp1->rec.data.chunk_was_revoked) {
2985 /* deflate the cwnd */
2986 tp1->whoTo->cwnd -= tp1->book_size;
2987 tp1->rec.data.chunk_was_revoked = 0;
2989 /* NR Sack code here */
2996 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2997 sctp_m_freem(tp1->data);
3004 } /* if (tp1->TSN_seq == theTSN) */
3005 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3008 tp1 = TAILQ_NEXT(tp1, sctp_next);
3009 if ((tp1 == NULL) && (circled == 0)) {
3011 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3013 } /* end while (tp1) */
3016 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3018 /* In case the fragments were not in order we must reset */
3019 } /* end for (j = fragStart */
3021 return (wake_him); /* Return value only used for nr-sack */
/*
 * NOTE(review): corrupted span (stale line-number prefixes, dropped lines);
 * code left byte-identical, only this comment added.
 *
 * Visible logic: parse num_seg + num_nr_seg gap-ack blocks out of the SACK
 * chunk in the mbuf chain.  Malformed blocks (start > end) are skipped;
 * an out-of-order block restarts the sent_queue scan from the head.
 * Tracks *biggest_tsn_acked and delegates per-block chunk marking to
 * sctp_process_segment_range(); returns whether any chunk data was freed
 * (nr-sack case).
 */
3026 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3027 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3028 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3029 int num_seg, int num_nr_seg, int *rto_ok)
3031 struct sctp_gap_ack_block *frag, block;
3032 struct sctp_tmit_chunk *tp1;
3037 uint16_t frag_strt, frag_end, prev_frag_end;
3039 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3043 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3046 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3048 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3049 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3050 *offset += sizeof(block);
3052 return (chunk_freed);
3054 frag_strt = ntohs(frag->start);
3055 frag_end = ntohs(frag->end);
3057 if (frag_strt > frag_end) {
3058 /* This gap report is malformed, skip it. */
3061 if (frag_strt <= prev_frag_end) {
3062 /* This gap report is not in order, so restart. */
3063 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3065 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3066 *biggest_tsn_acked = last_tsn + frag_end;
3073 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3074 non_revocable, &num_frs, biggest_newly_acked_tsn,
3075 this_sack_lowest_newack, rto_ok)) {
3078 prev_frag_end = frag_end;
3080 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3082 sctp_log_fr(*biggest_tsn_acked,
3083 *biggest_newly_acked_tsn,
3084 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3086 return (chunk_freed);
/*
 * NOTE(review): corrupted span (stale line-number prefixes, dropped lines);
 * code left byte-identical, only this comment added.
 *
 * Visible logic: scan the sent_queue above the new cum-ack.  A chunk that
 * was previously SCTP_DATAGRAM_ACKED but is not covered by this SACK has
 * been "revoked" by the peer: it goes back to SCTP_DATAGRAM_SENT, is
 * re-added to the flight, and cwnd is inflated by book_size to compensate
 * for the artificial flight-size inflation.  A chunk MARKED by this SACK
 * is promoted to ACKED.  Chunks beyond biggest_tsn_acked are not touched.
 */
3090 sctp_check_for_revoked(struct sctp_tcb *stcb,
3091 struct sctp_association *asoc, uint32_t cumack,
3092 uint32_t biggest_tsn_acked)
3094 struct sctp_tmit_chunk *tp1;
3095 int tot_revoked = 0;
3097 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3098 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3100 * ok this guy is either ACK or MARKED. If it is
3101 * ACKED it has been previously acked but not this
3102 * time i.e. revoked. If it is MARKED it was ACK'ed
3105 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3108 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3109 /* it has been revoked */
3110 tp1->sent = SCTP_DATAGRAM_SENT;
3111 tp1->rec.data.chunk_was_revoked = 1;
3113 * We must add this stuff back in to assure
3114 * timers and such get started.
3116 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3117 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3118 tp1->whoTo->flight_size,
3120 (uintptr_t) tp1->whoTo,
3121 tp1->rec.data.TSN_seq);
3123 sctp_flight_size_increase(tp1);
3124 sctp_total_flight_increase(stcb, tp1);
3126 * We inflate the cwnd to compensate for our
3127 * artificial inflation of the flight_size.
3129 tp1->whoTo->cwnd += tp1->book_size;
3131 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3132 sctp_log_sack(asoc->last_acked_seq,
3134 tp1->rec.data.TSN_seq,
3137 SCTP_LOG_TSN_REVOKED);
3139 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3140 /* it has been re-acked in this SACK */
3141 tp1->sent = SCTP_DATAGRAM_ACKED;
3144 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3151 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3152 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3154 struct sctp_tmit_chunk *tp1;
3155 int strike_flag = 0;
3157 int tot_retrans = 0;
3158 uint32_t sending_seq;
3159 struct sctp_nets *net;
3160 int num_dests_sacked = 0;
3163 * select the sending_seq, this is either the next thing ready to be
3164 * sent but not transmitted, OR, the next seq we assign.
3166 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3168 sending_seq = asoc->sending_seq;
3170 sending_seq = tp1->rec.data.TSN_seq;
3173 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3174 if ((asoc->sctp_cmt_on_off > 0) &&
3175 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3176 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3177 if (net->saw_newack)
3181 if (stcb->asoc.peer_supports_prsctp) {
3182 (void)SCTP_GETTIME_TIMEVAL(&now);
3184 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3186 if (tp1->no_fr_allowed) {
3187 /* this one had a timeout or something */
3190 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3191 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3192 sctp_log_fr(biggest_tsn_newly_acked,
3193 tp1->rec.data.TSN_seq,
3195 SCTP_FR_LOG_CHECK_STRIKE);
3197 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3198 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3202 if (stcb->asoc.peer_supports_prsctp) {
3203 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3204 /* Is it expired? */
3205 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3206 /* Yes so drop it */
3207 if (tp1->data != NULL) {
3208 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3209 SCTP_SO_NOT_LOCKED);
3215 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3216 /* we are beyond the tsn in the sack */
3219 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3220 /* either a RESEND, ACKED, or MARKED */
3222 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3223 /* Continue strikin FWD-TSN chunks */
3224 tp1->rec.data.fwd_tsn_cnt++;
3229 * CMT : SFR algo (covers part of DAC and HTNA as well)
3231 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3233 * No new acks were receieved for data sent to this
3234 * dest. Therefore, according to the SFR algo for
3235 * CMT, no data sent to this dest can be marked for
3236 * FR using this SACK.
3239 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3240 tp1->whoTo->this_sack_highest_newack)) {
3242 * CMT: New acks were receieved for data sent to
3243 * this dest. But no new acks were seen for data
3244 * sent after tp1. Therefore, according to the SFR
3245 * algo for CMT, tp1 cannot be marked for FR using
3246 * this SACK. This step covers part of the DAC algo
3247 * and the HTNA algo as well.
3252 * Here we check to see if we were have already done a FR
3253 * and if so we see if the biggest TSN we saw in the sack is
3254 * smaller than the recovery point. If so we don't strike
3255 * the tsn... otherwise we CAN strike the TSN.
3258 * @@@ JRI: Check for CMT if (accum_moved &&
3259 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3262 if (accum_moved && asoc->fast_retran_loss_recovery) {
3264 * Strike the TSN if in fast-recovery and cum-ack
3267 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3268 sctp_log_fr(biggest_tsn_newly_acked,
3269 tp1->rec.data.TSN_seq,
3271 SCTP_FR_LOG_STRIKE_CHUNK);
3273 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3276 if ((asoc->sctp_cmt_on_off > 0) &&
3277 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3279 * CMT DAC algorithm: If SACK flag is set to
3280 * 0, then lowest_newack test will not pass
3281 * because it would have been set to the
3282 * cumack earlier. If not already to be
3283 * rtx'd, If not a mixed sack and if tp1 is
3284 * not between two sacked TSNs, then mark by
3285 * one more. NOTE that we are marking by one
3286 * additional time since the SACK DAC flag
3287 * indicates that two packets have been
3288 * received after this missing TSN.
3290 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3291 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3292 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3293 sctp_log_fr(16 + num_dests_sacked,
3294 tp1->rec.data.TSN_seq,
3296 SCTP_FR_LOG_STRIKE_CHUNK);
3301 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3302 (asoc->sctp_cmt_on_off == 0)) {
3304 * For those that have done a FR we must take
3305 * special consideration if we strike. I.e the
3306 * biggest_newly_acked must be higher than the
3307 * sending_seq at the time we did the FR.
3310 #ifdef SCTP_FR_TO_ALTERNATE
3312 * If FR's go to new networks, then we must only do
3313 * this for singly homed asoc's. However if the FR's
3314 * go to the same network (Armando's work) then its
3315 * ok to FR multiple times.
3323 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3324 tp1->rec.data.fast_retran_tsn)) {
3326 * Strike the TSN, since this ack is
3327 * beyond where things were when we
3330 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3331 sctp_log_fr(biggest_tsn_newly_acked,
3332 tp1->rec.data.TSN_seq,
3334 SCTP_FR_LOG_STRIKE_CHUNK);
3336 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3340 if ((asoc->sctp_cmt_on_off > 0) &&
3341 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3343 * CMT DAC algorithm: If
3344 * SACK flag is set to 0,
3345 * then lowest_newack test
3346 * will not pass because it
3347 * would have been set to
3348 * the cumack earlier. If
3349 * not already to be rtx'd,
3350 * If not a mixed sack and
3351 * if tp1 is not between two
3352 * sacked TSNs, then mark by
3353 * one more. NOTE that we
3354 * are marking by one
3355 * additional time since the
3356 * SACK DAC flag indicates
3357 * that two packets have
3358 * been received after this
3361 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3362 (num_dests_sacked == 1) &&
3363 SCTP_TSN_GT(this_sack_lowest_newack,
3364 tp1->rec.data.TSN_seq)) {
3365 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3366 sctp_log_fr(32 + num_dests_sacked,
3367 tp1->rec.data.TSN_seq,
3369 SCTP_FR_LOG_STRIKE_CHUNK);
3371 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3379 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3382 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3383 biggest_tsn_newly_acked)) {
3385 * We don't strike these: This is the HTNA
3386 * algorithm i.e. we don't strike If our TSN is
3387 * larger than the Highest TSN Newly Acked.
3391 /* Strike the TSN */
3392 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3393 sctp_log_fr(biggest_tsn_newly_acked,
3394 tp1->rec.data.TSN_seq,
3396 SCTP_FR_LOG_STRIKE_CHUNK);
3398 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3401 if ((asoc->sctp_cmt_on_off > 0) &&
3402 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3404 * CMT DAC algorithm: If SACK flag is set to
3405 * 0, then lowest_newack test will not pass
3406 * because it would have been set to the
3407 * cumack earlier. If not already to be
3408 * rtx'd, If not a mixed sack and if tp1 is
3409 * not between two sacked TSNs, then mark by
3410 * one more. NOTE that we are marking by one
3411 * additional time since the SACK DAC flag
3412 * indicates that two packets have been
3413 * received after this missing TSN.
3415 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3416 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3417 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3418 sctp_log_fr(48 + num_dests_sacked,
3419 tp1->rec.data.TSN_seq,
3421 SCTP_FR_LOG_STRIKE_CHUNK);
3427 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3428 struct sctp_nets *alt;
3430 /* fix counts and things */
3431 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3432 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3433 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3435 (uintptr_t) tp1->whoTo,
3436 tp1->rec.data.TSN_seq);
3439 tp1->whoTo->net_ack++;
3440 sctp_flight_size_decrease(tp1);
3441 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3442 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3447 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3448 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3450 /* add back to the rwnd */
3451 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3453 /* remove from the total flight */
3454 sctp_total_flight_decrease(stcb, tp1);
3456 if ((stcb->asoc.peer_supports_prsctp) &&
3457 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3459 * Has it been retransmitted tv_sec times? -
3460 * we store the retran count there.
3462 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3463 /* Yes, so drop it */
3464 if (tp1->data != NULL) {
3465 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3466 SCTP_SO_NOT_LOCKED);
3468 /* Make sure to flag we had a FR */
3469 tp1->whoTo->net_ack++;
3474 * SCTP_PRINTF("OK, we are now ready to FR this
3477 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3478 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3482 /* This is a subsequent FR */
3483 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3485 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3486 if (asoc->sctp_cmt_on_off > 0) {
3488 * CMT: Using RTX_SSTHRESH policy for CMT.
3489 * If CMT is being used, then pick dest with
3490 * largest ssthresh for any retransmission.
3492 tp1->no_fr_allowed = 1;
3494 /* sa_ignore NO_NULL_CHK */
3495 if (asoc->sctp_cmt_pf > 0) {
3497 * JRS 5/18/07 - If CMT PF is on,
3498 * use the PF version of
3501 alt = sctp_find_alternate_net(stcb, alt, 2);
3504 * JRS 5/18/07 - If only CMT is on,
3505 * use the CMT version of
3508 /* sa_ignore NO_NULL_CHK */
3509 alt = sctp_find_alternate_net(stcb, alt, 1);
3515 * CUCv2: If a different dest is picked for
3516 * the retransmission, then new
3517 * (rtx-)pseudo_cumack needs to be tracked
3518 * for orig dest. Let CUCv2 track new (rtx-)
3519 * pseudo-cumack always.
3522 tp1->whoTo->find_pseudo_cumack = 1;
3523 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3525 } else {/* CMT is OFF */
3527 #ifdef SCTP_FR_TO_ALTERNATE
3528 /* Can we find an alternate? */
3529 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3532 * default behavior is to NOT retransmit
3533 * FR's to an alternate. Armando Caro's
3534 * paper details why.
3540 tp1->rec.data.doing_fast_retransmit = 1;
3542 /* mark the sending seq for possible subsequent FR's */
3544 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3545 * (uint32_t)tpi->rec.data.TSN_seq);
3547 if (TAILQ_EMPTY(&asoc->send_queue)) {
3549 * If the queue of send is empty then its
3550 * the next sequence number that will be
3551 * assigned so we subtract one from this to
3552 * get the one we last sent.
3554 tp1->rec.data.fast_retran_tsn = sending_seq;
3557 * If there are chunks on the send queue
3558 * (unsent data that has made it from the
3559 * stream queues but not out the door, we
3560 * take the first one (which will have the
3561 * lowest TSN) and subtract one to get the
3564 struct sctp_tmit_chunk *ttt;
3566 ttt = TAILQ_FIRST(&asoc->send_queue);
3567 tp1->rec.data.fast_retran_tsn =
3568 ttt->rec.data.TSN_seq;
3573 * this guy had a RTO calculation pending on
3576 if ((tp1->whoTo != NULL) &&
3577 (tp1->whoTo->rto_needed == 0)) {
3578 tp1->whoTo->rto_needed = 1;
3582 if (alt != tp1->whoTo) {
3583 /* yes, there is an alternate. */
3584 sctp_free_remote_addr(tp1->whoTo);
3585 /* sa_ignore FREED_MEMORY */
3587 atomic_add_int(&alt->ref_count, 1);
/*
 * PR-SCTP (RFC 3758): try to move asoc->advanced_peer_ack_point forward
 * past chunks at the head of the sent_queue that have been abandoned
 * (marked SCTP_FORWARD_TSN_SKIP) or that are expired TTL-policy resends.
 * The caller uses the returned chunk (a_adv) to decide whether a
 * FORWARD-TSN must be sent.
 *
 * NOTE(review): this excerpt is missing several original lines (returns,
 * closing braces, some comment delimiters); comments below describe only
 * what the visible code shows — confirm against the full source.
 */
3593 struct sctp_tmit_chunk *
3594 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3595 struct sctp_association *asoc)
/* tp1/tp2: safe-iteration cursors; a_adv: last chunk the ack point covered */
3597 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do if the peer does not support PR-SCTP. */
3601 if (asoc->peer_supports_prsctp == 0) {
/* Walk from the queue head; stop at the first chunk we cannot skip. */
3604 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3605 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3606 tp1->sent != SCTP_DATAGRAM_RESEND) {
3607 /* no chance to advance, out of here */
/* Optional tracing of each skip candidate. */
3610 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3611 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3612 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3613 asoc->advanced_peer_ack_point,
3614 tp1->rec.data.TSN_seq, 0, 0);
/* Reliable (non-PR) chunks block any further advancement. */
3617 if (!PR_SCTP_ENABLED(tp1->flags)) {
3619 * We can't fwd-tsn past any that are reliable aka
3620 * retransmitted until the asoc fails.
3625 (void)SCTP_GETTIME_TIMEVAL(&now);
3629 * now we got a chunk which is marked for another
3630 * retransmission to a PR-stream but has run out its chances
3631 * already maybe OR has been marked to skip now. Can we skip
3632 * it if its a resend?
3634 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3635 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3637 * Now is this one marked for resend and its time is
3640 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3641 /* Yes so drop it */
3643 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3644 1, SCTP_SO_NOT_LOCKED);
3648 * No, we are done when hit one for resend
3649 * whos time as not expired.
3655 * Ok now if this chunk is marked to drop it we can clean up
3656 * the chunk, advance our peer ack point and we can check
/* Skippable chunk: pull the advanced peer ack point up to its TSN. */
3659 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3660 /* advance PeerAckPoint goes forward */
3661 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3662 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3664 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3665 /* No update but we do save the chk */
3670 * If it is still in RESEND we can advance no
/*
 * Debug/consistency audit of the association's flight-size accounting.
 * Tallies sent_queue chunks by their `sent` state (inflight / resend /
 * in-between / acked / above) and flags a mismatch with the cached
 * total_flight counters.  Return semantics (`ret`) are set on lines not
 * visible in this excerpt — confirm against the full source.
 */
3680 sctp_fs_audit(struct sctp_association *asoc)
3682 struct sctp_tmit_chunk *chk;
/* Per-state tallies accumulated over the sent_queue walk below. */
3683 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3684 int entry_flight, entry_cnt, ret;
/* Snapshot the association's counters before walking the queue. */
3686 entry_flight = asoc->total_flight;
3687 entry_cnt = asoc->total_flight_count;
/* All-PR-SCTP queue: audit is not meaningful, bail early. */
3690 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Classify every queued chunk by its sent-state ordering. */
3693 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3694 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3695 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3696 chk->rec.data.TSN_seq,
3700 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3702 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3704 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as inflight/in-between here is an error. */
3711 if ((inflight > 0) || (inbetween > 0)) {
3713 panic("Flight size-express incorrect? \n");
3715 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3716 entry_flight, entry_cnt);
3718 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3719 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe: clear its
 * window_probe flag and, unless it was already acked/abandoned, pull it
 * out of flight accounting and mark it SCTP_DATAGRAM_RESEND so it goes
 * back out through the normal retransmission path.
 *
 * NOTE(review): some original lines are missing from this excerpt (e.g.
 * the early-return after the skipped-TSN branch is not visible).
 */
3728 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3729 struct sctp_association *asoc,
3730 struct sctp_tmit_chunk *tp1)
/* Probe is being resolved either way — clear the flag first. */
3732 tp1->window_probe = 0;
3733 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3734 /* TSN's skipped we do NOT move back. */
3735 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3736 tp1->whoTo->flight_size,
3738 (uintptr_t) tp1->whoTo,
3739 tp1->rec.data.TSN_seq);
3742 /* First setup this by shrinking flight */
/* Let the CC module observe the chunk leaving flight, if it cares. */
3743 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3744 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
/* Remove the probe from both per-net and association flight totals. */
3747 sctp_flight_size_decrease(tp1);
3748 sctp_total_flight_decrease(stcb, tp1);
3749 /* Now mark for resend */
3750 tp1->sent = SCTP_DATAGRAM_RESEND;
3751 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3753 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3754 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3755 tp1->whoTo->flight_size,
3757 (uintptr_t) tp1->whoTo,
3758 tp1->rec.data.TSN_seq);
/*
 * "Express" SACK path: handles a SACK that carries only a cumulative ack
 * (no gap-ack blocks), which lets us skip the full gap-processing in
 * sctp_handle_sack().  Ordering visible here: validate/log, update the
 * peer rwnd, release newly cum-acked chunks from the sent_queue (with
 * RTO/CC bookkeeping), wake the sending socket, run congestion-control
 * and reachability updates per net, maintain T3 timers and window-probe
 * recovery, then handle shutdown progression and PR-SCTP FORWARD-TSN.
 *
 * NOTE(review): this excerpt is heavily decimated — many original lines
 * (returns, closing braces, #else/#endif, comment delimiters) are
 * missing.  Comments below only describe what the visible code shows;
 * confirm details against the full sctp_indata.c.
 */
3763 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3764 uint32_t rwnd, int *abort_now, int ecne_seen)
3766 struct sctp_nets *net;
3767 struct sctp_association *asoc;
3768 struct sctp_tmit_chunk *tp1, *tp2;
3770 int win_probe_recovery = 0;
3771 int win_probe_recovered = 0;
3772 int j, done_once = 0;
/* Optional arrival logging before any state is touched. */
3775 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3776 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3777 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
/* Caller must hold the TCB lock for all of the state below. */
3779 SCTP_TCB_LOCK_ASSERT(stcb);
3780 #ifdef SCTP_ASOCLOG_OF_TSNS
3781 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3782 stcb->asoc.cumack_log_at++;
3783 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3784 stcb->asoc.cumack_log_at = 0;
/* Snapshot rwnd so we can detect a window-opening update later. */
3788 old_rwnd = asoc->peers_rwnd;
/* Stale SACK (acks behind our last ack): handling lines not visible. */
3789 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3792 } else if (asoc->last_acked_seq == cumack) {
3793 /* Window update sack */
3794 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3795 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3796 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3797 /* SWS sender side engages */
3798 asoc->peers_rwnd = 0;
3800 if (asoc->peers_rwnd > old_rwnd) {
3805 /* First setup for CC stuff */
3806 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3807 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3808 /* Drag along the window_tsn for cwr's */
3809 net->cwr_window_tsn = cumack;
3811 net->prev_cwnd = net->cwnd;
3816 * CMT: Reset CUC and Fast recovery algo variables before
3819 net->new_pseudo_cumack = 0;
3820 net->will_exit_fast_recovery = 0;
3821 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3822 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Strict validation: a cum-ack at/above our next send TSN is bogus
 * and the association is aborted with a protocol-violation cause. */
3825 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3828 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3829 tp1 = TAILQ_LAST(&asoc->sent_queue,
3830 sctpchunk_listhead);
3831 send_s = tp1->rec.data.TSN_seq + 1;
3833 send_s = asoc->sending_seq;
3835 if (SCTP_TSN_GE(cumack, send_s)) {
3841 panic("Impossible sack 1");
3846 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3847 0, M_DONTWAIT, 1, MT_DATA);
3849 struct sctp_paramhdr *ph;
3852 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3854 ph = mtod(oper, struct sctp_paramhdr *);
3855 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3856 ph->param_length = htons(SCTP_BUF_LEN(oper));
3857 ippp = (uint32_t *) (ph + 1);
3858 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3860 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3861 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
3866 asoc->this_sack_highest_gap = cumack;
3867 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3868 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3869 stcb->asoc.overall_error_count,
3871 SCTP_FROM_SCTP_INDATA,
/* A valid SACK is evidence of life — reset the error counter. */
3874 stcb->asoc.overall_error_count = 0;
/* Cum-ack advanced: release every sent_queue chunk it covers. */
3875 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3876 /* process the new consecutive TSN first */
3877 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3878 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3879 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3880 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3882 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3884 * If it is less than ACKED, it is
3885 * now no-longer in flight. Higher
3886 * values may occur during marking
3888 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3889 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3890 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3891 tp1->whoTo->flight_size,
3893 (uintptr_t) tp1->whoTo,
3894 tp1->rec.data.TSN_seq);
3896 sctp_flight_size_decrease(tp1);
3897 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3898 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3901 /* sa_ignore NO_NULL_CHK */
3902 sctp_total_flight_decrease(stcb, tp1);
3904 tp1->whoTo->net_ack += tp1->send_size;
3905 if (tp1->snd_count < 2) {
3907 * True non-retransmited
3910 tp1->whoTo->net_ack2 +=
3913 /* update RTO too? */
3922 sctp_calculate_rto(stcb,
3924 &tp1->sent_rcv_time,
3925 sctp_align_safe_nocopy,
3926 SCTP_RTT_FROM_DATA);
3929 if (tp1->whoTo->rto_needed == 0) {
3930 tp1->whoTo->rto_needed = 1;
3936 * CMT: CUCv2 algorithm. From the
3937 * cumack'd TSNs, for each TSN being
3938 * acked for the first time, set the
3939 * following variables for the
3940 * corresp destination.
3941 * new_pseudo_cumack will trigger a
3943 * find_(rtx_)pseudo_cumack will
3944 * trigger search for the next
3945 * expected (rtx-)pseudo-cumack.
3947 tp1->whoTo->new_pseudo_cumack = 1;
3948 tp1->whoTo->find_pseudo_cumack = 1;
3949 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3951 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3952 /* sa_ignore NO_NULL_CHK */
3953 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3956 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3957 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3959 if (tp1->rec.data.chunk_was_revoked) {
3960 /* deflate the cwnd */
3961 tp1->whoTo->cwnd -= tp1->book_size;
3962 tp1->rec.data.chunk_was_revoked = 0;
/* Fully acked: unlink, release the mbuf data and the chunk itself. */
3964 tp1->sent = SCTP_DATAGRAM_ACKED;
3965 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3967 /* sa_ignore NO_NULL_CHK */
3968 sctp_free_bufspace(stcb, asoc, tp1, 1);
3969 sctp_m_freem(tp1->data);
3972 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3973 sctp_log_sack(asoc->last_acked_seq,
3975 tp1->rec.data.TSN_seq,
3978 SCTP_LOG_FREE_SENT);
3980 asoc->sent_queue_cnt--;
3981 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Send-buffer space was freed — wake a blocked writer.  On Apple /
 * lock-testing builds this needs the socket lock, which forces a
 * TCB unlock/relock window guarded by a refcount. */
3988 /* sa_ignore NO_NULL_CHK */
3989 if (stcb->sctp_socket) {
3990 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3994 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3995 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3996 /* sa_ignore NO_NULL_CHK */
3997 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3999 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4000 so = SCTP_INP_SO(stcb->sctp_ep);
4001 atomic_add_int(&stcb->asoc.refcnt, 1);
4002 SCTP_TCB_UNLOCK(stcb);
4003 SCTP_SOCKET_LOCK(so, 1);
4004 SCTP_TCB_LOCK(stcb);
4005 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4006 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4007 /* assoc was freed while we were unlocked */
4008 SCTP_SOCKET_UNLOCK(so, 1);
4012 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4013 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4014 SCTP_SOCKET_UNLOCK(so, 1);
4017 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4018 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4022 /* JRS - Use the congestion control given in the CC module */
/* New data acked and no ECN echo: clear error counts (Karn), restore
 * reachability/PF state and un-backed-off RTOs per destination. */
4023 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4024 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4025 if (net->net_ack2 > 0) {
4027 * Karn's rule applies to clearing error
4028 * count, this is optional.
4030 net->error_count = 0;
4031 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4032 /* addr came good */
4033 net->dest_state |= SCTP_ADDR_REACHABLE;
4034 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4035 0, (void *)net, SCTP_SO_NOT_LOCKED);
4037 if (net == stcb->asoc.primary_destination) {
4038 if (stcb->asoc.alternate) {
4040 * release the alternate,
4043 sctp_free_remote_addr(stcb->asoc.alternate);
4044 stcb->asoc.alternate = NULL;
4047 if (net->dest_state & SCTP_ADDR_PF) {
4048 net->dest_state &= ~SCTP_ADDR_PF;
4049 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4050 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4051 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4052 /* Done with this net */
4055 /* restore any doubled timers */
4056 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4057 if (net->RTO < stcb->asoc.minrto) {
4058 net->RTO = stcb->asoc.minrto;
4060 if (net->RTO > stcb->asoc.maxrto) {
4061 net->RTO = stcb->asoc.maxrto;
/* Delegate the cwnd update for this SACK to the active CC module. */
4065 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
/* Commit the new cumulative ack point. */
4067 asoc->last_acked_seq = cumack;
/* Empty sent_queue: zero all flight accounting outright. */
4069 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4070 /* nothing left in-flight */
4071 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4072 net->flight_size = 0;
4073 net->partial_bytes_acked = 0;
4075 asoc->total_flight = 0;
4076 asoc->total_flight_count = 0;
/* Recompute peer rwnd against the (updated) flight, apply SWS floor,
 * and note whether the window opened (enables probe recovery below). */
4079 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4080 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4081 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4082 /* SWS sender side engages */
4083 asoc->peers_rwnd = 0;
4085 if (asoc->peers_rwnd > old_rwnd) {
4086 win_probe_recovery = 1;
4088 /* Now assure a timer where data is queued at */
/* Per-destination: recover window probes and keep exactly the T3-rxt
 * timers that still have flight to protect. */
4091 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4094 if (win_probe_recovery && (net->window_probe)) {
4095 win_probe_recovered = 1;
4097 * Find first chunk that was used with window probe
4098 * and clear the sent
4100 /* sa_ignore FREED_MEMORY */
4101 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4102 if (tp1->window_probe) {
4103 /* move back to data send queue */
4104 sctp_window_probe_recovery(stcb, asoc, tp1);
4109 if (net->RTO == 0) {
4110 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4112 to_ticks = MSEC_TO_TICKS(net->RTO);
4114 if (net->flight_size) {
4116 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4117 sctp_timeout_handler, &net->rxt_timer);
4118 if (net->window_probe) {
4119 net->window_probe = 0;
4122 if (net->window_probe) {
4124 * In window probes we must assure a timer
4125 * is still running there
4127 net->window_probe = 0;
4128 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4129 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4130 sctp_timeout_handler, &net->rxt_timer);
4132 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4133 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4135 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
/* Accounting drifted with nothing explaining it: audit and, if the
 * audit flags an error, rebuild flight counters from the queue. */
4140 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4141 (asoc->sent_queue_retran_cnt == 0) &&
4142 (win_probe_recovered == 0) &&
4145 * huh, this should not happen unless all packets are
4146 * PR-SCTP and marked to skip of course.
4148 if (sctp_fs_audit(asoc)) {
4149 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4150 net->flight_size = 0;
4152 asoc->total_flight = 0;
4153 asoc->total_flight_count = 0;
4154 asoc->sent_queue_retran_cnt = 0;
4155 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4156 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4157 sctp_flight_size_increase(tp1);
4158 sctp_total_flight_increase(stcb, tp1);
4159 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4160 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4167 /**********************************/
4168 /* Now what about shutdown issues */
4169 /**********************************/
/* Queues drained: progress the shutdown state machine — abort on a
 * stuck partial message, else send SHUTDOWN or SHUTDOWN-ACK. */
4170 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4171 /* nothing left on sendqueue.. consider done */
4173 if ((asoc->stream_queue_cnt == 1) &&
4174 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4175 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4176 (asoc->locked_on_sending)
4178 struct sctp_stream_queue_pending *sp;
4181 * I may be in a state where we got all across.. but
4182 * cannot write more due to a shutdown... we abort
4183 * since the user did not indicate EOR in this case.
4184 * The sp will be cleaned during free of the asoc.
4186 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4188 if ((sp) && (sp->length == 0)) {
4189 /* Let cleanup code purge it */
4190 if (sp->msg_is_complete) {
4191 asoc->stream_queue_cnt--;
4193 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4194 asoc->locked_on_sending = NULL;
4195 asoc->stream_queue_cnt--;
4199 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4200 (asoc->stream_queue_cnt == 0)) {
4201 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4202 /* Need to abort here */
4208 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4209 0, M_DONTWAIT, 1, MT_DATA);
4211 struct sctp_paramhdr *ph;
4214 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4216 ph = mtod(oper, struct sctp_paramhdr *);
4217 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4218 ph->param_length = htons(SCTP_BUF_LEN(oper));
4219 ippp = (uint32_t *) (ph + 1);
4220 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
4222 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4223 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4225 struct sctp_nets *netp;
4227 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4228 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4229 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4231 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4232 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4233 sctp_stop_timers_for_shutdown(stcb);
4234 if (asoc->alternate) {
4235 netp = asoc->alternate;
4237 netp = asoc->primary_destination;
4239 sctp_send_shutdown(stcb, netp);
4240 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4241 stcb->sctp_ep, stcb, netp);
4242 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4243 stcb->sctp_ep, stcb, netp);
4245 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4246 (asoc->stream_queue_cnt == 0)) {
4247 struct sctp_nets *netp;
4249 if (asoc->alternate) {
4250 netp = asoc->alternate;
4252 netp = asoc->primary_destination;
4254 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4257 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4258 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4259 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4260 sctp_send_shutdown_ack(stcb, netp);
4261 sctp_stop_timers_for_shutdown(stcb);
4262 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4263 stcb->sctp_ep, stcb, netp);
4266 /*********************************************/
4267 /* Here we perform PR-SCTP procedures */
4269 /*********************************************/
4270 /* C1. update advancedPeerAckPoint */
4271 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4272 asoc->advanced_peer_ack_point = cumack;
4274 /* PR-Sctp issues need to be addressed too */
4275 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4276 struct sctp_tmit_chunk *lchk;
4277 uint32_t old_adv_peer_ack_point;
4279 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4280 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4281 /* C3. See if we need to send a Fwd-TSN */
4282 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4284 * ISSUE with ECN, see FWD-TSN processing.
4286 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4287 send_forward_tsn(stcb, asoc);
4289 /* try to FR fwd-tsn's that get lost too */
4290 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4291 send_forward_tsn(stcb, asoc);
4296 /* Assure a timer is up */
4297 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4298 stcb->sctp_ep, stcb, lchk->whoTo);
/* Final rwnd/flight snapshot for debugging. */
4301 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4302 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4304 stcb->asoc.peers_rwnd,
4305 stcb->asoc.total_flight,
4306 stcb->asoc.total_output_queue_size);
4311 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4312 struct sctp_tcb *stcb,
4313 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4314 int *abort_now, uint8_t flags,
4315 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4317 struct sctp_association *asoc;
4318 struct sctp_tmit_chunk *tp1, *tp2;
4319 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4320 uint16_t wake_him = 0;
4321 uint32_t send_s = 0;
4323 int accum_moved = 0;
4324 int will_exit_fast_recovery = 0;
4325 uint32_t a_rwnd, old_rwnd;
4326 int win_probe_recovery = 0;
4327 int win_probe_recovered = 0;
4328 struct sctp_nets *net = NULL;
4331 uint8_t reneged_all = 0;
4332 uint8_t cmt_dac_flag;
4335 * we take any chance we can to service our queues since we cannot
4336 * get awoken when the socket is read from :<
4339 * Now perform the actual SACK handling: 1) Verify that it is not an
4340 * old sack, if so discard. 2) If there is nothing left in the send
4341 * queue (cum-ack is equal to last acked) then you have a duplicate
4342 * too, update any rwnd change and verify no timers are running.
4343 * then return. 3) Process any new consequtive data i.e. cum-ack
4344 * moved process these first and note that it moved. 4) Process any
4345 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4346 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4347 * sync up flightsizes and things, stop all timers and also check
4348 * for shutdown_pending state. If so then go ahead and send off the
4349 * shutdown. If in shutdown recv, send off the shutdown-ack and
4350 * start that timer, Ret. 9) Strike any non-acked things and do FR
4351 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4352 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4353 * if in shutdown_recv state.
4355 SCTP_TCB_LOCK_ASSERT(stcb);
4357 this_sack_lowest_newack = 0;
4358 SCTP_STAT_INCR(sctps_slowpath_sack);
4360 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4361 #ifdef SCTP_ASOCLOG_OF_TSNS
4362 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4363 stcb->asoc.cumack_log_at++;
4364 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4365 stcb->asoc.cumack_log_at = 0;
4370 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4371 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4372 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4374 old_rwnd = stcb->asoc.peers_rwnd;
4375 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4376 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4377 stcb->asoc.overall_error_count,
4379 SCTP_FROM_SCTP_INDATA,
4382 stcb->asoc.overall_error_count = 0;
4384 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4385 sctp_log_sack(asoc->last_acked_seq,
4392 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4394 uint32_t *dupdata, dblock;
4396 for (i = 0; i < num_dup; i++) {
4397 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4398 sizeof(uint32_t), (uint8_t *) & dblock);
4399 if (dupdata == NULL) {
4402 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4405 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4407 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4408 tp1 = TAILQ_LAST(&asoc->sent_queue,
4409 sctpchunk_listhead);
4410 send_s = tp1->rec.data.TSN_seq + 1;
4413 send_s = asoc->sending_seq;
4415 if (SCTP_TSN_GE(cum_ack, send_s)) {
4419 * no way, we have not even sent this TSN out yet.
4420 * Peer is hopelessly messed up with us.
4422 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4425 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4426 tp1->rec.data.TSN_seq, tp1);
4431 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4432 0, M_DONTWAIT, 1, MT_DATA);
4434 struct sctp_paramhdr *ph;
4437 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4439 ph = mtod(oper, struct sctp_paramhdr *);
4440 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4441 ph->param_length = htons(SCTP_BUF_LEN(oper));
4442 ippp = (uint32_t *) (ph + 1);
4443 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4445 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4446 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4450 /**********************/
4451 /* 1) check the range */
4452 /**********************/
4453 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4454 /* acking something behind */
4457 /* update the Rwnd of the peer */
4458 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4459 TAILQ_EMPTY(&asoc->send_queue) &&
4460 (asoc->stream_queue_cnt == 0)) {
4461 /* nothing left on send/sent and strmq */
4462 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4463 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4464 asoc->peers_rwnd, 0, 0, a_rwnd);
4466 asoc->peers_rwnd = a_rwnd;
4467 if (asoc->sent_queue_retran_cnt) {
4468 asoc->sent_queue_retran_cnt = 0;
4470 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4471 /* SWS sender side engages */
4472 asoc->peers_rwnd = 0;
4474 /* stop any timers */
4475 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4476 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4477 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4478 net->partial_bytes_acked = 0;
4479 net->flight_size = 0;
4481 asoc->total_flight = 0;
4482 asoc->total_flight_count = 0;
4486 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4487 * things. The total byte count acked is tracked in netAckSz AND
4488 * netAck2 is used to track the total bytes acked that are un-
4489 * amibguious and were never retransmitted. We track these on a per
4490 * destination address basis.
4492 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4493 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4494 /* Drag along the window_tsn for cwr's */
4495 net->cwr_window_tsn = cum_ack;
4497 net->prev_cwnd = net->cwnd;
4502 * CMT: Reset CUC and Fast recovery algo variables before
4505 net->new_pseudo_cumack = 0;
4506 net->will_exit_fast_recovery = 0;
4507 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4508 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4511 /* process the new consecutive TSN first */
4512 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4513 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4514 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4516 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4518 * If it is less than ACKED, it is
4519 * now no-longer in flight. Higher
4520 * values may occur during marking
4522 if ((tp1->whoTo->dest_state &
4523 SCTP_ADDR_UNCONFIRMED) &&
4524 (tp1->snd_count < 2)) {
4526 * If there was no retran
4527 * and the address is
4528 * un-confirmed and we sent
4530 * sacked.. its confirmed,
4533 tp1->whoTo->dest_state &=
4534 ~SCTP_ADDR_UNCONFIRMED;
4536 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4537 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4538 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4539 tp1->whoTo->flight_size,
4541 (uintptr_t) tp1->whoTo,
4542 tp1->rec.data.TSN_seq);
4544 sctp_flight_size_decrease(tp1);
4545 sctp_total_flight_decrease(stcb, tp1);
4546 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4547 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4551 tp1->whoTo->net_ack += tp1->send_size;
4553 /* CMT SFR and DAC algos */
4554 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4555 tp1->whoTo->saw_newack = 1;
4557 if (tp1->snd_count < 2) {
4559 * True non-retransmited
4562 tp1->whoTo->net_ack2 +=
4565 /* update RTO too? */
4569 sctp_calculate_rto(stcb,
4571 &tp1->sent_rcv_time,
4572 sctp_align_safe_nocopy,
4573 SCTP_RTT_FROM_DATA);
4576 if (tp1->whoTo->rto_needed == 0) {
4577 tp1->whoTo->rto_needed = 1;
4583 * CMT: CUCv2 algorithm. From the
4584 * cumack'd TSNs, for each TSN being
4585 * acked for the first time, set the
4586 * following variables for the
4587 * corresp destination.
4588 * new_pseudo_cumack will trigger a
4590 * find_(rtx_)pseudo_cumack will
4591 * trigger search for the next
4592 * expected (rtx-)pseudo-cumack.
4594 tp1->whoTo->new_pseudo_cumack = 1;
4595 tp1->whoTo->find_pseudo_cumack = 1;
4596 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4599 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4600 sctp_log_sack(asoc->last_acked_seq,
4602 tp1->rec.data.TSN_seq,
4605 SCTP_LOG_TSN_ACKED);
4607 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4608 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4611 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4612 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4613 #ifdef SCTP_AUDITING_ENABLED
4614 sctp_audit_log(0xB3,
4615 (asoc->sent_queue_retran_cnt & 0x000000ff));
4618 if (tp1->rec.data.chunk_was_revoked) {
4619 /* deflate the cwnd */
4620 tp1->whoTo->cwnd -= tp1->book_size;
4621 tp1->rec.data.chunk_was_revoked = 0;
4623 tp1->sent = SCTP_DATAGRAM_ACKED;
4629 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4630 /* always set this up to cum-ack */
4631 asoc->this_sack_highest_gap = last_tsn;
4633 if ((num_seg > 0) || (num_nr_seg > 0)) {
4636 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4637 * to be greater than the cumack. Also reset saw_newack to 0
4640 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4641 net->saw_newack = 0;
4642 net->this_sack_highest_newack = last_tsn;
4646 * thisSackHighestGap will increase while handling NEW
4647 * segments this_sack_highest_newack will increase while
4648 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4649 * used for CMT DAC algo. saw_newack will also change.
4651 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4652 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4653 num_seg, num_nr_seg, &rto_ok)) {
4656 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4658 * validate the biggest_tsn_acked in the gap acks if
4659 * strict adherence is wanted.
4661 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4663 * peer is either confused or we are under
4664 * attack. We must abort.
4666 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4667 biggest_tsn_acked, send_s);
4672 /*******************************************/
4673 /* cancel ALL T3-send timer if accum moved */
4674 /*******************************************/
4675 if (asoc->sctp_cmt_on_off > 0) {
4676 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4677 if (net->new_pseudo_cumack)
4678 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4680 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4685 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4686 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4687 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4691 /********************************************/
4692 /* drop the acked chunks from the sentqueue */
4693 /********************************************/
4694 asoc->last_acked_seq = cum_ack;
4696 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4697 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4700 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
4701 /* no more sent on list */
4702 SCTP_PRINTF("Warning, tp1->sent == %d and its now acked?\n",
4705 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4706 if (tp1->pr_sctp_on) {
4707 if (asoc->pr_sctp_cnt != 0)
4708 asoc->pr_sctp_cnt--;
4710 asoc->sent_queue_cnt--;
4712 /* sa_ignore NO_NULL_CHK */
4713 sctp_free_bufspace(stcb, asoc, tp1, 1);
4714 sctp_m_freem(tp1->data);
4716 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4717 asoc->sent_queue_cnt_removeable--;
4720 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4721 sctp_log_sack(asoc->last_acked_seq,
4723 tp1->rec.data.TSN_seq,
4726 SCTP_LOG_FREE_SENT);
4728 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4731 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4733 panic("Warning flight size is postive and should be 0");
4735 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4736 asoc->total_flight);
4738 asoc->total_flight = 0;
4740 /* sa_ignore NO_NULL_CHK */
4741 if ((wake_him) && (stcb->sctp_socket)) {
4742 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4746 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4747 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4748 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4750 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4751 so = SCTP_INP_SO(stcb->sctp_ep);
4752 atomic_add_int(&stcb->asoc.refcnt, 1);
4753 SCTP_TCB_UNLOCK(stcb);
4754 SCTP_SOCKET_LOCK(so, 1);
4755 SCTP_TCB_LOCK(stcb);
4756 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4757 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4758 /* assoc was freed while we were unlocked */
4759 SCTP_SOCKET_UNLOCK(so, 1);
4763 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4764 #if defined (__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4765 SCTP_SOCKET_UNLOCK(so, 1);
4768 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4769 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4773 if (asoc->fast_retran_loss_recovery && accum_moved) {
4774 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4775 /* Setup so we will exit RFC2582 fast recovery */
4776 will_exit_fast_recovery = 1;
4780 * Check for revoked fragments:
4782 * if Previous sack - Had no frags then we can't have any revoked if
4783 * Previous sack - Had frag's then - If we now have frags aka
4784 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4785 * some of them. else - The peer revoked all ACKED fragments, since
4786 * we had some before and now we have NONE.
4790 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4791 asoc->saw_sack_with_frags = 1;
4792 } else if (asoc->saw_sack_with_frags) {
4793 int cnt_revoked = 0;
4795 /* Peer revoked all dg's marked or acked */
4796 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4797 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4798 tp1->sent = SCTP_DATAGRAM_SENT;
4799 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4800 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4801 tp1->whoTo->flight_size,
4803 (uintptr_t) tp1->whoTo,
4804 tp1->rec.data.TSN_seq);
4806 sctp_flight_size_increase(tp1);
4807 sctp_total_flight_increase(stcb, tp1);
4808 tp1->rec.data.chunk_was_revoked = 1;
4810 * To ensure that this increase in
4811 * flightsize, which is artificial, does not
4812 * throttle the sender, we also increase the
4813 * cwnd artificially.
4815 tp1->whoTo->cwnd += tp1->book_size;
4822 asoc->saw_sack_with_frags = 0;
4825 asoc->saw_sack_with_nr_frags = 1;
4827 asoc->saw_sack_with_nr_frags = 0;
4829 /* JRS - Use the congestion control given in the CC module */
4830 if (ecne_seen == 0) {
4831 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4832 if (net->net_ack2 > 0) {
4834 * Karn's rule applies to clearing error
4835 * count, this is optional.
4837 net->error_count = 0;
4838 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4839 /* addr came good */
4840 net->dest_state |= SCTP_ADDR_REACHABLE;
4841 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4842 0, (void *)net, SCTP_SO_NOT_LOCKED);
4844 if (net == stcb->asoc.primary_destination) {
4845 if (stcb->asoc.alternate) {
4847 * release the alternate,
4850 sctp_free_remote_addr(stcb->asoc.alternate);
4851 stcb->asoc.alternate = NULL;
4854 if (net->dest_state & SCTP_ADDR_PF) {
4855 net->dest_state &= ~SCTP_ADDR_PF;
4856 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4857 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4858 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4859 /* Done with this net */
4862 /* restore any doubled timers */
4863 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4864 if (net->RTO < stcb->asoc.minrto) {
4865 net->RTO = stcb->asoc.minrto;
4867 if (net->RTO > stcb->asoc.maxrto) {
4868 net->RTO = stcb->asoc.maxrto;
4872 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4874 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4875 /* nothing left in-flight */
4876 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4877 /* stop all timers */
4878 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4879 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4880 net->flight_size = 0;
4881 net->partial_bytes_acked = 0;
4883 asoc->total_flight = 0;
4884 asoc->total_flight_count = 0;
4886 /**********************************/
4887 /* Now what about shutdown issues */
4888 /**********************************/
4889 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4890 /* nothing left on sendqueue.. consider done */
4891 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4892 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4893 asoc->peers_rwnd, 0, 0, a_rwnd);
4895 asoc->peers_rwnd = a_rwnd;
4896 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4897 /* SWS sender side engages */
4898 asoc->peers_rwnd = 0;
4901 if ((asoc->stream_queue_cnt == 1) &&
4902 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4903 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4904 (asoc->locked_on_sending)
4906 struct sctp_stream_queue_pending *sp;
4909 * I may be in a state where we got all across.. but
4910 * cannot write more due to a shutdown... we abort
4911 * since the user did not indicate EOR in this case.
4913 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4915 if ((sp) && (sp->length == 0)) {
4916 asoc->locked_on_sending = NULL;
4917 if (sp->msg_is_complete) {
4918 asoc->stream_queue_cnt--;
4920 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4921 asoc->stream_queue_cnt--;
4925 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4926 (asoc->stream_queue_cnt == 0)) {
4927 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4928 /* Need to abort here */
4934 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4935 0, M_DONTWAIT, 1, MT_DATA);
4937 struct sctp_paramhdr *ph;
4940 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4942 ph = mtod(oper, struct sctp_paramhdr *);
4943 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4944 ph->param_length = htons(SCTP_BUF_LEN(oper));
4945 ippp = (uint32_t *) (ph + 1);
4946 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4948 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4949 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4952 struct sctp_nets *netp;
4954 if (asoc->alternate) {
4955 netp = asoc->alternate;
4957 netp = asoc->primary_destination;
4959 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4960 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4961 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4963 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4964 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4965 sctp_stop_timers_for_shutdown(stcb);
4966 sctp_send_shutdown(stcb, netp);
4967 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4968 stcb->sctp_ep, stcb, netp);
4969 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4970 stcb->sctp_ep, stcb, netp);
4973 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4974 (asoc->stream_queue_cnt == 0)) {
4975 struct sctp_nets *netp;
4977 if (asoc->alternate) {
4978 netp = asoc->alternate;
4980 netp = asoc->primary_destination;
4982 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4985 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4986 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4987 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4988 sctp_send_shutdown_ack(stcb, netp);
4989 sctp_stop_timers_for_shutdown(stcb);
4990 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4991 stcb->sctp_ep, stcb, netp);
4996 * Now here we are going to recycle net_ack for a different use...
4999 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5004 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5005 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5006 * automatically ensure that.
5008 if ((asoc->sctp_cmt_on_off > 0) &&
5009 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5010 (cmt_dac_flag == 0)) {
5011 this_sack_lowest_newack = cum_ack;
5013 if ((num_seg > 0) || (num_nr_seg > 0)) {
5014 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5015 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5017 /* JRS - Use the congestion control given in the CC module */
5018 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5020 /* Now are we exiting loss recovery ? */
5021 if (will_exit_fast_recovery) {
5022 /* Ok, we must exit fast recovery */
5023 asoc->fast_retran_loss_recovery = 0;
5025 if ((asoc->sat_t3_loss_recovery) &&
5026 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5027 /* end satellite t3 loss recovery */
5028 asoc->sat_t3_loss_recovery = 0;
5033 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5034 if (net->will_exit_fast_recovery) {
5035 /* Ok, we must exit fast recovery */
5036 net->fast_retran_loss_recovery = 0;
5040 /* Adjust and set the new rwnd value */
5041 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5042 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5043 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5045 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5046 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5047 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5048 /* SWS sender side engages */
5049 asoc->peers_rwnd = 0;
5051 if (asoc->peers_rwnd > old_rwnd) {
5052 win_probe_recovery = 1;
5055 * Now we must setup so we have a timer up for anyone with
5061 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5062 if (win_probe_recovery && (net->window_probe)) {
5063 win_probe_recovered = 1;
5065 * Find first chunk that was used with
5066 * window probe and clear the event. Put
5067 * it back into the send queue as if has
5070 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5071 if (tp1->window_probe) {
5072 sctp_window_probe_recovery(stcb, asoc, tp1);
5077 if (net->flight_size) {
5079 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5080 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5081 stcb->sctp_ep, stcb, net);
5083 if (net->window_probe) {
5084 net->window_probe = 0;
5087 if (net->window_probe) {
5089 * In window probes we must assure a timer
5090 * is still running there
5092 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5093 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5094 stcb->sctp_ep, stcb, net);
5097 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5098 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5100 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5105 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5106 (asoc->sent_queue_retran_cnt == 0) &&
5107 (win_probe_recovered == 0) &&
5110 * huh, this should not happen unless all packets are
5111 * PR-SCTP and marked to skip of course.
5113 if (sctp_fs_audit(asoc)) {
5114 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5115 net->flight_size = 0;
5117 asoc->total_flight = 0;
5118 asoc->total_flight_count = 0;
5119 asoc->sent_queue_retran_cnt = 0;
5120 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5121 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5122 sctp_flight_size_increase(tp1);
5123 sctp_total_flight_increase(stcb, tp1);
5124 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5125 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5132 /*********************************************/
5133 /* Here we perform PR-SCTP procedures */
5135 /*********************************************/
5136 /* C1. update advancedPeerAckPoint */
5137 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5138 asoc->advanced_peer_ack_point = cum_ack;
5140 /* C2. try to further move advancedPeerAckPoint ahead */
5141 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5142 struct sctp_tmit_chunk *lchk;
5143 uint32_t old_adv_peer_ack_point;
5145 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5146 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5147 /* C3. See if we need to send a Fwd-TSN */
5148 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5150 * ISSUE with ECN, see FWD-TSN processing.
5152 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5153 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5154 0xee, cum_ack, asoc->advanced_peer_ack_point,
5155 old_adv_peer_ack_point);
5157 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5158 send_forward_tsn(stcb, asoc);
5160 /* try to FR fwd-tsn's that get lost too */
5161 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5162 send_forward_tsn(stcb, asoc);
5167 /* Assure a timer is up */
5168 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5169 stcb->sctp_ep, stcb, lchk->whoTo);
5172 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5173 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5175 stcb->asoc.peers_rwnd,
5176 stcb->asoc.total_flight,
5177 stcb->asoc.total_output_queue_size);
/*
 * sctp_update_acked(): process the cumulative TSN ack carried inside a
 * received SHUTDOWN chunk by treating it as a SACK with no gap reports.
 * The peer's receive window is deliberately reconstructed so that the
 * express SACK handler leaves peers_rwnd unchanged (it will subtract
 * total_flight back out).
 *
 * NOTE(review): the return-type line and the braces of this function are
 * not visible in this extract of the file.
 */
5182 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5185 uint32_t cum_ack, a_rwnd;
/* Cumulative TSN comes off the wire in network byte order. */
5187 cum_ack = ntohl(cp->cumulative_tsn_ack);
5188 /* Arrange so a_rwnd does NOT change */
5189 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5191 /* Now call the express sack handling */
/* abort_flag is set by the handler if the association must be torn down. */
5192 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * sctp_kick_prsctp_reorder_queue(): after a FORWARD-TSN has advanced
 * strmin->last_sequence_delivered, flush the per-stream ordered-delivery
 * queue in two passes:
 *   1) deliver every queued chunk whose SSN is <= the new cumulative
 *      delivery point (they were skipped over by the peer), then
 *   2) resume strictly in-order delivery for any consecutive SSNs that
 *      are now unblocked, bumping last_sequence_delivered as we go.
 *
 * NOTE(review): the line initializing `asoc` (presumably
 * `asoc = &stcb->asoc;`) and several closing braces are not visible in
 * this extract — confirm against the full file.
 */
5196 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5197 struct sctp_stream_in *strmin)
5199 struct sctp_queued_to_read *ctl, *nctl;
5200 struct sctp_association *asoc;
/* tt == highest SSN already considered delivered on this stream. */
5204 tt = strmin->last_sequence_delivered;
5206 * First deliver anything prior to and including the stream no that
/* Pass 1: everything at or below the delivery point goes up now. */
5209 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5210 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5211 /* this is deliverable now */
5212 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5213 /* subtract pending on streams */
5214 asoc->size_on_all_streams -= ctl->length;
5215 sctp_ucount_decr(asoc->cnt_on_all_streams);
5216 /* deliver it to at least the delivery-q */
5217 if (stcb->sctp_socket) {
/* Chunk is leaving the stream queue; it can no longer be renege'd. */
5218 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5219 sctp_add_to_readq(stcb->sctp_ep, stcb,
5221 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5224 /* no more delivery now. */
5229 * now we must deliver things in queue the normal way if any are
/* Pass 2: walk forward delivering only exactly-consecutive SSNs. */
5232 tt = strmin->last_sequence_delivered + 1;
5233 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5234 if (tt == ctl->sinfo_ssn) {
5235 /* this is deliverable now */
5236 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5237 /* subtract pending on streams */
5238 asoc->size_on_all_streams -= ctl->length;
5239 sctp_ucount_decr(asoc->cnt_on_all_streams);
5240 /* deliver it to at least the delivery-q */
/* Advance the delivery point before handing the chunk up. */
5241 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5242 if (stcb->sctp_socket) {
5243 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5244 sctp_add_to_readq(stcb->sctp_ep, stcb,
5246 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Recompute the next expected SSN for the following iteration. */
5249 tt = strmin->last_sequence_delivered + 1;
/*
 * sctp_flush_reassm_for_str_seq(): remove from the reassembly queue all
 * fragments belonging to ordered message (stream, seq) that a peer's
 * FORWARD-TSN has told us to skip. Unordered fragments and fragments of
 * other streams are left alone. While tossing, the association's
 * partial-delivery bookkeeping (tsn_last_delivered / str_of_pdapi /
 * ssn_of_pdapi / fragment_flags) and the per-stream
 * last_sequence_delivered are advanced so later delivery logic stays
 * consistent.
 *
 * NOTE(review): the final lines of this function (the early-exit for
 * stream_seq > seq and the closing braces) are not visible in this
 * extract.
 */
5257 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5258 struct sctp_association *asoc,
5259 uint16_t stream, uint16_t seq)
5261 struct sctp_tmit_chunk *chk, *nchk;
5263 /* For each one on here see if we need to toss it */
5265 * For now large messages held on the reasmqueue that are complete
5266 * will be tossed too. We could in theory do more work to spin
5267 * through and stop after dumping one msg aka seeing the start of a
5268 * new msg at the head, and call the delivery function... to see if
5269 * it can be delivered... But for now we just dump everything on the
5272 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5274 * Do not toss it if on a different stream or marked for
5275 * unordered delivery in which case the stream sequence
5276 * number has no meaning.
5278 if ((chk->rec.data.stream_number != stream) ||
5279 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5282 if (chk->rec.data.stream_seq == seq) {
5283 /* It needs to be tossed */
5284 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
/* Track the highest tossed TSN as "delivered" for PD-API state. */
5285 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5286 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5287 asoc->str_of_pdapi = chk->rec.data.stream_number;
5288 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5289 asoc->fragment_flags = chk->rec.data.rcv_flags;
/* Account for the bytes leaving the reassembly queue. */
5291 asoc->size_on_reasm_queue -= chk->send_size;
5292 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5294 /* Clear up any stream problem */
5295 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5296 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5298 * We must dump forward this streams
5299 * sequence number if the chunk is not
5300 * unordered that is being skipped. There is
5301 * a chance that if the peer does not
5302 * include the last fragment in its FWD-TSN
5303 * we WILL have a problem here since you
5304 * would have a partial chunk in queue that
5305 * may not be deliverable. Also if a Partial
5306 * delivery API as started the user may get
5307 * a partial chunk. The next read returning
5308 * a new chunk... really ugly but I see no
5309 * way around it! Maybe a notify??
5311 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
/* Release the mbuf chain and return the chunk descriptor. */
5314 sctp_m_freem(chk->data);
5317 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5318 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5320 * If the stream_seq is > than the purging one, we
5330 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5331 struct sctp_forward_tsn_chunk *fwd,
5332 int *abort_flag, struct mbuf *m, int offset)
5334 /* The pr-sctp fwd tsn */
5336 * here we will perform all the data receiver side steps for
5337 * processing FwdTSN, as required in by pr-sctp draft:
5339 * Assume we get FwdTSN(x):
5341 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5342 * others we have 3) examine and update re-ordering queue on
5343 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5344 * report where we are.
5346 struct sctp_association *asoc;
5347 uint32_t new_cum_tsn, gap;
5348 unsigned int i, fwd_sz, m_size;
5350 struct sctp_stream_in *strm;
5351 struct sctp_tmit_chunk *chk, *nchk;
5352 struct sctp_queued_to_read *ctl, *sv;
5355 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5356 SCTPDBG(SCTP_DEBUG_INDATA1,
5357 "Bad size too small/big fwd-tsn\n");
5360 m_size = (stcb->asoc.mapping_array_size << 3);
5361 /*************************************************************/
5362 /* 1. Here we update local cumTSN and shift the bitmap array */
5363 /*************************************************************/
5364 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5366 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5367 /* Already got there ... */
5371 * now we know the new TSN is more advanced, let's find the actual
5374 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5375 asoc->cumulative_tsn = new_cum_tsn;
5376 if (gap >= m_size) {
5377 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5381 * out of range (of single byte chunks in the rwnd I
5382 * give out). This must be an attacker.
5385 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5386 0, M_DONTWAIT, 1, MT_DATA);
5388 struct sctp_paramhdr *ph;
5391 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5392 (sizeof(uint32_t) * 3);
5393 ph = mtod(oper, struct sctp_paramhdr *);
5394 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5395 ph->param_length = htons(SCTP_BUF_LEN(oper));
5396 ippp = (uint32_t *) (ph + 1);
5397 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5399 *ippp = asoc->highest_tsn_inside_map;
5401 *ippp = new_cum_tsn;
5403 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5404 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
5407 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5409 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5410 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5411 asoc->highest_tsn_inside_map = new_cum_tsn;
5413 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5414 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5416 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5417 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5420 SCTP_TCB_LOCK_ASSERT(stcb);
5421 for (i = 0; i <= gap; i++) {
5422 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5423 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5424 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5425 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5426 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5431 /*************************************************************/
5432 /* 2. Clear up re-assembly queue */
5433 /*************************************************************/
5435 * First service it if pd-api is up, just in case we can progress it
5438 if (asoc->fragmented_delivery_inprogress) {
5439 sctp_service_reassembly(stcb, asoc);
5441 /* For each one on here see if we need to toss it */
5443 * For now large messages held on the reasmqueue that are complete
5444 * will be tossed too. We could in theory do more work to spin
5445 * through and stop after dumping one msg aka seeing the start of a
5446 * new msg at the head, and call the delivery function... to see if
5447 * it can be delivered... But for now we just dump everything on the
5450 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5451 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5452 /* It needs to be tossed */
5453 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5454 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5455 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5456 asoc->str_of_pdapi = chk->rec.data.stream_number;
5457 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5458 asoc->fragment_flags = chk->rec.data.rcv_flags;
5460 asoc->size_on_reasm_queue -= chk->send_size;
5461 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5463 /* Clear up any stream problem */
5464 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5465 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5467 * We must dump forward this streams
5468 * sequence number if the chunk is not
5469 * unordered that is being skipped. There is
5470 * a chance that if the peer does not
5471 * include the last fragment in its FWD-TSN
5472 * we WILL have a problem here since you
5473 * would have a partial chunk in queue that
5474 * may not be deliverable. Also if a Partial
5475 * delivery API as started the user may get
5476 * a partial chunk. The next read returning
5477 * a new chunk... really ugly but I see no
5478 * way around it! Maybe a notify??
5480 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5483 sctp_m_freem(chk->data);
5486 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5489 * Ok we have gone beyond the end of the fwd-tsn's
5495 /*******************************************************/
5496 /* 3. Update the PR-stream re-ordering queues and fix */
5497 /* delivery issues as needed. */
5498 /*******************************************************/
5499 fwd_sz -= sizeof(*fwd);
5502 unsigned int num_str;
5503 struct sctp_strseq *stseq, strseqbuf;
5505 offset += sizeof(*fwd);
5507 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5508 num_str = fwd_sz / sizeof(struct sctp_strseq);
5509 for (i = 0; i < num_str; i++) {
5512 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5513 sizeof(struct sctp_strseq),
5514 (uint8_t *) & strseqbuf);
5515 offset += sizeof(struct sctp_strseq);
5516 if (stseq == NULL) {
5520 st = ntohs(stseq->stream);
5522 st = ntohs(stseq->sequence);
5523 stseq->sequence = st;
5528 * Ok we now look for the stream/seq on the read
5529 * queue where its not all delivered. If we find it
5530 * we transmute the read entry into a PDI_ABORTED.
5532 if (stseq->stream >= asoc->streamincnt) {
5533 /* screwed up streams, stop! */
5536 if ((asoc->str_of_pdapi == stseq->stream) &&
5537 (asoc->ssn_of_pdapi == stseq->sequence)) {
5539 * If this is the one we were partially
5540 * delivering now then we no longer are.
5541 * Note this will change with the reassembly
5544 asoc->fragmented_delivery_inprogress = 0;
5546 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5547 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5548 if ((ctl->sinfo_stream == stseq->stream) &&
5549 (ctl->sinfo_ssn == stseq->sequence)) {
5550 str_seq = (stseq->stream << 16) | stseq->sequence;
5552 ctl->pdapi_aborted = 1;
5553 sv = stcb->asoc.control_pdapi;
5554 stcb->asoc.control_pdapi = ctl;
5555 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5557 SCTP_PARTIAL_DELIVERY_ABORTED,
5559 SCTP_SO_NOT_LOCKED);
5560 stcb->asoc.control_pdapi = sv;
5562 } else if ((ctl->sinfo_stream == stseq->stream) &&
5563 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5564 /* We are past our victim SSN */
5568 strm = &asoc->strmin[stseq->stream];
5569 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5570 /* Update the sequence number */
5571 strm->last_sequence_delivered = stseq->sequence;
5573 /* now kick the stream the new way */
5574 /* sa_ignore NO_NULL_CHK */
5575 sctp_kick_prsctp_reorder_queue(stcb, strm);
5577 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5580 * Now slide thing forward.
5582 sctp_slide_mapping_arrays(stcb);
5584 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5585 /* now lets kick out and check for more fragmented delivery */
5586 /* sa_ignore NO_NULL_CHK */
5587 sctp_deliver_reasm_check(stcb, &stcb->asoc);