2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send, that is) and will be sending it ... for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Refresh the association's cached advertised receive window (my_rwnd)
 * from the current socket-buffer / queue state via sctp_calc_rwnd().
 * NOTE(review): the return-type line and braces are missing from this
 * extract of the original file.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
/*
 * Compute the receive window to advertise: the full socket-buffer limit
 * (at least SCTP_MINIMAL_RWND) when nothing is queued anywhere, otherwise
 * the remaining socket-buffer space minus the data (plus per-mbuf MSIZE
 * overhead) still held on the reassembly and per-stream queues, minus
 * accumulated control-message overhead.
 * NOTE(review): this extract is missing the return-type line, braces,
 * the return statements, and part of the SWS (silly window syndrome)
 * guard at the bottom -- do not assume the visible text is complete.
 */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
/* Without a socket there is no buffer to measure against. */
77 if (stcb->sctp_socket == NULL)
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
/* Data still on the reassembly queue, plus mbuf bookkeeping overhead. */
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
/* Data still queued on the individual inbound streams. */
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even it is 0. SWS engaged
109 if (calc < stcb->asoc.my_rwnd_control_len) {
118 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and initialize a sctp_queued_to_read entry from the scalar
 * fields of an incoming DATA chunk, taking a reference on the source
 * transport (net).  Returns NULL when allocation fails.
 * NOTE(review): the tail of the parameter list (at least the data mbuf
 * `dm` assigned below) and the braces / early-return lines are missing
 * from this extract.
 */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
128 struct sctp_queued_to_read *read_queue_e = NULL;
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
/* Chunk flags ride in the upper byte of sinfo_flags. */
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
/* Hold the remote address for the lifetime of this readq entry. */
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
157 return (read_queue_e);
162 * Build out our readq entry based on the incoming packet.
/*
 * Companion to sctp_build_readq_entry(): build a readq entry directly
 * from a queued transmit-chunk structure (chk), copying its rec.data
 * fields and taking over chk->data.  Takes a reference on chk->whoTo.
 * Returns NULL when allocation fails.
 * NOTE(review): braces and the early-return line after the NULL check
 * are missing from this extract.
 */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
168 struct sctp_queued_to_read *read_queue_e = NULL;
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
/* Chunk flags ride in the upper byte of sinfo_flags. */
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
/* Hold the remote address for the lifetime of this readq entry. */
186 atomic_add_int(&chk->whoTo->ref_count, 1);
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
197 return (read_queue_e);
/*
 * Build an mbuf of ancillary (cmsg) data describing a received message,
 * emitting only the cmsg types the user has enabled on the endpoint:
 * SCTP_RCVINFO, optionally SCTP_NXTINFO (when a next message is known
 * to be available), and the legacy SCTP_SNDRCV / SCTP_EXTRCV.
 * Returns NULL when the user wants no ancillary data (and, presumably,
 * when mbuf allocation fails -- that path is not visible here).
 * NOTE(review): this extract is missing the return-type line, braces,
 * the `len`/`ret` declarations, several return statements, and some
 * conditional lines (e.g. the test guarding the SCTP_NXTINFO emission).
 */
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
204 struct sctp_extrcvinfo *seinfo;
205 struct sctp_sndrcvinfo *outinfo;
206 struct sctp_rcvinfo *rcvinfo;
207 struct sctp_nxtinfo *nxtinfo;
/* Nothing to do if no ancillary-data feature is enabled. */
214 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 /* user does not want any ancillary data */
/* First pass: size the mbuf by summing CMSG_SPACE() of each cmsg. */
221 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
224 seinfo = (struct sctp_extrcvinfo *)sinfo;
225 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
228 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
235 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
238 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
244 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
249 SCTP_BUF_LEN(ret) = 0;
251 /* We need a CMSG header followed by the struct */
252 cmh = mtod(ret, struct cmsghdr *);
254 * Make sure that there is no un-initialized padding between the
255 * cmsg header and cmsg data and after the cmsg data.
/* Second pass: fill in each enabled cmsg, advancing cmh by CMSG_SPACE(). */
258 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
259 cmh->cmsg_level = IPPROTO_SCTP;
260 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
261 cmh->cmsg_type = SCTP_RCVINFO;
262 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
263 rcvinfo->rcv_sid = sinfo->sinfo_stream;
264 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
265 rcvinfo->rcv_flags = sinfo->sinfo_flags;
266 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
267 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
268 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
269 rcvinfo->rcv_context = sinfo->sinfo_context;
270 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
271 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
272 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* SCTP_NXTINFO: describe the next message queued behind this one.
 * NOTE(review): the guarding condition line is missing in this extract. */
275 cmh->cmsg_level = IPPROTO_SCTP;
276 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
277 cmh->cmsg_type = SCTP_NXTINFO;
278 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
279 nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
280 nxtinfo->nxt_flags = 0;
281 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
282 nxtinfo->nxt_flags |= SCTP_UNORDERED;
284 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
285 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
287 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
288 nxtinfo->nxt_flags |= SCTP_COMPLETE;
290 nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
291 nxtinfo->nxt_length = seinfo->sreinfo_next_length;
292 nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
293 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
294 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
/* Legacy sndrcv info: extended or plain, depending on EXT_RCVINFO. */
296 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
297 cmh->cmsg_level = IPPROTO_SCTP;
298 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
300 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
301 cmh->cmsg_type = SCTP_EXTRCV;
302 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
303 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
305 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
306 cmh->cmsg_type = SCTP_SNDRCV;
308 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move a delivered TSN from the renege-able mapping array to the
 * non-renege-able (NR) mapping array, and maintain the two "highest TSN"
 * watermarks.  A TSN at or below the cumulative ack needs no move.
 * Only relevant when draining (renege) is enabled via the sctp_do_drain
 * sysctl.  NOTE(review): braces, returns, and the non-INVARIANTS branch
 * of the missing-TSN error path are not visible in this extract.
 */
316 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
318 uint32_t gap, i, cumackp1;
/* Renege disabled: nothing can be revoked, so no marking needed. */
321 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
324 cumackp1 = asoc->cumulative_tsn + 1;
325 if (SCTP_TSN_GT(cumackp1, tsn)) {
327 * this tsn is behind the cum ack and thus we don't need to
328 * worry about it being moved from one to the other.
332 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
/* A TSN we are marking must already be recorded as received. */
333 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
334 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
335 sctp_print_mapping_array(asoc);
337 panic("Things are really messed up now!!");
/* Transfer the bit: set in the NR map, clear in the revokable map. */
340 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
341 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
342 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
343 asoc->highest_tsn_inside_nr_map = tsn;
345 if (tsn == asoc->highest_tsn_inside_map) {
346 /* We must back down to see what the new highest is */
347 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
348 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
349 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
350 asoc->highest_tsn_inside_map = i;
/* No revokable TSN left at all: park the watermark below the base. */
356 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
363 * We are delivering currently from the reassembly queue. We must continue to
364 * deliver until we either: 1) run out of space. 2) run out of sequential
365 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * Drain consecutive fragments from asoc->reasmqueue into the socket
 * receive buffer while a fragmented (partial) delivery is in progress.
 * If the socket is gone, the queue is purged instead.  After the LAST
 * fragment completes the message, any now-deliverable ordered messages
 * on the same stream are pushed to the readq as well.
 * NOTE(review): this extract omits many lines (declarations of cntDel /
 * nxt_todel / end, braces, break/return statements, and the non-panic
 * error-recovery path), so the visible control flow is incomplete.
 */
368 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
370 struct sctp_tmit_chunk *chk, *nchk;
375 struct sctp_queued_to_read *control, *ctl, *nctl;
380 cntDel = stream_no = 0;
/* Socket gone or association being torn down: purge the queue. */
381 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
382 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
383 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
384 /* socket above is long gone or going.. */
386 asoc->fragmented_delivery_inprogress = 0;
387 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
388 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
389 asoc->size_on_reasm_queue -= chk->send_size;
390 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
392 * Lose the data pointer, since its in the socket
396 sctp_m_freem(chk->data);
399 /* Now free the address and data */
400 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
401 /* sa_ignore FREED_MEMORY */
405 SCTP_TCB_LOCK_ASSERT(stcb);
/* Main loop: deliver fragments in strict TSN order. */
406 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
407 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
408 /* Can't deliver more :< */
411 stream_no = chk->rec.data.stream_number;
412 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
413 if (nxt_todel != chk->rec.data.stream_seq &&
414 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
416 * Not the next sequence to deliver in its stream OR
/* FIRST fragment: start a new readq entry for partial delivery. */
421 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
423 control = sctp_build_readq_entry_chk(stcb, chk);
424 if (control == NULL) {
428 /* save it off for our future deliveries */
429 stcb->asoc.control_pdapi = control;
430 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
434 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
435 sctp_add_to_readq(stcb->sctp_ep,
436 stcb, control, &stcb->sctp_socket->so_rcv, end,
437 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* MIDDLE/LAST fragment: append to the in-progress pdapi entry. */
440 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
444 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
445 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
446 stcb->asoc.control_pdapi,
447 chk->data, end, chk->rec.data.TSN_seq,
448 &stcb->sctp_socket->so_rcv)) {
450 * something is very wrong, either
451 * control_pdapi is NULL, or the tail_mbuf
452 * is corrupt, or there is a EOM already on
455 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
459 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
460 panic("This should not happen control_pdapi NULL?");
462 /* if we did not panic, it was a EOM */
463 panic("Bad chunking ??");
465 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
466 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
468 SCTP_PRINTF("Bad chunking ??\n");
469 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
477 /* pull it we did it */
478 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
/* LAST fragment delivered: the fragmented message is complete. */
479 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
480 asoc->fragmented_delivery_inprogress = 0;
481 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
482 asoc->strmin[stream_no].last_sequence_delivered++;
484 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
485 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
487 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
489 * turn the flag back on since we just delivered
492 asoc->fragmented_delivery_inprogress = 1;
/* Record what was just delivered for later validation. */
494 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
495 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
496 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
497 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
499 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
500 asoc->size_on_reasm_queue -= chk->send_size;
501 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
502 /* free up the chk */
504 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
506 if (asoc->fragmented_delivery_inprogress == 0) {
508 * Now lets see if we can deliver the next one on
511 struct sctp_stream_in *strm;
513 strm = &asoc->strmin[stream_no];
514 nxt_todel = strm->last_sequence_delivered + 1;
/* Flush any ordered messages that became deliverable. */
515 TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
516 /* Deliver more if we can. */
517 if (nxt_todel == ctl->sinfo_ssn) {
518 TAILQ_REMOVE(&strm->inqueue, ctl, next);
519 asoc->size_on_all_streams -= ctl->length;
520 sctp_ucount_decr(asoc->cnt_on_all_streams);
521 strm->last_sequence_delivered++;
522 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
523 sctp_add_to_readq(stcb->sctp_ep, stcb,
525 &stcb->sctp_socket->so_rcv, 1,
526 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
530 nxt_todel = strm->last_sequence_delivered + 1;
538 * Queue the chunk either right into the socket buffer if it is the next one
539 * to go OR put it in the correct place in the delivery queue. If we do
540 * append to the so_buf, keep doing so until we are out of order. One big
541 * question still remains, what to do when the socket buffer is FULL??
/*
 * Place a fully-reassembled, ordered message (control) either directly
 * on the socket's read queue (when its SSN is the next to deliver, then
 * keep draining subsequent in-order entries) or in SSN order on the
 * stream's inqueue.  A duplicate or already-delivered SSN either frees
 * the entry or aborts the association (protocol violation).
 * *abort_flag is presumably set on the abort paths (the assignments are
 * not visible in this extract).
 * NOTE(review): many lines are missing here -- declarations (nxt_todel,
 * op_err, the Apple-only `so`), braces, returns, and some queue-size
 * bookkeeping -- so the visible control flow is incomplete.
 */
544 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
545 struct sctp_queued_to_read *control, int *abort_flag)
548 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
549 * all the data in one stream this could happen quite rapidly. One
550 * could use the TSN to keep track of things, but this scheme breaks
551 * down in the other type of stream useage that could occur. Send a
552 * single msg to stream 0, send 4Billion messages to stream 1, now
553 * send a message to stream 0. You have a situation where the TSN
554 * has wrapped but not in the stream. Is this worth worrying about
555 * or should we just change our queue sort at the bottom to be by
558 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
559 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
560 * assignment this could happen... and I don't see how this would be
561 * a violation. So for now I am undecided an will leave the sort by
562 * SSN alone. Maybe a hybred approach is the answer
565 struct sctp_stream_in *strm;
566 struct sctp_queued_to_read *at;
570 char msg[SCTP_DIAG_INFO_LEN];
/* Account the message against the stream queues up front. */
573 asoc->size_on_all_streams += control->length;
574 sctp_ucount_incr(asoc->cnt_on_all_streams);
575 strm = &asoc->strmin[control->sinfo_stream];
576 nxt_todel = strm->last_sequence_delivered + 1;
577 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
578 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
580 SCTPDBG(SCTP_DEBUG_INDATA1,
581 "queue to stream called for sid:%u ssn:%u tsn:%u lastdel:%u nxt:%u\n",
582 (uint32_t) control->sinfo_stream, (uint32_t) control->sinfo_ssn,
583 (uint32_t) control->sinfo_tsn,
584 (uint32_t) strm->last_sequence_delivered, (uint32_t) nxt_todel);
/* An SSN at or behind the last delivered one is a protocol violation. */
585 if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
586 /* The incoming sseq is behind where we last delivered? */
587 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
588 control->sinfo_ssn, strm->last_sequence_delivered);
591 * throw it in the stream so it gets cleaned up in
592 * association destruction
594 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
595 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
596 strm->last_sequence_delivered, control->sinfo_tsn,
597 control->sinfo_stream, control->sinfo_ssn);
598 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
599 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
600 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
/* Apple/lock-testing builds must take the socket lock before readq ops. */
605 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
608 so = SCTP_INP_SO(stcb->sctp_ep);
609 atomic_add_int(&stcb->asoc.refcnt, 1);
610 SCTP_TCB_UNLOCK(stcb);
611 SCTP_SOCKET_LOCK(so, 1);
613 atomic_subtract_int(&stcb->asoc.refcnt, 1);
614 if (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) {
615 SCTP_SOCKET_UNLOCK(so, 1);
/* Fast path: this is the very next SSN -- deliver immediately. */
619 if (nxt_todel == control->sinfo_ssn) {
620 /* can be delivered right away? */
621 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
622 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
624 /* EY it wont be queued if it could be delivered directly */
626 asoc->size_on_all_streams -= control->length;
627 sctp_ucount_decr(asoc->cnt_on_all_streams);
628 strm->last_sequence_delivered++;
630 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
631 sctp_add_to_readq(stcb->sctp_ep, stcb,
633 &stcb->sctp_socket->so_rcv, 1,
634 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_LOCKED);
/* Keep draining queued entries that are now in order. */
635 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
637 nxt_todel = strm->last_sequence_delivered + 1;
638 if (nxt_todel == control->sinfo_ssn) {
639 TAILQ_REMOVE(&strm->inqueue, control, next);
640 asoc->size_on_all_streams -= control->length;
641 sctp_ucount_decr(asoc->cnt_on_all_streams);
642 strm->last_sequence_delivered++;
644 * We ignore the return of deliver_data here
645 * since we always can hold the chunk on the
646 * d-queue. And we have a finite number that
647 * can be delivered from the strq.
649 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
650 sctp_log_strm_del(control, NULL,
651 SCTP_STR_LOG_FROM_IMMED_DEL);
653 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
654 sctp_add_to_readq(stcb->sctp_ep, stcb,
656 &stcb->sctp_socket->so_rcv, 1,
657 SCTP_READ_LOCK_NOT_HELD,
/* Slow path: not deliverable now; insert in SSN order. */
666 * Ok, we did not deliver this guy, find the correct place
667 * to put it on the queue.
669 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
670 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
671 SCTP_SOCKET_UNLOCK(so, 1);
675 if (TAILQ_EMPTY(&strm->inqueue)) {
677 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
678 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
680 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
682 TAILQ_FOREACH(at, &strm->inqueue, next) {
683 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
685 * one in queue is bigger than the
686 * new one, insert before this one
688 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
689 sctp_log_strm_del(control, at,
690 SCTP_STR_LOG_FROM_INSERT_MD);
692 TAILQ_INSERT_BEFORE(at, control, next);
694 } else if (at->sinfo_ssn == control->sinfo_ssn) {
696 * Gak, He sent me a duplicate str
700 * foo bar, I guess I will just free
701 * this new guy, should we abort
702 * too? FIX ME MAYBE? Or it COULD be
703 * that the SSN's have wrapped.
704 * Maybe I should compare to TSN
705 * somehow... sigh for now just blow
/* Duplicate SSN already queued: drop the new copy and its reference. */
710 sctp_m_freem(control->data);
711 control->data = NULL;
712 asoc->size_on_all_streams -= control->length;
713 sctp_ucount_decr(asoc->cnt_on_all_streams);
714 if (control->whoFrom) {
715 sctp_free_remote_addr(control->whoFrom);
716 control->whoFrom = NULL;
718 sctp_free_a_readq(stcb, control);
719 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
720 SCTP_SOCKET_UNLOCK(so, 1);
724 if (TAILQ_NEXT(at, next) == NULL) {
726 * We are at the end, insert
729 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
730 sctp_log_strm_del(control, at,
731 SCTP_STR_LOG_FROM_INSERT_TL);
733 TAILQ_INSERT_AFTER(&strm->inqueue,
741 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
742 SCTP_SOCKET_UNLOCK(so, 1);
747 * Returns two things: You get the total size of the deliverable parts of the
748 * first fragmented message on the reassembly queue. And you get a 1 back if
749 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * Walk the head of asoc->reasmqueue accumulating into *t_size the sizes
 * of consecutive-TSN fragments of the first message; completeness is
 * signalled when the LAST_FRAG chunk is reached within the run.
 * NOTE(review): the return-type line, braces, *t_size initialization,
 * the per-iteration `tsn++`, and the return statements are missing from
 * this extract.
 */
752 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
754 struct sctp_tmit_chunk *chk;
758 chk = TAILQ_FIRST(&asoc->reasmqueue);
760 /* nothing on the queue */
763 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
764 /* Not a first on the queue */
767 tsn = chk->rec.data.TSN_seq;
768 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
/* Stop at the first gap in the TSN sequence. */
769 if (tsn != chk->rec.data.TSN_seq) {
772 *t_size += chk->send_size;
773 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/*
 * Check whether fragmented delivery can start (or continue) from the
 * reassembly queue.  If no partial delivery is in progress and the head
 * of the queue is a deliverable FIRST fragment, start one when either
 * the whole message is present or at least pd_point bytes are queued
 * (partial-delivery API threshold); then let sctp_service_reassembly()
 * drain what it can.
 * NOTE(review): the return-type line, braces, returns, the handling of
 * an empty queue, and parts of several statements (e.g. the assignment
 * to nxt_todel and asoc->str_of_pdapi) are missing from this extract.
 */
782 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
784 struct sctp_tmit_chunk *chk;
786 uint32_t tsize, pd_point;
789 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: reset the reassembly accounting. */
792 asoc->size_on_reasm_queue = 0;
793 asoc->cnt_on_reasm_queue = 0;
796 if (asoc->fragmented_delivery_inprogress == 0) {
798 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
799 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
800 (nxt_todel == chk->rec.data.stream_seq ||
801 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
803 * Yep the first one is here and its ok to deliver
/* Partial-delivery threshold: a fraction of the rcv buffer, capped
 * by the endpoint's configured partial_delivery_point. */
806 if (stcb->sctp_socket) {
807 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
808 stcb->sctp_ep->partial_delivery_point);
810 pd_point = stcb->sctp_ep->partial_delivery_point;
812 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
814 * Yes, we setup to start reception, by
815 * backing down the TSN just in case we
816 * can't deliver. If we
818 asoc->fragmented_delivery_inprogress = 1;
819 asoc->tsn_last_delivered =
820 chk->rec.data.TSN_seq - 1;
822 chk->rec.data.stream_number;
823 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
824 asoc->pdapi_ppid = chk->rec.data.payloadtype;
825 asoc->fragment_flags = chk->rec.data.rcv_flags;
826 sctp_service_reassembly(stcb, asoc);
831 * Service re-assembly will deliver stream data queued at
832 * the end of fragmented delivery.. but it wont know to go
833 * back and call itself again... we do that here with the
836 sctp_service_reassembly(stcb, asoc);
837 if (asoc->fragmented_delivery_inprogress == 0) {
839 * finished our Fragmented delivery, could be more
848 * Dump onto the re-assembly queue, in its proper place. After dumping on the
849 * queue, see if anthing can be delivered. If so pull it off (or as much as
850 * we can. If we run out of space then we must dump what we can and set the
851 * appropriate flag to say we queued what we could.
854 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
855 struct sctp_tmit_chunk *chk, int *abort_flag)
858 char msg[SCTP_DIAG_INFO_LEN];
859 uint32_t cum_ackp1, prev_tsn, post_tsn;
860 struct sctp_tmit_chunk *at, *prev, *next;
863 cum_ackp1 = asoc->tsn_last_delivered + 1;
864 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
865 /* This is the first one on the queue */
866 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
868 * we do not check for delivery of anything when only one
871 asoc->size_on_reasm_queue = chk->send_size;
872 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
873 if (chk->rec.data.TSN_seq == cum_ackp1) {
874 if (asoc->fragmented_delivery_inprogress == 0 &&
875 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
876 SCTP_DATA_FIRST_FRAG) {
878 * An empty queue, no delivery inprogress,
879 * we hit the next one and it does NOT have
880 * a FIRST fragment mark.
882 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
883 snprintf(msg, sizeof(msg),
884 "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
885 chk->rec.data.TSN_seq,
886 chk->rec.data.stream_number,
887 chk->rec.data.stream_seq);
888 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
889 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
890 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
892 } else if (asoc->fragmented_delivery_inprogress &&
893 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
895 * We are doing a partial delivery and the
896 * NEXT chunk MUST be either the LAST or
897 * MIDDLE fragment NOT a FIRST
899 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
900 snprintf(msg, sizeof(msg),
901 "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
902 chk->rec.data.TSN_seq,
903 chk->rec.data.stream_number,
904 chk->rec.data.stream_seq);
905 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
906 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
907 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
909 } else if (asoc->fragmented_delivery_inprogress) {
911 * Here we are ok with a MIDDLE or LAST
914 if (chk->rec.data.stream_number !=
915 asoc->str_of_pdapi) {
916 /* Got to be the right STR No */
917 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
918 chk->rec.data.stream_number,
920 snprintf(msg, sizeof(msg),
921 "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
923 chk->rec.data.TSN_seq,
924 chk->rec.data.stream_number,
925 chk->rec.data.stream_seq);
926 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
927 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
928 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
930 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
931 SCTP_DATA_UNORDERED &&
932 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
933 /* Got to be the right STR Seq */
934 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
935 chk->rec.data.stream_seq,
937 snprintf(msg, sizeof(msg),
938 "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
940 chk->rec.data.TSN_seq,
941 chk->rec.data.stream_number,
942 chk->rec.data.stream_seq);
943 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
944 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
945 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
953 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
954 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
956 * one in queue is bigger than the new one, insert
960 asoc->size_on_reasm_queue += chk->send_size;
961 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
963 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
965 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
966 /* Gak, He sent me a duplicate str seq number */
968 * foo bar, I guess I will just free this new guy,
969 * should we abort too? FIX ME MAYBE? Or it COULD be
970 * that the SSN's have wrapped. Maybe I should
971 * compare to TSN somehow... sigh for now just blow
975 sctp_m_freem(chk->data);
978 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
982 if (TAILQ_NEXT(at, sctp_next) == NULL) {
984 * We are at the end, insert it after this
988 asoc->size_on_reasm_queue += chk->send_size;
989 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
990 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
997 prev_tsn = chk->rec.data.TSN_seq - 1;
998 if (prev_tsn == prev->rec.data.TSN_seq) {
1000 * Ok the one I am dropping onto the end is the
1001 * NEXT. A bit of valdiation here.
1003 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1004 SCTP_DATA_FIRST_FRAG ||
1005 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1006 SCTP_DATA_MIDDLE_FRAG) {
1008 * Insert chk MUST be a MIDDLE or LAST
1011 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1012 SCTP_DATA_FIRST_FRAG) {
1013 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1014 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1015 snprintf(msg, sizeof(msg),
1016 "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1017 chk->rec.data.TSN_seq,
1018 chk->rec.data.stream_number,
1019 chk->rec.data.stream_seq);
1020 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1021 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1022 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1026 if (chk->rec.data.stream_number !=
1027 prev->rec.data.stream_number) {
1029 * Huh, need the correct STR here,
1030 * they must be the same.
1032 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
1033 chk->rec.data.stream_number,
1034 prev->rec.data.stream_number);
1035 snprintf(msg, sizeof(msg),
1036 "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1037 prev->rec.data.stream_number,
1038 chk->rec.data.TSN_seq,
1039 chk->rec.data.stream_number,
1040 chk->rec.data.stream_seq);
1041 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1042 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1043 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1047 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1048 (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1050 * Huh, need the same ordering here,
1051 * they must be the same.
1053 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
1054 snprintf(msg, sizeof(msg),
1055 "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1056 (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1057 chk->rec.data.TSN_seq,
1058 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1059 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1060 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1061 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1065 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1066 chk->rec.data.stream_seq !=
1067 prev->rec.data.stream_seq) {
1069 * Huh, need the correct STR here,
1070 * they must be the same.
1072 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1073 chk->rec.data.stream_seq,
1074 prev->rec.data.stream_seq);
1075 snprintf(msg, sizeof(msg),
1076 "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1077 prev->rec.data.stream_seq,
1078 chk->rec.data.TSN_seq,
1079 chk->rec.data.stream_number,
1080 chk->rec.data.stream_seq);
1081 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1082 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1083 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1087 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1088 SCTP_DATA_LAST_FRAG) {
1089 /* Insert chk MUST be a FIRST */
1090 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1091 SCTP_DATA_FIRST_FRAG) {
1092 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1093 snprintf(msg, sizeof(msg),
1094 "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1095 chk->rec.data.TSN_seq,
1096 chk->rec.data.stream_number,
1097 chk->rec.data.stream_seq);
1098 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1099 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1100 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1108 post_tsn = chk->rec.data.TSN_seq + 1;
1109 if (post_tsn == next->rec.data.TSN_seq) {
1111 * Ok the one I am inserting ahead of is my NEXT
1112 * one. A bit of valdiation here.
1114 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1115 /* Insert chk MUST be a last fragment */
1116 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1117 != SCTP_DATA_LAST_FRAG) {
1118 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1119 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1120 snprintf(msg, sizeof(msg),
1121 "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1122 chk->rec.data.TSN_seq,
1123 chk->rec.data.stream_number,
1124 chk->rec.data.stream_seq);
1125 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1126 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1127 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1131 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1132 SCTP_DATA_MIDDLE_FRAG ||
1133 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1134 SCTP_DATA_LAST_FRAG) {
1136 * Insert chk CAN be MIDDLE or FIRST NOT
1139 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1140 SCTP_DATA_LAST_FRAG) {
1141 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1142 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1143 snprintf(msg, sizeof(msg),
1144 "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1145 chk->rec.data.TSN_seq,
1146 chk->rec.data.stream_number,
1147 chk->rec.data.stream_seq);
1148 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1149 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1150 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1154 if (chk->rec.data.stream_number !=
1155 next->rec.data.stream_number) {
1157 * Huh, need the correct STR here,
1158 * they must be the same.
1160 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1161 chk->rec.data.stream_number,
1162 next->rec.data.stream_number);
1163 snprintf(msg, sizeof(msg),
1164 "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1165 next->rec.data.stream_number,
1166 chk->rec.data.TSN_seq,
1167 chk->rec.data.stream_number,
1168 chk->rec.data.stream_seq);
1169 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1170 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1171 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1175 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1176 (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1178 * Huh, need the same ordering here,
1179 * they must be the same.
1181 SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
1182 snprintf(msg, sizeof(msg),
1183 "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1184 (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1185 chk->rec.data.TSN_seq,
1186 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1187 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1188 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1189 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1193 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1194 chk->rec.data.stream_seq !=
1195 next->rec.data.stream_seq) {
1197 * Huh, need the correct STR here,
1198 * they must be the same.
1200 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1201 chk->rec.data.stream_seq,
1202 next->rec.data.stream_seq);
1203 snprintf(msg, sizeof(msg),
1204 "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1205 next->rec.data.stream_seq,
1206 chk->rec.data.TSN_seq,
1207 chk->rec.data.stream_number,
1208 chk->rec.data.stream_seq);
1209 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1210 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1211 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1218 /* Do we need to do some delivery? check */
1219 sctp_deliver_reasm_check(stcb, asoc);
 * This is an unfortunate routine. It checks to make sure an evil guy is not
 * stuffing us full of bad packet fragments. A broken peer could also do this,
 * but that is doubtful. It is too bad I must worry about evil crackers, sigh.
1229 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1232 struct sctp_tmit_chunk *at;
1235 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1236 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1237 /* is it one bigger? */
1238 tsn_est = at->rec.data.TSN_seq + 1;
1239 if (tsn_est == TSN_seq) {
1240 /* yep. It better be a last then */
1241 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1242 SCTP_DATA_LAST_FRAG) {
1244 * Ok this guy belongs next to a guy
1245 * that is NOT last, it should be a
1246 * middle/last, not a complete
1252 * This guy is ok since its a LAST
1253 * and the new chunk is a fully
1254 * self- contained one.
1259 } else if (TSN_seq == at->rec.data.TSN_seq) {
1260 /* Software error since I have a dup? */
1264 * Ok, 'at' is larger than new chunk but does it
1265 * need to be right before it.
1267 tsn_est = TSN_seq + 1;
1268 if (tsn_est == at->rec.data.TSN_seq) {
1269 /* Yep, It better be a first */
1270 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1271 SCTP_DATA_FIRST_FRAG) {
1283 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1284 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1285 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1286 int *break_flag, int last_chunk)
1288 /* Process a data chunk */
1289 /* struct sctp_tmit_chunk *chk; */
1290 struct sctp_tmit_chunk *chk;
1294 int need_reasm_check = 0;
1295 uint16_t strmno, strmseq;
1296 struct mbuf *op_err;
1297 char msg[SCTP_DIAG_INFO_LEN];
1298 struct sctp_queued_to_read *control;
1300 uint32_t protocol_id;
1301 uint8_t chunk_flags;
1302 struct sctp_stream_reset_list *liste;
1305 tsn = ntohl(ch->dp.tsn);
1306 chunk_flags = ch->ch.chunk_flags;
1307 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1308 asoc->send_sack = 1;
1310 protocol_id = ch->dp.protocol_id;
1311 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1312 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1313 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1318 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1319 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1320 /* It is a duplicate */
1321 SCTP_STAT_INCR(sctps_recvdupdata);
1322 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1323 /* Record a dup for the next outbound sack */
1324 asoc->dup_tsns[asoc->numduptsns] = tsn;
1327 asoc->send_sack = 1;
1330 /* Calculate the number of TSN's between the base and this TSN */
1331 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1332 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1333 /* Can't hold the bit in the mapping at max array, toss it */
1336 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1337 SCTP_TCB_LOCK_ASSERT(stcb);
1338 if (sctp_expand_mapping_array(asoc, gap)) {
1339 /* Can't expand, drop it */
1343 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1346 /* See if we have received this one already */
1347 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1348 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1349 SCTP_STAT_INCR(sctps_recvdupdata);
1350 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1351 /* Record a dup for the next outbound sack */
1352 asoc->dup_tsns[asoc->numduptsns] = tsn;
1355 asoc->send_sack = 1;
1359 * Check to see about the GONE flag, duplicates would cause a sack
1360 * to be sent up above
1362 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1363 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1364 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1366 * wait a minute, this guy is gone, there is no longer a
1367 * receiver. Send peer an ABORT!
1369 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1370 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1375 * Now before going further we see if there is room. If NOT then we
1376 * MAY let one through only IF this TSN is the one we are waiting
1377 * for on a partial delivery API.
1380 /* now do the tests */
1381 if (((asoc->cnt_on_all_streams +
1382 asoc->cnt_on_reasm_queue +
1383 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1384 (((int)asoc->my_rwnd) <= 0)) {
1386 * When we have NO room in the rwnd we check to make sure
1387 * the reader is doing its job...
1389 if (stcb->sctp_socket->so_rcv.sb_cc) {
1390 /* some to read, wake-up */
1391 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1394 so = SCTP_INP_SO(stcb->sctp_ep);
1395 atomic_add_int(&stcb->asoc.refcnt, 1);
1396 SCTP_TCB_UNLOCK(stcb);
1397 SCTP_SOCKET_LOCK(so, 1);
1398 SCTP_TCB_LOCK(stcb);
1399 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1400 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1401 /* assoc was freed while we were unlocked */
1402 SCTP_SOCKET_UNLOCK(so, 1);
1406 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1407 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1408 SCTP_SOCKET_UNLOCK(so, 1);
1411 /* now is it in the mapping array of what we have accepted? */
1412 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1413 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1414 /* Nope not in the valid range dump it */
1415 sctp_set_rwnd(stcb, asoc);
1416 if ((asoc->cnt_on_all_streams +
1417 asoc->cnt_on_reasm_queue +
1418 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1419 SCTP_STAT_INCR(sctps_datadropchklmt);
1421 SCTP_STAT_INCR(sctps_datadroprwnd);
1427 strmno = ntohs(ch->dp.stream_id);
1428 if (strmno >= asoc->streamincnt) {
1429 struct sctp_paramhdr *phdr;
1432 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1433 0, M_NOWAIT, 1, MT_DATA);
1435 /* add some space up front so prepend will work well */
1436 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1437 phdr = mtod(mb, struct sctp_paramhdr *);
1439 * Error causes are just param's and this one has
1440 * two back to back phdr, one with the error type
1441 * and size, the other with the streamid and a rsvd
1443 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1444 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1445 phdr->param_length =
1446 htons(sizeof(struct sctp_paramhdr) * 2);
1448 /* We insert the stream in the type field */
1449 phdr->param_type = ch->dp.stream_id;
1450 /* And set the length to 0 for the rsvd field */
1451 phdr->param_length = 0;
1452 sctp_queue_op_err(stcb, mb);
1454 SCTP_STAT_INCR(sctps_badsid);
1455 SCTP_TCB_LOCK_ASSERT(stcb);
1456 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1457 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1458 asoc->highest_tsn_inside_nr_map = tsn;
1460 if (tsn == (asoc->cumulative_tsn + 1)) {
1461 /* Update cum-ack */
1462 asoc->cumulative_tsn = tsn;
1467 * Before we continue lets validate that we are not being fooled by
1468 * an evil attacker. We can only have 4k chunks based on our TSN
1469 * spread allowed by the mapping array 512 * 8 bits, so there is no
1470 * way our stream sequence numbers could have wrapped. We of course
1471 * only validate the FIRST fragment so the bit must be set.
1473 strmseq = ntohs(ch->dp.stream_sequence);
1474 #ifdef SCTP_ASOCLOG_OF_TSNS
1475 SCTP_TCB_LOCK_ASSERT(stcb);
1476 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1477 asoc->tsn_in_at = 0;
1478 asoc->tsn_in_wrapped = 1;
1480 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1481 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1482 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1483 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1484 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1485 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1486 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1487 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1490 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1491 (TAILQ_EMPTY(&asoc->resetHead)) &&
1492 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1493 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1494 /* The incoming sseq is behind where we last delivered? */
1495 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1496 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1498 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1499 asoc->strmin[strmno].last_sequence_delivered,
1500 tsn, strmno, strmseq);
1501 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1502 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1503 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1507 /************************************
1508 * From here down we may find ch-> invalid
1509 * so its a good idea NOT to use it.
1510 *************************************/
1512 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1513 if (last_chunk == 0) {
1514 dmbuf = SCTP_M_COPYM(*m,
1515 (offset + sizeof(struct sctp_data_chunk)),
1517 #ifdef SCTP_MBUF_LOGGING
1518 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1519 sctp_log_mbc(dmbuf, SCTP_MBUF_ICOPY);
1523 /* We can steal the last chunk */
1527 /* lop off the top part */
1528 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1529 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1530 l_len = SCTP_BUF_LEN(dmbuf);
1533 * need to count up the size hopefully does not hit
1539 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1540 l_len += SCTP_BUF_LEN(lat);
1543 if (l_len > the_len) {
1544 /* Trim the end round bytes off too */
1545 m_adj(dmbuf, -(l_len - the_len));
1548 if (dmbuf == NULL) {
1549 SCTP_STAT_INCR(sctps_nomem);
1552 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1553 asoc->fragmented_delivery_inprogress == 0 &&
1554 TAILQ_EMPTY(&asoc->resetHead) &&
1556 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1557 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1558 /* Candidate for express delivery */
1560 * Its not fragmented, No PD-API is up, Nothing in the
1561 * delivery queue, Its un-ordered OR ordered and the next to
1562 * deliver AND nothing else is stuck on the stream queue,
1563 * And there is room for it in the socket buffer. Lets just
1564 * stuff it up the buffer....
1567 /* It would be nice to avoid this copy if we could :< */
1568 sctp_alloc_a_readq(stcb, control);
1569 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1574 if (control == NULL) {
1575 goto failed_express_del;
1577 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1578 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1579 asoc->highest_tsn_inside_nr_map = tsn;
1581 sctp_add_to_readq(stcb->sctp_ep, stcb,
1582 control, &stcb->sctp_socket->so_rcv,
1583 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1585 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1586 /* for ordered, bump what we delivered */
1587 asoc->strmin[strmno].last_sequence_delivered++;
1589 SCTP_STAT_INCR(sctps_recvexpress);
1590 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1591 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1592 SCTP_STR_LOG_FROM_EXPRS_DEL);
1596 goto finish_express_del;
1599 /* If we reach here this is a new chunk */
1602 /* Express for fragmented delivery? */
1603 if ((asoc->fragmented_delivery_inprogress) &&
1604 (stcb->asoc.control_pdapi) &&
1605 (asoc->str_of_pdapi == strmno) &&
1606 (asoc->ssn_of_pdapi == strmseq)
1608 control = stcb->asoc.control_pdapi;
1609 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1610 /* Can't be another first? */
1611 goto failed_pdapi_express_del;
1613 if (tsn == (control->sinfo_tsn + 1)) {
1614 /* Yep, we can add it on */
1617 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1620 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1622 &stcb->sctp_socket->so_rcv)) {
1623 SCTP_PRINTF("Append fails end:%d\n", end);
1624 goto failed_pdapi_express_del;
1626 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1627 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1628 asoc->highest_tsn_inside_nr_map = tsn;
1630 SCTP_STAT_INCR(sctps_recvexpressm);
1631 asoc->tsn_last_delivered = tsn;
1632 asoc->fragment_flags = chunk_flags;
1633 asoc->tsn_of_pdapi_last_delivered = tsn;
1634 asoc->last_flags_delivered = chunk_flags;
1635 asoc->last_strm_seq_delivered = strmseq;
1636 asoc->last_strm_no_delivered = strmno;
1638 /* clean up the flags and such */
1639 asoc->fragmented_delivery_inprogress = 0;
1640 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1641 asoc->strmin[strmno].last_sequence_delivered++;
1643 stcb->asoc.control_pdapi = NULL;
1644 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1646 * There could be another message
1649 need_reasm_check = 1;
1653 goto finish_express_del;
1656 failed_pdapi_express_del:
1658 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1659 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1660 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1661 asoc->highest_tsn_inside_nr_map = tsn;
1664 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1665 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1666 asoc->highest_tsn_inside_map = tsn;
1669 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1670 sctp_alloc_a_chunk(stcb, chk);
1672 /* No memory so we drop the chunk */
1673 SCTP_STAT_INCR(sctps_nomem);
1674 if (last_chunk == 0) {
1675 /* we copied it, free the copy */
1676 sctp_m_freem(dmbuf);
1680 chk->rec.data.TSN_seq = tsn;
1681 chk->no_fr_allowed = 0;
1682 chk->rec.data.stream_seq = strmseq;
1683 chk->rec.data.stream_number = strmno;
1684 chk->rec.data.payloadtype = protocol_id;
1685 chk->rec.data.context = stcb->asoc.context;
1686 chk->rec.data.doing_fast_retransmit = 0;
1687 chk->rec.data.rcv_flags = chunk_flags;
1689 chk->send_size = the_len;
1691 atomic_add_int(&net->ref_count, 1);
1694 sctp_alloc_a_readq(stcb, control);
1695 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1700 if (control == NULL) {
1701 /* No memory so we drop the chunk */
1702 SCTP_STAT_INCR(sctps_nomem);
1703 if (last_chunk == 0) {
1704 /* we copied it, free the copy */
1705 sctp_m_freem(dmbuf);
1709 control->length = the_len;
1712 /* Mark it as received */
1713 /* Now queue it where it belongs */
1714 if (control != NULL) {
1715 /* First a sanity check */
1716 if (asoc->fragmented_delivery_inprogress) {
1718 * Ok, we have a fragmented delivery in progress if
1719 * this chunk is next to deliver OR belongs in our
1720 * view to the reassembly, the peer is evil or
1723 uint32_t estimate_tsn;
1725 estimate_tsn = asoc->tsn_last_delivered + 1;
1726 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1727 (estimate_tsn == control->sinfo_tsn)) {
1728 /* Evil/Broke peer */
1729 sctp_m_freem(control->data);
1730 control->data = NULL;
1731 if (control->whoFrom) {
1732 sctp_free_remote_addr(control->whoFrom);
1733 control->whoFrom = NULL;
1735 sctp_free_a_readq(stcb, control);
1736 snprintf(msg, sizeof(msg), "Reas. queue emtpy, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1737 tsn, strmno, strmseq);
1738 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1739 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1740 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1747 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1748 sctp_m_freem(control->data);
1749 control->data = NULL;
1750 if (control->whoFrom) {
1751 sctp_free_remote_addr(control->whoFrom);
1752 control->whoFrom = NULL;
1754 sctp_free_a_readq(stcb, control);
1755 snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1756 tsn, strmno, strmseq);
1757 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1758 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_18;
1759 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1768 /* No PDAPI running */
1769 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1771 * Reassembly queue is NOT empty validate
1772 * that this tsn does not need to be in
1773 * reasembly queue. If it does then our peer
1774 * is broken or evil.
1776 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1777 sctp_m_freem(control->data);
1778 control->data = NULL;
1779 if (control->whoFrom) {
1780 sctp_free_remote_addr(control->whoFrom);
1781 control->whoFrom = NULL;
1783 sctp_free_a_readq(stcb, control);
1784 snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1785 tsn, strmno, strmseq);
1786 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1787 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
1788 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1797 /* ok, if we reach here we have passed the sanity checks */
1798 if (chunk_flags & SCTP_DATA_UNORDERED) {
1799 /* queue directly into socket buffer */
1800 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1801 sctp_add_to_readq(stcb->sctp_ep, stcb,
1803 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1806 * Special check for when streams are resetting. We
1807 * could be more smart about this and check the
1808 * actual stream to see if it is not being reset..
1809 * that way we would not create a HOLB when amongst
1810 * streams being reset and those not being reset.
1812 * We take complete messages that have a stream reset
1813 * intervening (aka the TSN is after where our
1814 * cum-ack needs to be) off and put them on a
1815 * pending_reply_queue. The reassembly ones we do
1816 * not have to worry about since they are all sorted
1817 * and proceessed by TSN order. It is only the
1818 * singletons I must worry about.
1820 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1821 SCTP_TSN_GT(tsn, liste->tsn)) {
1823 * yep its past where we need to reset... go
1824 * ahead and queue it.
1826 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1828 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1830 struct sctp_queued_to_read *ctlOn,
1832 unsigned char inserted = 0;
1834 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1835 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1839 TAILQ_INSERT_BEFORE(ctlOn, control, next);
1844 if (inserted == 0) {
1846 * must be put at end, use
1847 * prevP (all setup from
1848 * loop) to setup nextP.
1850 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1854 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1864 /* Into the re-assembly queue */
1865 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1868 * the assoc is now gone and chk was put onto the
1869 * reasm queue, which has all been freed.
1878 if (tsn == (asoc->cumulative_tsn + 1)) {
1879 /* Update cum-ack */
1880 asoc->cumulative_tsn = tsn;
1886 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1888 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1890 SCTP_STAT_INCR(sctps_recvdata);
1891 /* Set it present please */
1892 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1893 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1895 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1896 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1897 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1899 /* check the special flag for stream resets */
1900 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1901 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1903 * we have finished working through the backlogged TSN's now
1904 * time to reset streams. 1: call reset function. 2: free
1905 * pending_reply space 3: distribute any chunks in
1906 * pending_reply_queue.
1908 struct sctp_queued_to_read *ctl, *nctl;
1910 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1911 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1912 sctp_send_deferred_reset_response(stcb, liste, SCTP_STREAM_RESET_RESULT_PERFORMED);
1913 SCTP_FREE(liste, SCTP_M_STRESET);
1914 /* sa_ignore FREED_MEMORY */
1915 liste = TAILQ_FIRST(&asoc->resetHead);
1916 if (TAILQ_EMPTY(&asoc->resetHead)) {
1917 /* All can be removed */
1918 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1919 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1920 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1926 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1927 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1931 * if ctl->sinfo_tsn is <= liste->tsn we can
1932 * process it which is the NOT of
1933 * ctl->sinfo_tsn > liste->tsn
1935 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1936 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1943 * Now service re-assembly to pick up anything that has been
1944 * held on reassembly queue?
1946 sctp_deliver_reasm_check(stcb, asoc);
1947 need_reasm_check = 0;
1949 if (need_reasm_check) {
1950 /* Another one waits ? */
1951 sctp_deliver_reasm_check(stcb, asoc);
1956 int8_t sctp_map_lookup_tab[256] = {
1957 0, 1, 0, 2, 0, 1, 0, 3,
1958 0, 1, 0, 2, 0, 1, 0, 4,
1959 0, 1, 0, 2, 0, 1, 0, 3,
1960 0, 1, 0, 2, 0, 1, 0, 5,
1961 0, 1, 0, 2, 0, 1, 0, 3,
1962 0, 1, 0, 2, 0, 1, 0, 4,
1963 0, 1, 0, 2, 0, 1, 0, 3,
1964 0, 1, 0, 2, 0, 1, 0, 6,
1965 0, 1, 0, 2, 0, 1, 0, 3,
1966 0, 1, 0, 2, 0, 1, 0, 4,
1967 0, 1, 0, 2, 0, 1, 0, 3,
1968 0, 1, 0, 2, 0, 1, 0, 5,
1969 0, 1, 0, 2, 0, 1, 0, 3,
1970 0, 1, 0, 2, 0, 1, 0, 4,
1971 0, 1, 0, 2, 0, 1, 0, 3,
1972 0, 1, 0, 2, 0, 1, 0, 7,
1973 0, 1, 0, 2, 0, 1, 0, 3,
1974 0, 1, 0, 2, 0, 1, 0, 4,
1975 0, 1, 0, 2, 0, 1, 0, 3,
1976 0, 1, 0, 2, 0, 1, 0, 5,
1977 0, 1, 0, 2, 0, 1, 0, 3,
1978 0, 1, 0, 2, 0, 1, 0, 4,
1979 0, 1, 0, 2, 0, 1, 0, 3,
1980 0, 1, 0, 2, 0, 1, 0, 6,
1981 0, 1, 0, 2, 0, 1, 0, 3,
1982 0, 1, 0, 2, 0, 1, 0, 4,
1983 0, 1, 0, 2, 0, 1, 0, 3,
1984 0, 1, 0, 2, 0, 1, 0, 5,
1985 0, 1, 0, 2, 0, 1, 0, 3,
1986 0, 1, 0, 2, 0, 1, 0, 4,
1987 0, 1, 0, 2, 0, 1, 0, 3,
1988 0, 1, 0, 2, 0, 1, 0, 8
1993 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1996 * Now we also need to check the mapping array in a couple of ways.
1997 * 1) Did we move the cum-ack point?
1999 * When you first glance at this you might think that all entries that
2000 * make up the postion of the cum-ack would be in the nr-mapping
2001 * array only.. i.e. things up to the cum-ack are always
2002 * deliverable. Thats true with one exception, when its a fragmented
2003 * message we may not deliver the data until some threshold (or all
2004 * of it) is in place. So we must OR the nr_mapping_array and
2005 * mapping_array to get a true picture of the cum-ack.
2007 struct sctp_association *asoc;
2010 int slide_from, slide_end, lgap, distance;
2011 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2015 old_cumack = asoc->cumulative_tsn;
2016 old_base = asoc->mapping_array_base_tsn;
2017 old_highest = asoc->highest_tsn_inside_map;
2019 * We could probably improve this a small bit by calculating the
2020 * offset of the current cum-ack as the starting point.
2023 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2024 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2028 /* there is a 0 bit */
2029 at += sctp_map_lookup_tab[val];
2033 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2035 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2036 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2038 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2039 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2041 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2042 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2043 sctp_print_mapping_array(asoc);
2044 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2045 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2047 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2048 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2051 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2052 highest_tsn = asoc->highest_tsn_inside_nr_map;
2054 highest_tsn = asoc->highest_tsn_inside_map;
2056 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2057 /* The complete array was completed by a single FR */
2058 /* highest becomes the cum-ack */
2066 /* clear the array */
2067 clr = ((at + 7) >> 3);
2068 if (clr > asoc->mapping_array_size) {
2069 clr = asoc->mapping_array_size;
2071 memset(asoc->mapping_array, 0, clr);
2072 memset(asoc->nr_mapping_array, 0, clr);
2074 for (i = 0; i < asoc->mapping_array_size; i++) {
2075 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2076 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2077 sctp_print_mapping_array(asoc);
2081 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2082 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2083 } else if (at >= 8) {
2084 /* we can slide the mapping array down */
2085 /* slide_from holds where we hit the first NON 0xff byte */
2088 * now calculate the ceiling of the move using our highest
2091 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2092 slide_end = (lgap >> 3);
2093 if (slide_end < slide_from) {
2094 sctp_print_mapping_array(asoc);
2096 panic("impossible slide");
2098 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2099 lgap, slide_end, slide_from, at);
2103 if (slide_end > asoc->mapping_array_size) {
2105 panic("would overrun buffer");
2107 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2108 asoc->mapping_array_size, slide_end);
2109 slide_end = asoc->mapping_array_size;
2112 distance = (slide_end - slide_from) + 1;
2113 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2114 sctp_log_map(old_base, old_cumack, old_highest,
2115 SCTP_MAP_PREPARE_SLIDE);
2116 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2117 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2119 if (distance + slide_from > asoc->mapping_array_size ||
2122 * Here we do NOT slide forward the array so that
2123 * hopefully when more data comes in to fill it up
2124 * we will be able to slide it forward. Really I
2125 * don't think this should happen :-0
2128 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2129 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2130 (uint32_t) asoc->mapping_array_size,
2131 SCTP_MAP_SLIDE_NONE);
2136 for (ii = 0; ii < distance; ii++) {
2137 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2138 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2141 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2142 asoc->mapping_array[ii] = 0;
2143 asoc->nr_mapping_array[ii] = 0;
2145 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2146 asoc->highest_tsn_inside_map += (slide_from << 3);
2148 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2149 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2151 asoc->mapping_array_base_tsn += (slide_from << 3);
2152 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2153 sctp_log_map(asoc->mapping_array_base_tsn,
2154 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2155 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide whether to send a SACK immediately or (re)start the delayed-ack
 * timer, after incoming DATA has been processed.
 *
 * stcb      - TCB of the association; the caller holds the TCB lock.
 * was_a_gap - non-zero if a TSN gap existed before this packet arrived.
 *
 * NOTE(review): this extract has lines elided (e.g. the initialization of
 * 'asoc' and several closing braces are not visible); comments below cover
 * only the visible statements.
 */
sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
	struct sctp_association *asoc;
	uint32_t highest_tsn;

	/*
	 * The highest TSN seen is the larger of the highest TSN in the
	 * non-renegable (nr) mapping array and the regular mapping array.
	 */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
		highest_tsn = asoc->highest_tsn_inside_map;
	/*
	 * Now we need to see if we need to queue a sack or just start the
	 * timer (if allowed).
	 */
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/*
		 * Special case for SHUTDOWN-SENT: make sure the SACK timer
		 * is off and instead send a SHUTDOWN and a SACK right away.
		 */
		if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
			sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL,
			    SCTP_FROM_SCTP_INDATA + SCTP_LOC_20);
		/* Re-send SHUTDOWN, preferring the alternate net if set. */
		sctp_send_shutdown(stcb,
		    ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
		sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
	/* is there a gap now ? */
	is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
	/*
	 * CMT DAC algorithm: increase the count of packets received
	 * since the last ack was sent.
	 */
	stcb->asoc.cmt_dac_pkts_rcvd++;
	if ((stcb->asoc.send_sack == 1) ||	/* We need to send a SACK immediately */
	    ((was_a_gap) && (is_a_gap == 0)) ||	/* was a gap, but none remains */
	    (stcb->asoc.numduptsns) ||	/* we have dup's */
	    (is_a_gap) ||		/* is still a gap */
	    (stcb->asoc.delayed_ack == 0) ||	/* Delayed sack disabled */
	    (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq)	/* hit limit of pkts */
		/*
		 * One of the "must ack" conditions holds.  With CMT DAC,
		 * gap-only acks may still be delayed (checked next).
		 */
		if ((stcb->asoc.sctp_cmt_on_off > 0) &&
		    (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
		    (stcb->asoc.send_sack == 0) &&
		    (stcb->asoc.numduptsns == 0) &&
		    (stcb->asoc.delayed_ack) &&
		    (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
			/*
			 * CMT DAC algorithm: with CMT, delay acks even in
			 * the face of reordering.  Acks that do not have
			 * to be sent for the reasons above will be
			 * delayed; that is, acks that would have been
			 * sent due to gap reports are delayed with DAC.
			 * Start the delayed ack timer.
			 */
			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL);
			/*
			 * Ok we must build a SACK since the timer is
			 * pending, we got our first packet OR there are
			 * gaps or duplicates.
			 */
			(void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
			sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
		/* Nothing forces an immediate SACK: ensure the timer runs. */
		if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
			sctp_timer_start(SCTP_TIMER_TYPE_RECV,
			    stcb->sctp_ep, stcb, NULL);
/*
 * Service the reassembly queue: finish any partial-delivery (PD-API)
 * already in progress and, if the first fragment of a deliverable message
 * heads the queue with enough buffered data, start a new partial delivery.
 *
 * NOTE(review): lines are elided from this extract (the declaration of
 * 'nxt_todel', early returns and some closing braces are not visible).
 */
sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
	struct sctp_tmit_chunk *chk;
	uint32_t tsize, pd_point;

	if (asoc->fragmented_delivery_inprogress) {
		/* Push out whatever the in-progress PD-API can deliver. */
		sctp_service_reassembly(stcb, asoc);
	/* Can we proceed further, i.e. the PD-API is complete */
	if (asoc->fragmented_delivery_inprogress) {
	/*
	 * Now is there some other chunk I can deliver from the reassembly
	 * queue?
	 */
	chk = TAILQ_FIRST(&asoc->reasmqueue);
	/* Queue is empty: reset the reassembly book-keeping counters. */
	asoc->size_on_reasm_queue = 0;
	asoc->cnt_on_reasm_queue = 0;
	/* Next expected stream sequence number for this chunk's stream. */
	nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
	if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
	    ((nxt_todel == chk->rec.data.stream_seq) ||
	    (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
		/*
		 * Yep the first one is here.  We set up to start reception
		 * by backing down the TSN just in case we can't deliver.
		 */
		/*
		 * Before we start, though, either all of the message should
		 * be here, or enough of it to cross the partial-delivery
		 * threshold computed below.
		 */
		if (stcb->sctp_socket) {
			/* PD point: min of a fraction of the rcv buffer and the EP setting. */
			pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
			    stcb->sctp_ep->partial_delivery_point);
			/* No socket: fall back to the endpoint's PD point. */
			pd_point = stcb->sctp_ep->partial_delivery_point;
		if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
			/* Record the PD-API state for the message being delivered. */
			asoc->fragmented_delivery_inprogress = 1;
			asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
			asoc->str_of_pdapi = chk->rec.data.stream_number;
			asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
			asoc->pdapi_ppid = chk->rec.data.payloadtype;
			asoc->fragment_flags = chk->rec.data.rcv_flags;
			sctp_service_reassembly(stcb, asoc);
			/* Did the service run complete the whole message? */
			if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * Walk every chunk of a received packet starting at the first DATA chunk:
 * feed DATA chunks to sctp_process_a_data_chunk(), and handle non-DATA
 * chunks found after DATA (known types are ignored or aborted on depending
 * on the strict-data-order sysctl; unknown types follow the RFC type-bit
 * rules).  Afterwards, update liveness bookkeeping, service the reassembly
 * queue, and decide on SACK generation via sctp_sack_check().
 *
 * *high_tsn is seeded with the cumulative TSN and handed to
 * sctp_process_a_data_chunk() — presumably raised to the highest TSN
 * processed; confirm against that function.
 *
 * NOTE(review): this extract has lines elided (variable declarations such
 * as 'm', 'to', 'from', 'stop_proc', several braces, #else/#endif arms and
 * return statements are not visible); comments cover visible code only.
 */
sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
    struct sockaddr *src, struct sockaddr *dst,
    struct sctphdr *sh, struct sctp_inpcb *inp,
    struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
    uint8_t mflowtype, uint32_t mflowid,
    uint32_t vrf_id, uint16_t port)
	struct sctp_data_chunk *ch, chunk_buf;
	struct sctp_association *asoc;
	int num_chunks = 0;	/* number of control chunks processed */
	int chk_length, break_flag, last_chunk;
	int abort_flag = 0, was_a_gap;
	uint32_t highest_tsn;

	/* Refresh our advertised receive window before processing. */
	sctp_set_rwnd(stcb, &stcb->asoc);
	SCTP_TCB_LOCK_ASSERT(stcb);
	/* Highest TSN seen: larger of the nr-map and regular map values. */
	if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
		highest_tsn = asoc->highest_tsn_inside_nr_map;
		highest_tsn = asoc->highest_tsn_inside_map;
	was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
	/*
	 * Set up where we got the last DATA packet from for any SACK that
	 * may need to go out.  Don't bump the net.  This is done ONLY when
	 * a chunk is assigned.
	 */
	asoc->last_data_chunk_from = net;
	/*
	 * Now before we proceed we must figure out if this is a wasted
	 * cluster... i.e. it is a small packet sent in and yet the driver
	 * underneath allocated a full cluster for it.  If so we must copy
	 * it to a smaller mbuf and free up the cluster mbuf.  This will
	 * help with cluster starvation.  Note for __Panda__ we don't do
	 * this since it has clusters all the way down to 64 bytes.
	 */
	if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
		/* we only handle mbufs that are singletons.. not chains */
		m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
		/* ok lets see if we can copy the data up */
		/* get the pointers and copy */
		to = mtod(m, caddr_t *);
		from = mtod((*mm), caddr_t *);
		memcpy(to, from, SCTP_BUF_LEN((*mm)));
		/* copy the length and free up the old */
		SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
		/* success, back copy */
		/* We are in trouble in the mbuf world .. yikes */
	/* get pointer to the first chunk header */
	ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
	    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
	/*
	 * process all DATA chunks...
	 */
	*high_tsn = asoc->cumulative_tsn;
	asoc->data_pkts_seen++;
	while (stop_proc == 0) {
		/* validate chunk length */
		chk_length = ntohs(ch->ch.chunk_length);
		if (length - *offset < chk_length) {
			/* all done, mutilated chunk */
		if (ch->ch.chunk_type == SCTP_DATA) {
			if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
				/*
				 * Need to send an abort since we had an
				 * invalid (too short) data chunk.
				 */
				struct mbuf *op_err;
				char msg[SCTP_DIAG_INFO_LEN];

				snprintf(msg, sizeof(msg), "DATA chunk of length %d",
				op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_21;
				sctp_abort_association(inp, stcb, m, iphlen,
				    src, dst, sh, op_err,
			if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
				/*
				 * Need to send an abort since we had a
				 * DATA chunk carrying no user data.
				 */
				struct mbuf *op_err;

				op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
				stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_22;
				sctp_abort_association(inp, stcb, m, iphlen,
				    src, dst, sh, op_err,
#ifdef SCTP_AUDITING_ENABLED
			sctp_audit_log(0xB1, 0);
			/* Is this chunk the last one in the packet? */
			if (SCTP_SIZE32(chk_length) == (length - *offset)) {
			if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
			    chk_length, net, high_tsn, &abort_flag, &break_flag,
				/*
				 * Set because of out of rwnd space and no
				 * drop rep space left.
				 */
			/* not a data chunk in the data region */
			switch (ch->ch.chunk_type) {
			case SCTP_INITIATION:
			case SCTP_INITIATION_ACK:
			case SCTP_SELECTIVE_ACK:
			case SCTP_NR_SELECTIVE_ACK:
			case SCTP_HEARTBEAT_REQUEST:
			case SCTP_HEARTBEAT_ACK:
			case SCTP_ABORT_ASSOCIATION:
			case SCTP_SHUTDOWN_ACK:
			case SCTP_OPERATION_ERROR:
			case SCTP_COOKIE_ECHO:
			case SCTP_COOKIE_ACK:
			case SCTP_SHUTDOWN_COMPLETE:
			case SCTP_AUTHENTICATION:
			case SCTP_ASCONF_ACK:
			case SCTP_PACKET_DROPPED:
			case SCTP_STREAM_RESET:
			case SCTP_FORWARD_CUM_TSN:
				/*
				 * Now, what do we do with KNOWN chunks that
				 * are NOT in the right place?
				 *
				 * For now, I do nothing but ignore them.  We
				 * may later want to add sysctl stuff to
				 * switch out and do either an ABORT() or
				 * possibly process them.
				 */
				if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
					struct mbuf *op_err;
					char msg[SCTP_DIAG_INFO_LEN];

					snprintf(msg, sizeof(msg), "DATA chunk followed by chunk of type %2.2x",
					op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
					sctp_abort_association(inp, stcb,
				/* unknown chunk type, use bit rules */
				if (ch->ch.chunk_type & 0x40) {
					/* Add a error report to the queue */
					struct sctp_paramhdr *phd;

					merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
					phd = mtod(merr, struct sctp_paramhdr *);
					/*
					 * We cheat and use param type since
					 * we did not bother to define a
					 * error cause struct.  They are the
					 * same basic format with different
					 * names.
					 */
					    htons(SCTP_CAUSE_UNRECOG_CHUNK);
					    htons(chk_length + sizeof(*phd));
					SCTP_BUF_LEN(merr) = sizeof(*phd);
					SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
					if (SCTP_BUF_NEXT(merr)) {
						/* Pad the copied chunk out to a 32-bit boundary. */
						if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL) == NULL) {
						sctp_queue_op_err(stcb, merr);
				if ((ch->ch.chunk_type & 0x80) == 0) {
					/* discard the rest of this packet */
				}	/* else skip this bad chunk and
					 * continue with the next one */
			}	/* switch of chunk type */
		*offset += SCTP_SIZE32(chk_length);
		if ((*offset >= length) || stop_proc) {
			/* no more data left in the mbuf chain */
		ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
	/*
	 * we need to report rwnd overrun drops.
	 */
	sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
	/*
	 * Did we get data, if so update the time for auto-close and
	 * give peer credit for being alive.
	 */
	SCTP_STAT_INCR(sctps_recvpktwithdata);
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
		sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
		    stcb->asoc.overall_error_count,
		    SCTP_FROM_SCTP_INDATA,
	/* Valid data arrived: reset the association error counter. */
	stcb->asoc.overall_error_count = 0;
	(void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
	/* now service all of the reassm queue if needed */
	if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
		sctp_service_queues(stcb, asoc);
	if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
		/* Assure that we ack right away */
		stcb->asoc.send_sack = 1;
	/* Start a sack timer or QUEUE a SACK for sending */
	sctp_sack_check(stcb, was_a_gap);
/*
 * Process one gap-ack block [frag_strt, frag_end] (offsets relative to
 * last_tsn) against the sent queue: mark matching chunks as gap-acked
 * (MARKED, or NR_ACKED when nr_sacking), maintain the CUCv2 pseudo-cumack
 * trackers, the SFR/HTNA per-destination newack state and the DAC
 * lowest-newack value, and adjust flight size / rwnd accounting.
 *
 * *p_tp1 carries the scan position across calls so consecutive in-order
 * blocks resume where the previous one stopped; the queue is re-scanned
 * from the head once ('circled') if the end is reached.
 *
 * Returns wake_him (non-zero when data was freed) — per the trailing
 * comment, only used for nr-sack.
 *
 * NOTE(review): lines are elided in this extract (several braces, the
 * tail of some argument lists and conditionally-compiled code are not
 * visible); comments below document visible statements only.
 */
sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
    uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
    uint32_t * biggest_newly_acked_tsn,
    uint32_t * this_sack_lowest_newack,
	struct sctp_tmit_chunk *tp1;
	unsigned int theTSN;
	int j, wake_him = 0, circled = 0;

	/* Recover the tp1 we last saw */
	tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
	for (j = frag_strt; j <= frag_end; j++) {
		/* Absolute TSN covered by this offset of the gap block. */
		theTSN = j + last_tsn;
		if (tp1->rec.data.doing_fast_retransmit)
			/*
			 * CMT: CUCv2 algorithm.  For each TSN being
			 * processed from the sent queue, track the next
			 * expected pseudo-cumack, or rtx_pseudo_cumack, if
			 * required.  Separate cumack trackers for first
			 * transmissions and retransmissions.
			 */
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_pseudo_cumack == 1) &&
			    (tp1->snd_count == 1)) {
				tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
				tp1->whoTo->find_pseudo_cumack = 0;
			if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
			    (tp1->whoTo->find_rtx_pseudo_cumack == 1) &&
			    (tp1->snd_count > 1)) {
				tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
				tp1->whoTo->find_rtx_pseudo_cumack = 0;
			if (tp1->rec.data.TSN_seq == theTSN) {
				if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
					/*
					 * must be held until
					 * cum-ack passes it
					 */
					if (tp1->sent < SCTP_DATAGRAM_RESEND) {
						/*
						 * If it is less than RESEND, it is
						 * now no-longer in flight.
						 * Higher values may already be set
						 * via previous Gap Ack Blocks...
						 * i.e. ACKED or RESEND.
						 */
						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						    *biggest_newly_acked_tsn)) {
							*biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
						/*
						 * CMT: SFR algo (and HTNA) - set
						 * saw_newack to 1 for dest being
						 * newly acked.  Update
						 * this_sack_highest_newack if
						 * appropriate.
						 */
						if (tp1->rec.data.chunk_was_revoked == 0)
							tp1->whoTo->saw_newack = 1;
						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						    tp1->whoTo->this_sack_highest_newack)) {
							tp1->whoTo->this_sack_highest_newack =
							    tp1->rec.data.TSN_seq;
						/*
						 * CMT DAC algo: also update
						 * this_sack_lowest_newack
						 */
						if (*this_sack_lowest_newack == 0) {
							if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
								sctp_log_sack(*this_sack_lowest_newack,
								    tp1->rec.data.TSN_seq,
								    SCTP_LOG_TSN_ACKED);
							*this_sack_lowest_newack = tp1->rec.data.TSN_seq;
						/*
						 * CMT: CUCv2 algorithm.  If (rtx-)pseudo-cumack for corresp
						 * dest is being acked, then we have a new (rtx-)pseudo-cumack.  Set
						 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
						 * updated.  Also trigger search for the next expected (rtx-)pseudo-cumack.
						 * Separate pseudo_cumack trackers for first transmissions and
						 * retransmissions.
						 */
						if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							tp1->whoTo->find_pseudo_cumack = 1;
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
							sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
						if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
							if (tp1->rec.data.chunk_was_revoked == 0) {
								tp1->whoTo->new_pseudo_cumack = 1;
							tp1->whoTo->find_rtx_pseudo_cumack = 1;
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
							sctp_log_sack(*biggest_newly_acked_tsn,
							    tp1->rec.data.TSN_seq,
							    SCTP_LOG_TSN_ACKED);
						if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
							sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
							    tp1->whoTo->flight_size,
							    (uintptr_t) tp1->whoTo,
							    tp1->rec.data.TSN_seq);
						/* Chunk leaves flight: update per-net and total accounting. */
						sctp_flight_size_decrease(tp1);
						if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
							(*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
						sctp_total_flight_decrease(stcb, tp1);
						tp1->whoTo->net_ack += tp1->send_size;
						if (tp1->snd_count < 2) {
							/*
							 * True non-retransmitted chunk:
							 * eligible for RTT measurement.
							 */
							tp1->whoTo->net_ack2 += tp1->send_size;
							sctp_calculate_rto(stcb,
							    &tp1->sent_rcv_time,
							    sctp_align_safe_nocopy,
							    SCTP_RTT_FROM_DATA);
							if (tp1->whoTo->rto_needed == 0) {
								tp1->whoTo->rto_needed = 1;
					if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
						/* Track the highest TSN covered by any gap report. */
						if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
						    stcb->asoc.this_sack_highest_gap)) {
							stcb->asoc.this_sack_highest_gap =
							    tp1->rec.data.TSN_seq;
						if (tp1->sent == SCTP_DATAGRAM_RESEND) {
							sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
							sctp_audit_log(0xB2,
							    (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
					/*
					 * All chunks NOT UNSENT fall through here and are marked
					 * (leave PR-SCTP ones that are to skip alone though)
					 */
					if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						tp1->sent = SCTP_DATAGRAM_MARKED;
					if (tp1->rec.data.chunk_was_revoked) {
						/* deflate the cwnd */
						tp1->whoTo->cwnd -= tp1->book_size;
						tp1->rec.data.chunk_was_revoked = 0;
					/* NR Sack code here */
					    (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
						if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
							stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
							panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
						/* NR-acked chunks can release their data now. */
						tp1->sent = SCTP_DATAGRAM_NR_ACKED;
						sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
						sctp_m_freem(tp1->data);
			}	/* if (tp1->TSN_seq == theTSN) */
			/* Passed the target TSN: stop scanning for this offset. */
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
			tp1 = TAILQ_NEXT(tp1, sctp_next);
			if ((tp1 == NULL) && (circled == 0)) {
				/* Wrap to the queue head once per offset. */
				tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
		}	/* end while (tp1) */
			tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
		/* In case the fragments were not in order we must reset */
	}	/* end for (j = fragStart */
	return (wake_him);	/* Return value only used for nr-sack */
/*
 * Iterate over all gap-ack blocks (regular followed by nr gap-ack blocks)
 * of a SACK/NR-SACK chunk in mbuf 'm' starting at *offset, handing each
 * well-formed block to sctp_process_segment_range().  Malformed blocks
 * (start > end) are skipped; out-of-order blocks restart the sent-queue
 * scan from the head.  *biggest_tsn_acked is raised to the largest TSN
 * covered by any block.
 *
 * Returns chunk_freed — non-zero when processing freed chunk data
 * (presumably set from the sctp_process_segment_range() result; the
 * assignment is not visible in this extract).
 *
 * NOTE(review): lines are elided here (declarations of 'i', 'chunk_freed',
 * 'non_revocable', 'num_frs' and several braces are not visible).
 */
sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
    uint32_t last_tsn, uint32_t * biggest_tsn_acked,
    uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
    int num_seg, int num_nr_seg, int *rto_ok)
	struct sctp_gap_ack_block *frag, block;
	struct sctp_tmit_chunk *tp1;
	uint16_t frag_strt, frag_end, prev_frag_end;

	tp1 = TAILQ_FIRST(&asoc->sent_queue);
	for (i = 0; i < (num_seg + num_nr_seg); i++) {
		tp1 = TAILQ_FIRST(&asoc->sent_queue);
		/* Pull the next gap-ack block out of the mbuf chain. */
		frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
		    sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
		*offset += sizeof(block);
		return (chunk_freed);
		/* Block offsets are relative to last_tsn (cum-ack). */
		frag_strt = ntohs(frag->start);
		frag_end = ntohs(frag->end);
		if (frag_strt > frag_end) {
			/* This gap report is malformed, skip it. */
		if (frag_strt <= prev_frag_end) {
			/* This gap report is not in order, so restart. */
			tp1 = TAILQ_FIRST(&asoc->sent_queue);
		if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
			*biggest_tsn_acked = last_tsn + frag_end;
		if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
		    non_revocable, &num_frs, biggest_newly_acked_tsn,
		    this_sack_lowest_newack, rto_ok)) {
		prev_frag_end = frag_end;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(*biggest_tsn_acked,
		    *biggest_newly_acked_tsn,
		    last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
	return (chunk_freed);
/*
 * Scan the sent queue for chunks above the cum-ack that were previously
 * gap-acked but are NOT covered by this SACK — i.e. the peer has revoked
 * the ack.  Revoked chunks are put back in flight (SENT, flight size and
 * cwnd compensation restored); chunks re-acked this time (MARKED) are
 * promoted to ACKED.
 *
 * NOTE(review): lines are elided in this extract (loop-exit braces and
 * part of the sctp_log_sack() argument list are not visible).
 */
sctp_check_for_revoked(struct sctp_tcb *stcb,
    struct sctp_association *asoc, uint32_t cumack,
    uint32_t biggest_tsn_acked)
	struct sctp_tmit_chunk *tp1;

	TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
		if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
			/*
			 * ok this guy is either ACK or MARKED.  If it is
			 * ACKED it has been previously acked but not this
			 * time i.e. revoked.  If it is MARKED it was ACK'ed
			 * again.
			 */
			if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
			if (tp1->sent == SCTP_DATAGRAM_ACKED) {
				/* it has been revoked */
				tp1->sent = SCTP_DATAGRAM_SENT;
				tp1->rec.data.chunk_was_revoked = 1;
				/*
				 * We must add this stuff back in to assure
				 * timers and such get started.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
					    tp1->whoTo->flight_size,
					    (uintptr_t) tp1->whoTo,
					    tp1->rec.data.TSN_seq);
				sctp_flight_size_increase(tp1);
				sctp_total_flight_increase(stcb, tp1);
				/*
				 * We inflate the cwnd to compensate for our
				 * artificial inflation of the flight_size.
				 */
				tp1->whoTo->cwnd += tp1->book_size;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
					sctp_log_sack(asoc->last_acked_seq,
					    tp1->rec.data.TSN_seq,
					    SCTP_LOG_TSN_REVOKED);
			} else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
				/* it has been re-acked in this SACK */
				tp1->sent = SCTP_DATAGRAM_ACKED;
		/* UNSENT chunks mark the end of data ever transmitted. */
		if (tp1->sent == SCTP_DATAGRAM_UNSENT)
2957 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2958 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2960 struct sctp_tmit_chunk *tp1;
2961 int strike_flag = 0;
2963 int tot_retrans = 0;
2964 uint32_t sending_seq;
2965 struct sctp_nets *net;
2966 int num_dests_sacked = 0;
2969 * select the sending_seq, this is either the next thing ready to be
2970 * sent but not transmitted, OR, the next seq we assign.
2972 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2974 sending_seq = asoc->sending_seq;
2976 sending_seq = tp1->rec.data.TSN_seq;
2979 /* CMT DAC algo: finding out if SACK is a mixed SACK */
2980 if ((asoc->sctp_cmt_on_off > 0) &&
2981 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2982 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2983 if (net->saw_newack)
2987 if (stcb->asoc.prsctp_supported) {
2988 (void)SCTP_GETTIME_TIMEVAL(&now);
2990 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2992 if (tp1->no_fr_allowed) {
2993 /* this one had a timeout or something */
2996 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2997 if (tp1->sent < SCTP_DATAGRAM_RESEND)
2998 sctp_log_fr(biggest_tsn_newly_acked,
2999 tp1->rec.data.TSN_seq,
3001 SCTP_FR_LOG_CHECK_STRIKE);
3003 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3004 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3008 if (stcb->asoc.prsctp_supported) {
3009 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3010 /* Is it expired? */
3011 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3012 /* Yes so drop it */
3013 if (tp1->data != NULL) {
3014 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3015 SCTP_SO_NOT_LOCKED);
3021 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3022 /* we are beyond the tsn in the sack */
3025 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3026 /* either a RESEND, ACKED, or MARKED */
3028 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3029 /* Continue strikin FWD-TSN chunks */
3030 tp1->rec.data.fwd_tsn_cnt++;
3035 * CMT : SFR algo (covers part of DAC and HTNA as well)
3037 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3039 * No new acks were receieved for data sent to this
3040 * dest. Therefore, according to the SFR algo for
3041 * CMT, no data sent to this dest can be marked for
3042 * FR using this SACK.
3045 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3046 tp1->whoTo->this_sack_highest_newack)) {
3048 * CMT: New acks were receieved for data sent to
3049 * this dest. But no new acks were seen for data
3050 * sent after tp1. Therefore, according to the SFR
3051 * algo for CMT, tp1 cannot be marked for FR using
3052 * this SACK. This step covers part of the DAC algo
3053 * and the HTNA algo as well.
3058 * Here we check to see if we were have already done a FR
3059 * and if so we see if the biggest TSN we saw in the sack is
3060 * smaller than the recovery point. If so we don't strike
3061 * the tsn... otherwise we CAN strike the TSN.
3064 * @@@ JRI: Check for CMT if (accum_moved &&
3065 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3068 if (accum_moved && asoc->fast_retran_loss_recovery) {
3070 * Strike the TSN if in fast-recovery and cum-ack
3073 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3074 sctp_log_fr(biggest_tsn_newly_acked,
3075 tp1->rec.data.TSN_seq,
3077 SCTP_FR_LOG_STRIKE_CHUNK);
3079 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3082 if ((asoc->sctp_cmt_on_off > 0) &&
3083 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3085 * CMT DAC algorithm: If SACK flag is set to
3086 * 0, then lowest_newack test will not pass
3087 * because it would have been set to the
3088 * cumack earlier. If not already to be
3089 * rtx'd, If not a mixed sack and if tp1 is
3090 * not between two sacked TSNs, then mark by
3091 * one more. NOTE that we are marking by one
3092 * additional time since the SACK DAC flag
3093 * indicates that two packets have been
3094 * received after this missing TSN.
3096 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3097 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3098 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3099 sctp_log_fr(16 + num_dests_sacked,
3100 tp1->rec.data.TSN_seq,
3102 SCTP_FR_LOG_STRIKE_CHUNK);
3107 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3108 (asoc->sctp_cmt_on_off == 0)) {
3110 * For those that have done a FR we must take
3111 * special consideration if we strike. I.e the
3112 * biggest_newly_acked must be higher than the
3113 * sending_seq at the time we did the FR.
3116 #ifdef SCTP_FR_TO_ALTERNATE
3118 * If FR's go to new networks, then we must only do
3119 * this for singly homed asoc's. However if the FR's
3120 * go to the same network (Armando's work) then its
3121 * ok to FR multiple times.
3129 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3130 tp1->rec.data.fast_retran_tsn)) {
3132 * Strike the TSN, since this ack is
3133 * beyond where things were when we
3136 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3137 sctp_log_fr(biggest_tsn_newly_acked,
3138 tp1->rec.data.TSN_seq,
3140 SCTP_FR_LOG_STRIKE_CHUNK);
3142 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3146 if ((asoc->sctp_cmt_on_off > 0) &&
3147 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3149 * CMT DAC algorithm: If
3150 * SACK flag is set to 0,
3151 * then lowest_newack test
3152 * will not pass because it
3153 * would have been set to
3154 * the cumack earlier. If
3155 * not already to be rtx'd,
3156 * If not a mixed sack and
3157 * if tp1 is not between two
3158 * sacked TSNs, then mark by
3159 * one more. NOTE that we
3160 * are marking by one
3161 * additional time since the
3162 * SACK DAC flag indicates
3163 * that two packets have
3164 * been received after this
3167 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3168 (num_dests_sacked == 1) &&
3169 SCTP_TSN_GT(this_sack_lowest_newack,
3170 tp1->rec.data.TSN_seq)) {
3171 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3172 sctp_log_fr(32 + num_dests_sacked,
3173 tp1->rec.data.TSN_seq,
3175 SCTP_FR_LOG_STRIKE_CHUNK);
3177 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3185 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3188 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3189 biggest_tsn_newly_acked)) {
3191 * We don't strike these: This is the HTNA
3192 * algorithm i.e. we don't strike If our TSN is
3193 * larger than the Highest TSN Newly Acked.
3197 /* Strike the TSN */
3198 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3199 sctp_log_fr(biggest_tsn_newly_acked,
3200 tp1->rec.data.TSN_seq,
3202 SCTP_FR_LOG_STRIKE_CHUNK);
3204 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3207 if ((asoc->sctp_cmt_on_off > 0) &&
3208 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3210 * CMT DAC algorithm: If SACK flag is set to
3211 * 0, then lowest_newack test will not pass
3212 * because it would have been set to the
3213 * cumack earlier. If not already to be
3214 * rtx'd, If not a mixed sack and if tp1 is
3215 * not between two sacked TSNs, then mark by
3216 * one more. NOTE that we are marking by one
3217 * additional time since the SACK DAC flag
3218 * indicates that two packets have been
3219 * received after this missing TSN.
3221 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3222 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3223 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3224 sctp_log_fr(48 + num_dests_sacked,
3225 tp1->rec.data.TSN_seq,
3227 SCTP_FR_LOG_STRIKE_CHUNK);
3233 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3234 struct sctp_nets *alt;
3236 /* fix counts and things */
3237 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3238 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3239 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3241 (uintptr_t) tp1->whoTo,
3242 tp1->rec.data.TSN_seq);
3245 tp1->whoTo->net_ack++;
3246 sctp_flight_size_decrease(tp1);
3247 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3248 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3252 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3253 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3254 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3256 /* add back to the rwnd */
3257 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3259 /* remove from the total flight */
3260 sctp_total_flight_decrease(stcb, tp1);
3262 if ((stcb->asoc.prsctp_supported) &&
3263 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3265 * Has it been retransmitted tv_sec times? -
3266 * we store the retran count there.
3268 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3269 /* Yes, so drop it */
3270 if (tp1->data != NULL) {
3271 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3272 SCTP_SO_NOT_LOCKED);
3274 /* Make sure to flag we had a FR */
3275 tp1->whoTo->net_ack++;
3280 * SCTP_PRINTF("OK, we are now ready to FR this
3283 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3284 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3288 /* This is a subsequent FR */
3289 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3291 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3292 if (asoc->sctp_cmt_on_off > 0) {
3294 * CMT: Using RTX_SSTHRESH policy for CMT.
3295 * If CMT is being used, then pick dest with
3296 * largest ssthresh for any retransmission.
3298 tp1->no_fr_allowed = 1;
3300 /* sa_ignore NO_NULL_CHK */
3301 if (asoc->sctp_cmt_pf > 0) {
3303 * JRS 5/18/07 - If CMT PF is on,
3304 * use the PF version of
3307 alt = sctp_find_alternate_net(stcb, alt, 2);
3310 * JRS 5/18/07 - If only CMT is on,
3311 * use the CMT version of
3314 /* sa_ignore NO_NULL_CHK */
3315 alt = sctp_find_alternate_net(stcb, alt, 1);
3321 * CUCv2: If a different dest is picked for
3322 * the retransmission, then new
3323 * (rtx-)pseudo_cumack needs to be tracked
3324 * for orig dest. Let CUCv2 track new (rtx-)
3325 * pseudo-cumack always.
3328 tp1->whoTo->find_pseudo_cumack = 1;
3329 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3331 } else {/* CMT is OFF */
3333 #ifdef SCTP_FR_TO_ALTERNATE
3334 /* Can we find an alternate? */
3335 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3338 * default behavior is to NOT retransmit
3339 * FR's to an alternate. Armando Caro's
3340 * paper details why.
3346 tp1->rec.data.doing_fast_retransmit = 1;
3348 /* mark the sending seq for possible subsequent FR's */
3350 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3351 * (uint32_t)tpi->rec.data.TSN_seq);
3353 if (TAILQ_EMPTY(&asoc->send_queue)) {
3355 * If the queue of send is empty then its
3356 * the next sequence number that will be
3357 * assigned so we subtract one from this to
3358 * get the one we last sent.
3360 tp1->rec.data.fast_retran_tsn = sending_seq;
3363 * If there are chunks on the send queue
3364 * (unsent data that has made it from the
3365 * stream queues but not out the door, we
3366 * take the first one (which will have the
3367 * lowest TSN) and subtract one to get the
3370 struct sctp_tmit_chunk *ttt;
3372 ttt = TAILQ_FIRST(&asoc->send_queue);
3373 tp1->rec.data.fast_retran_tsn =
3374 ttt->rec.data.TSN_seq;
3379 * this guy had a RTO calculation pending on
3382 if ((tp1->whoTo != NULL) &&
3383 (tp1->whoTo->rto_needed == 0)) {
3384 tp1->whoTo->rto_needed = 1;
3388 if (alt != tp1->whoTo) {
3389 /* yes, there is an alternate. */
3390 sctp_free_remote_addr(tp1->whoTo);
3391 /* sa_ignore FREED_MEMORY */
3393 atomic_add_int(&alt->ref_count, 1);
/*
 * Walk the sent_queue and try to advance the PR-SCTP
 * "advanced peer ack point" past chunks that are marked to be skipped
 * (SCTP_FORWARD_TSN_SKIP / SCTP_DATAGRAM_NR_ACKED). Expired
 * TTL-policy RESEND chunks are released along the way. Returns the
 * last chunk up to which the ack point could be advanced (a_adv), so
 * the caller can emit a FORWARD-TSN.
 * NOTE(review): this chunk of the file has interior lines elided by
 * extraction (original numbering jumps); braces/returns are missing
 * here, so comments below describe only what the visible lines show.
 */
3399 struct sctp_tmit_chunk *
3400 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3401 struct sctp_association *asoc)
3403 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated PR-SCTP support. */
3407 if (asoc->prsctp_supported == 0) {
/* Safe iteration: tp1 may be released/unlinked inside the loop. */
3410 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3411 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3412 tp1->sent != SCTP_DATAGRAM_RESEND &&
3413 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3414 /* no chance to advance, out of here */
3417 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3418 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3419 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3420 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3421 asoc->advanced_peer_ack_point,
3422 tp1->rec.data.TSN_seq, 0, 0);
3425 if (!PR_SCTP_ENABLED(tp1->flags)) {
3427 * We can't fwd-tsn past any that are reliable aka
3428 * retransmitted until the asoc fails.
/* Snapshot current time for the TTL-expiry comparison below. */
3433 (void)SCTP_GETTIME_TIMEVAL(&now);
3437 * now we got a chunk which is marked for another
3438 * retransmission to a PR-stream but has run out its chances
3439 * already maybe OR has been marked to skip now. Can we skip
3440 * it if its a resend?
3442 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3443 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3445 * Now is this one marked for resend and its time is
3448 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3449 /* Yes so drop it */
3451 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3452 1, SCTP_SO_NOT_LOCKED);
3456 * No, we are done when hit one for resend
3457 * whos time as not expired.
3463 * Ok now if this chunk is marked to drop it we can clean up
3464 * the chunk, advance our peer ack point and we can check
3467 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3468 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3469 /* advance PeerAckPoint goes forward */
3470 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3471 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3473 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3474 /* No update but we do save the chk */
3479 * If it is still in RESEND we can advance no
/*
 * Audit the association's flight-size bookkeeping: re-count the
 * sent_queue by chunk state (inflight / resend / in-between / acked /
 * above) and report a mismatch against the recorded total_flight.
 * NOTE(review): the return-type line and the function tail (return
 * statement) were elided by extraction; presumably returns non-zero
 * on an inconsistency — confirm against the full file.
 */
3489 sctp_fs_audit(struct sctp_association *asoc)
3491 struct sctp_tmit_chunk *chk;
3492 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3496 int entry_flight, entry_cnt;
/* Snapshot the recorded flight counters before re-deriving them. */
3502 entry_flight = asoc->total_flight;
3503 entry_cnt = asoc->total_flight_count;
/* If everything queued is PR-SCTP, the audit is skipped. */
3505 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Classify every sent chunk by its ->sent state. */
3508 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3509 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3510 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3511 chk->rec.data.TSN_seq,
3515 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3517 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3519 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as inflight/in-between here is a bug. */
3526 if ((inflight > 0) || (inbetween > 0)) {
3528 panic("Flight size-express incorrect? \n");
3530 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3531 entry_flight, entry_cnt);
3533 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3534 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe: clear its
 * window_probe flag and, unless it has already been acked (or has no
 * data), take it out of flight and mark it SCTP_DATAGRAM_RESEND so it
 * is retransmitted through the normal path.
 * NOTE(review): the return-type line ("static void" in the original,
 * presumably) was elided by extraction.
 */
3543 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3544 struct sctp_association *asoc,
3545 struct sctp_tmit_chunk *tp1)
3547 tp1->window_probe = 0;
3548 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3549 /* TSN's skipped we do NOT move back. */
3550 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3551 tp1->whoTo ? tp1->whoTo->flight_size : 0,
3553 (uintptr_t) tp1->whoTo,
3554 tp1->rec.data.TSN_seq);
3557 /* First setup this by shrinking flight */
/* Give the CC module a chance to account for the de-acknowledged TSN. */
3558 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3559 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3562 sctp_flight_size_decrease(tp1);
3563 sctp_total_flight_decrease(stcb, tp1);
3564 /* Now mark for resend */
3565 tp1->sent = SCTP_DATAGRAM_RESEND;
3566 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3568 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3569 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3570 tp1->whoTo->flight_size,
3572 (uintptr_t) tp1->whoTo,
3573 tp1->rec.data.TSN_seq);
/*
 * Express (fast-path) SACK handler: processes a pure cumulative-ack
 * SACK (no gap-ack blocks). Frees newly-acked chunks from the
 * sent_queue, updates RTO/CC state per destination, recomputes the
 * peer's rwnd, recovers window probes, audits flight size, drives the
 * shutdown state machine when the queues drain, and runs the PR-SCTP
 * advanced-peer-ack-point / FORWARD-TSN procedures.
 * NOTE(review): many interior lines (closing braces, else-arms, some
 * statements) were elided by extraction — the original numbering
 * jumps — so the control flow below is incomplete as displayed.
 */
3578 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3579 uint32_t rwnd, int *abort_now, int ecne_seen)
3581 struct sctp_nets *net;
3582 struct sctp_association *asoc;
3583 struct sctp_tmit_chunk *tp1, *tp2;
3585 int win_probe_recovery = 0;
3586 int win_probe_recovered = 0;
3587 int j, done_once = 0;
3590 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3591 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3592 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
/* Caller must hold the TCB lock for the whole SACK processing. */
3594 SCTP_TCB_LOCK_ASSERT(stcb);
3595 #ifdef SCTP_ASOCLOG_OF_TSNS
3596 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3597 stcb->asoc.cumack_log_at++;
3598 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3599 stcb->asoc.cumack_log_at = 0;
3603 old_rwnd = asoc->peers_rwnd;
/* Stale SACK (acks behind last_acked_seq): visible branch is empty here. */
3604 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3607 } else if (asoc->last_acked_seq == cumack) {
3608 /* Window update sack */
3609 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3610 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3611 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3612 /* SWS sender side engages */
3613 asoc->peers_rwnd = 0;
3615 if (asoc->peers_rwnd > old_rwnd) {
3620 /* First setup for CC stuff */
3621 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3622 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3623 /* Drag along the window_tsn for cwr's */
3624 net->cwr_window_tsn = cumack;
3626 net->prev_cwnd = net->cwnd;
3631 * CMT: Reset CUC and Fast recovery algo variables before
3634 net->new_pseudo_cumack = 0;
3635 net->will_exit_fast_recovery = 0;
3636 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3637 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Strict-SACK sanity check: a cum-ack at/above sending_seq is a
 * protocol violation and aborts the association. */
3640 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3643 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3644 tp1 = TAILQ_LAST(&asoc->sent_queue,
3645 sctpchunk_listhead);
3646 send_s = tp1->rec.data.TSN_seq + 1;
3648 send_s = asoc->sending_seq;
3650 if (SCTP_TSN_GE(cumack, send_s)) {
3651 struct mbuf *op_err;
3652 char msg[SCTP_DIAG_INFO_LEN];
3656 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
3658 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3659 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_23;
3660 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3664 asoc->this_sack_highest_gap = cumack;
3665 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3666 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3667 stcb->asoc.overall_error_count,
3669 SCTP_FROM_SCTP_INDATA,
/* A valid SACK arrival clears the association error counter. */
3672 stcb->asoc.overall_error_count = 0;
3673 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3674 /* process the new consecutive TSN first */
3675 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3676 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3677 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3678 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3680 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3682 * If it is less than ACKED, it is
3683 * now no-longer in flight. Higher
3684 * values may occur during marking
3686 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3687 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3688 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3689 tp1->whoTo->flight_size,
3691 (uintptr_t) tp1->whoTo,
3692 tp1->rec.data.TSN_seq);
3694 sctp_flight_size_decrease(tp1);
3695 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3696 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3699 /* sa_ignore NO_NULL_CHK */
3700 sctp_total_flight_decrease(stcb, tp1);
3702 tp1->whoTo->net_ack += tp1->send_size;
3703 if (tp1->snd_count < 2) {
3705 * True non-retransmited
3708 tp1->whoTo->net_ack2 +=
3711 /* update RTO too? */
3720 sctp_calculate_rto(stcb,
3722 &tp1->sent_rcv_time,
3723 sctp_align_safe_nocopy,
3724 SCTP_RTT_FROM_DATA);
3727 if (tp1->whoTo->rto_needed == 0) {
3728 tp1->whoTo->rto_needed = 1;
3734 * CMT: CUCv2 algorithm. From the
3735 * cumack'd TSNs, for each TSN being
3736 * acked for the first time, set the
3737 * following variables for the
3738 * corresp destination.
3739 * new_pseudo_cumack will trigger a
3741 * find_(rtx_)pseudo_cumack will
3742 * trigger search for the next
3743 * expected (rtx-)pseudo-cumack.
3745 tp1->whoTo->new_pseudo_cumack = 1;
3746 tp1->whoTo->find_pseudo_cumack = 1;
3747 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3749 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3750 /* sa_ignore NO_NULL_CHK */
3751 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3754 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3755 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3757 if (tp1->rec.data.chunk_was_revoked) {
3758 /* deflate the cwnd */
3759 tp1->whoTo->cwnd -= tp1->book_size;
3760 tp1->rec.data.chunk_was_revoked = 0;
3762 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3763 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3764 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3767 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
/* Chunk is fully acked: unlink it and release its mbuf chain. */
3771 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3773 /* sa_ignore NO_NULL_CHK */
3774 sctp_free_bufspace(stcb, asoc, tp1, 1);
3775 sctp_m_freem(tp1->data);
3778 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3779 sctp_log_sack(asoc->last_acked_seq,
3781 tp1->rec.data.TSN_seq,
3784 SCTP_LOG_FREE_SENT);
3786 asoc->sent_queue_cnt--;
3787 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Wake a writer blocked on so_snd now that send space was freed. */
3794 /* sa_ignore NO_NULL_CHK */
3795 if (stcb->sctp_socket) {
3796 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3800 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3801 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3802 /* sa_ignore NO_NULL_CHK */
3803 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3805 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3806 so = SCTP_INP_SO(stcb->sctp_ep);
/* Hold a ref across the TCB unlock so the assoc cannot be freed
 * while we acquire the socket lock (lock-order workaround). */
3807 atomic_add_int(&stcb->asoc.refcnt, 1);
3808 SCTP_TCB_UNLOCK(stcb);
3809 SCTP_SOCKET_LOCK(so, 1);
3810 SCTP_TCB_LOCK(stcb);
3811 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3812 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3813 /* assoc was freed while we were unlocked */
3814 SCTP_SOCKET_UNLOCK(so, 1);
3818 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3819 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3820 SCTP_SOCKET_UNLOCK(so, 1);
3823 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3824 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3828 /* JRS - Use the congestion control given in the CC module */
3829 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3830 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3831 if (net->net_ack2 > 0) {
3833 * Karn's rule applies to clearing error
3834 * count, this is optional.
3836 net->error_count = 0;
3837 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3838 /* addr came good */
3839 net->dest_state |= SCTP_ADDR_REACHABLE;
3840 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3841 0, (void *)net, SCTP_SO_NOT_LOCKED);
3843 if (net == stcb->asoc.primary_destination) {
3844 if (stcb->asoc.alternate) {
3846 * release the alternate,
3849 sctp_free_remote_addr(stcb->asoc.alternate);
3850 stcb->asoc.alternate = NULL;
/* Potentially-failed path confirmed good: leave PF state. */
3853 if (net->dest_state & SCTP_ADDR_PF) {
3854 net->dest_state &= ~SCTP_ADDR_PF;
3855 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
3856 stcb->sctp_ep, stcb, net,
3857 SCTP_FROM_SCTP_INDATA + SCTP_LOC_24);
3858 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3859 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3860 /* Done with this net */
3863 /* restore any doubled timers */
3864 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3865 if (net->RTO < stcb->asoc.minrto) {
3866 net->RTO = stcb->asoc.minrto;
3868 if (net->RTO > stcb->asoc.maxrto) {
3869 net->RTO = stcb->asoc.maxrto;
3873 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3875 asoc->last_acked_seq = cumack;
3877 if (TAILQ_EMPTY(&asoc->sent_queue)) {
3878 /* nothing left in-flight */
3879 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3880 net->flight_size = 0;
3881 net->partial_bytes_acked = 0;
3883 asoc->total_flight = 0;
3884 asoc->total_flight_count = 0;
/* Recompute peer rwnd from the advertised window minus outstanding
 * data plus per-chunk overhead. */
3887 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3888 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3889 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3890 /* SWS sender side engages */
3891 asoc->peers_rwnd = 0;
3893 if (asoc->peers_rwnd > old_rwnd) {
3894 win_probe_recovery = 1;
3896 /* Now assure a timer where data is queued at */
3899 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3902 if (win_probe_recovery && (net->window_probe)) {
3903 win_probe_recovered = 1;
3905 * Find first chunk that was used with window probe
3906 * and clear the sent
3908 /* sa_ignore FREED_MEMORY */
3909 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3910 if (tp1->window_probe) {
3911 /* move back to data send queue */
3912 sctp_window_probe_recovery(stcb, asoc, tp1);
3917 if (net->RTO == 0) {
3918 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3920 to_ticks = MSEC_TO_TICKS(net->RTO);
3922 if (net->flight_size) {
3924 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3925 sctp_timeout_handler, &net->rxt_timer);
3926 if (net->window_probe) {
3927 net->window_probe = 0;
3930 if (net->window_probe) {
3932 * In window probes we must assure a timer
3933 * is still running there
3935 net->window_probe = 0;
3936 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3937 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3938 sctp_timeout_handler, &net->rxt_timer);
3940 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3941 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3943 SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
/* Flight-size audit: if bookkeeping drifted (no retrans pending,
 * no probes recovered) rebuild flight counters from the queue. */
3948 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3949 (asoc->sent_queue_retran_cnt == 0) &&
3950 (win_probe_recovered == 0) &&
3953 * huh, this should not happen unless all packets are
3954 * PR-SCTP and marked to skip of course.
3956 if (sctp_fs_audit(asoc)) {
3957 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3958 net->flight_size = 0;
3960 asoc->total_flight = 0;
3961 asoc->total_flight_count = 0;
3962 asoc->sent_queue_retran_cnt = 0;
3963 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3964 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3965 sctp_flight_size_increase(tp1);
3966 sctp_total_flight_increase(stcb, tp1);
3967 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3968 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3975 /**********************************/
3976 /* Now what about shutdown issues */
3977 /**********************************/
3978 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3979 /* nothing left on sendqueue.. consider done */
3981 if ((asoc->stream_queue_cnt == 1) &&
3982 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3983 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3984 (asoc->locked_on_sending)
3986 struct sctp_stream_queue_pending *sp;
3989 * I may be in a state where we got all across.. but
3990 * cannot write more due to a shutdown... we abort
3991 * since the user did not indicate EOR in this case.
3992 * The sp will be cleaned during free of the asoc.
3994 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3996 if ((sp) && (sp->length == 0)) {
3997 /* Let cleanup code purge it */
3998 if (sp->msg_is_complete) {
3999 asoc->stream_queue_cnt--;
4001 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4002 asoc->locked_on_sending = NULL;
4003 asoc->stream_queue_cnt--;
/* All data delivered and user requested shutdown: send SHUTDOWN. */
4007 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4008 (asoc->stream_queue_cnt == 0)) {
4009 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4010 /* Need to abort here */
4011 struct mbuf *op_err;
4016 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4017 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_26;
4018 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4020 struct sctp_nets *netp;
4022 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4023 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4024 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4026 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4027 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4028 sctp_stop_timers_for_shutdown(stcb);
4029 if (asoc->alternate) {
4030 netp = asoc->alternate;
4032 netp = asoc->primary_destination;
4034 sctp_send_shutdown(stcb, netp);
4035 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4036 stcb->sctp_ep, stcb, netp);
4037 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4038 stcb->sctp_ep, stcb, netp);
/* Peer initiated shutdown and we have drained: send SHUTDOWN-ACK. */
4040 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4041 (asoc->stream_queue_cnt == 0)) {
4042 struct sctp_nets *netp;
4044 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4047 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4048 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4049 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4050 sctp_stop_timers_for_shutdown(stcb);
4051 if (asoc->alternate) {
4052 netp = asoc->alternate;
4054 netp = asoc->primary_destination;
4056 sctp_send_shutdown_ack(stcb, netp);
4057 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4058 stcb->sctp_ep, stcb, netp);
4061 /*********************************************/
4062 /* Here we perform PR-SCTP procedures */
4064 /*********************************************/
4065 /* C1. update advancedPeerAckPoint */
4066 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4067 asoc->advanced_peer_ack_point = cumack;
4069 /* PR-Sctp issues need to be addressed too */
4070 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4071 struct sctp_tmit_chunk *lchk;
4072 uint32_t old_adv_peer_ack_point;
4074 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4075 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4076 /* C3. See if we need to send a Fwd-TSN */
4077 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4079 * ISSUE with ECN, see FWD-TSN processing.
4081 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4082 send_forward_tsn(stcb, asoc);
4084 /* try to FR fwd-tsn's that get lost too */
4085 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4086 send_forward_tsn(stcb, asoc);
4091 /* Assure a timer is up */
4092 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4093 stcb->sctp_ep, stcb, lchk->whoTo);
4096 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4097 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4099 stcb->asoc.peers_rwnd,
4100 stcb->asoc.total_flight,
4101 stcb->asoc.total_output_queue_size);
4106 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4107 struct sctp_tcb *stcb,
4108 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4109 int *abort_now, uint8_t flags,
4110 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4112 struct sctp_association *asoc;
4113 struct sctp_tmit_chunk *tp1, *tp2;
4114 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4115 uint16_t wake_him = 0;
4116 uint32_t send_s = 0;
4118 int accum_moved = 0;
4119 int will_exit_fast_recovery = 0;
4120 uint32_t a_rwnd, old_rwnd;
4121 int win_probe_recovery = 0;
4122 int win_probe_recovered = 0;
4123 struct sctp_nets *net = NULL;
4126 uint8_t reneged_all = 0;
4127 uint8_t cmt_dac_flag;
4130 * we take any chance we can to service our queues since we cannot
4131 * get awoken when the socket is read from :<
4134 * Now perform the actual SACK handling: 1) Verify that it is not an
4135 * old sack, if so discard. 2) If there is nothing left in the send
4136 * queue (cum-ack is equal to last acked) then you have a duplicate
4137 * too, update any rwnd change and verify no timers are running.
4138 * then return. 3) Process any new consequtive data i.e. cum-ack
4139 * moved process these first and note that it moved. 4) Process any
4140 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4141 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4142 * sync up flightsizes and things, stop all timers and also check
4143 * for shutdown_pending state. If so then go ahead and send off the
4144 * shutdown. If in shutdown recv, send off the shutdown-ack and
4145 * start that timer, Ret. 9) Strike any non-acked things and do FR
4146 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4147 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4148 * if in shutdown_recv state.
4150 SCTP_TCB_LOCK_ASSERT(stcb);
4152 this_sack_lowest_newack = 0;
4153 SCTP_STAT_INCR(sctps_slowpath_sack);
4155 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4156 #ifdef SCTP_ASOCLOG_OF_TSNS
4157 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4158 stcb->asoc.cumack_log_at++;
4159 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4160 stcb->asoc.cumack_log_at = 0;
4165 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4166 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4167 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4169 old_rwnd = stcb->asoc.peers_rwnd;
4170 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4171 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4172 stcb->asoc.overall_error_count,
4174 SCTP_FROM_SCTP_INDATA,
4177 stcb->asoc.overall_error_count = 0;
4179 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4180 sctp_log_sack(asoc->last_acked_seq,
4187 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4189 uint32_t *dupdata, dblock;
4191 for (i = 0; i < num_dup; i++) {
4192 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4193 sizeof(uint32_t), (uint8_t *) & dblock);
4194 if (dupdata == NULL) {
4197 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4200 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4202 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4203 tp1 = TAILQ_LAST(&asoc->sent_queue,
4204 sctpchunk_listhead);
4205 send_s = tp1->rec.data.TSN_seq + 1;
4208 send_s = asoc->sending_seq;
4210 if (SCTP_TSN_GE(cum_ack, send_s)) {
4211 struct mbuf *op_err;
4212 char msg[SCTP_DIAG_INFO_LEN];
4215 * no way, we have not even sent this TSN out yet.
4216 * Peer is hopelessly messed up with us.
4218 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4221 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4222 tp1->rec.data.TSN_seq, (void *)tp1);
4227 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal than TSN %8.8x",
4229 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4230 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_27;
4231 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4235 /**********************/
4236 /* 1) check the range */
4237 /**********************/
4238 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4239 /* acking something behind */
4242 /* update the Rwnd of the peer */
4243 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4244 TAILQ_EMPTY(&asoc->send_queue) &&
4245 (asoc->stream_queue_cnt == 0)) {
4246 /* nothing left on send/sent and strmq */
4247 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4248 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4249 asoc->peers_rwnd, 0, 0, a_rwnd);
4251 asoc->peers_rwnd = a_rwnd;
4252 if (asoc->sent_queue_retran_cnt) {
4253 asoc->sent_queue_retran_cnt = 0;
4255 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4256 /* SWS sender side engages */
4257 asoc->peers_rwnd = 0;
4259 /* stop any timers */
4260 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4261 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4262 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4263 net->partial_bytes_acked = 0;
4264 net->flight_size = 0;
4266 asoc->total_flight = 0;
4267 asoc->total_flight_count = 0;
4271 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4272 * things. The total byte count acked is tracked in netAckSz AND
4273 * netAck2 is used to track the total bytes acked that are un-
4274 * amibguious and were never retransmitted. We track these on a per
4275 * destination address basis.
4277 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4278 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4279 /* Drag along the window_tsn for cwr's */
4280 net->cwr_window_tsn = cum_ack;
4282 net->prev_cwnd = net->cwnd;
4287 * CMT: Reset CUC and Fast recovery algo variables before
4290 net->new_pseudo_cumack = 0;
4291 net->will_exit_fast_recovery = 0;
4292 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4293 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4296 /* process the new consecutive TSN first */
4297 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4298 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4299 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4301 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4303 * If it is less than ACKED, it is
4304 * now no-longer in flight. Higher
4305 * values may occur during marking
4307 if ((tp1->whoTo->dest_state &
4308 SCTP_ADDR_UNCONFIRMED) &&
4309 (tp1->snd_count < 2)) {
4311 * If there was no retran
4312 * and the address is
4313 * un-confirmed and we sent
4315 * sacked.. its confirmed,
4318 tp1->whoTo->dest_state &=
4319 ~SCTP_ADDR_UNCONFIRMED;
4321 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4322 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4323 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4324 tp1->whoTo->flight_size,
4326 (uintptr_t) tp1->whoTo,
4327 tp1->rec.data.TSN_seq);
4329 sctp_flight_size_decrease(tp1);
4330 sctp_total_flight_decrease(stcb, tp1);
4331 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4332 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4336 tp1->whoTo->net_ack += tp1->send_size;
4338 /* CMT SFR and DAC algos */
4339 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4340 tp1->whoTo->saw_newack = 1;
4342 if (tp1->snd_count < 2) {
4344 * True non-retransmited
4347 tp1->whoTo->net_ack2 +=
4350 /* update RTO too? */
4354 sctp_calculate_rto(stcb,
4356 &tp1->sent_rcv_time,
4357 sctp_align_safe_nocopy,
4358 SCTP_RTT_FROM_DATA);
4361 if (tp1->whoTo->rto_needed == 0) {
4362 tp1->whoTo->rto_needed = 1;
4368 * CMT: CUCv2 algorithm. From the
4369 * cumack'd TSNs, for each TSN being
4370 * acked for the first time, set the
4371 * following variables for the
4372 * corresp destination.
4373 * new_pseudo_cumack will trigger a
4375 * find_(rtx_)pseudo_cumack will
4376 * trigger search for the next
4377 * expected (rtx-)pseudo-cumack.
4379 tp1->whoTo->new_pseudo_cumack = 1;
4380 tp1->whoTo->find_pseudo_cumack = 1;
4381 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4384 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4385 sctp_log_sack(asoc->last_acked_seq,
4387 tp1->rec.data.TSN_seq,
4390 SCTP_LOG_TSN_ACKED);
4392 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4393 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4396 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4397 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4398 #ifdef SCTP_AUDITING_ENABLED
4399 sctp_audit_log(0xB3,
4400 (asoc->sent_queue_retran_cnt & 0x000000ff));
4403 if (tp1->rec.data.chunk_was_revoked) {
4404 /* deflate the cwnd */
4405 tp1->whoTo->cwnd -= tp1->book_size;
4406 tp1->rec.data.chunk_was_revoked = 0;
4408 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4409 tp1->sent = SCTP_DATAGRAM_ACKED;
4416 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4417 /* always set this up to cum-ack */
4418 asoc->this_sack_highest_gap = last_tsn;
4420 if ((num_seg > 0) || (num_nr_seg > 0)) {
4423 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4424 * to be greater than the cumack. Also reset saw_newack to 0
4427 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4428 net->saw_newack = 0;
4429 net->this_sack_highest_newack = last_tsn;
4433 * thisSackHighestGap will increase while handling NEW
4434 * segments this_sack_highest_newack will increase while
4435 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4436 * used for CMT DAC algo. saw_newack will also change.
4438 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4439 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4440 num_seg, num_nr_seg, &rto_ok)) {
4443 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4445 * validate the biggest_tsn_acked in the gap acks if
4446 * strict adherence is wanted.
4448 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4450 * peer is either confused or we are under
4451 * attack. We must abort.
4453 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4454 biggest_tsn_acked, send_s);
4459 /*******************************************/
4460 /* cancel ALL T3-send timer if accum moved */
4461 /*******************************************/
4462 if (asoc->sctp_cmt_on_off > 0) {
4463 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4464 if (net->new_pseudo_cumack)
4465 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4467 SCTP_FROM_SCTP_INDATA + SCTP_LOC_29);
4472 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4473 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4474 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4478 /********************************************/
4479 /* drop the acked chunks from the sentqueue */
4480 /********************************************/
4481 asoc->last_acked_seq = cum_ack;
4483 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4484 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4487 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4488 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4489 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4492 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4496 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4497 if (PR_SCTP_ENABLED(tp1->flags)) {
4498 if (asoc->pr_sctp_cnt != 0)
4499 asoc->pr_sctp_cnt--;
4501 asoc->sent_queue_cnt--;
4503 /* sa_ignore NO_NULL_CHK */
4504 sctp_free_bufspace(stcb, asoc, tp1, 1);
4505 sctp_m_freem(tp1->data);
4507 if (asoc->prsctp_supported && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4508 asoc->sent_queue_cnt_removeable--;
4511 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4512 sctp_log_sack(asoc->last_acked_seq,
4514 tp1->rec.data.TSN_seq,
4517 SCTP_LOG_FREE_SENT);
4519 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4522 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4524 panic("Warning flight size is postive and should be 0");
4526 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4527 asoc->total_flight);
4529 asoc->total_flight = 0;
4531 /* sa_ignore NO_NULL_CHK */
4532 if ((wake_him) && (stcb->sctp_socket)) {
4533 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4537 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4538 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4539 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4541 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4542 so = SCTP_INP_SO(stcb->sctp_ep);
4543 atomic_add_int(&stcb->asoc.refcnt, 1);
4544 SCTP_TCB_UNLOCK(stcb);
4545 SCTP_SOCKET_LOCK(so, 1);
4546 SCTP_TCB_LOCK(stcb);
4547 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4548 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4549 /* assoc was freed while we were unlocked */
4550 SCTP_SOCKET_UNLOCK(so, 1);
4554 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4555 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4556 SCTP_SOCKET_UNLOCK(so, 1);
4559 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4560 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4564 if (asoc->fast_retran_loss_recovery && accum_moved) {
4565 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4566 /* Setup so we will exit RFC2582 fast recovery */
4567 will_exit_fast_recovery = 1;
4571 * Check for revoked fragments:
4573 * if Previous sack - Had no frags then we can't have any revoked if
4574 * Previous sack - Had frag's then - If we now have frags aka
4575 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4576 * some of them. else - The peer revoked all ACKED fragments, since
4577 * we had some before and now we have NONE.
4581 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4582 asoc->saw_sack_with_frags = 1;
4583 } else if (asoc->saw_sack_with_frags) {
4584 int cnt_revoked = 0;
4586 /* Peer revoked all dg's marked or acked */
4587 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4588 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4589 tp1->sent = SCTP_DATAGRAM_SENT;
4590 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4591 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4592 tp1->whoTo->flight_size,
4594 (uintptr_t) tp1->whoTo,
4595 tp1->rec.data.TSN_seq);
4597 sctp_flight_size_increase(tp1);
4598 sctp_total_flight_increase(stcb, tp1);
4599 tp1->rec.data.chunk_was_revoked = 1;
4601 * To ensure that this increase in
4602 * flightsize, which is artificial, does not
4603 * throttle the sender, we also increase the
4604 * cwnd artificially.
4606 tp1->whoTo->cwnd += tp1->book_size;
4613 asoc->saw_sack_with_frags = 0;
4616 asoc->saw_sack_with_nr_frags = 1;
4618 asoc->saw_sack_with_nr_frags = 0;
4620 /* JRS - Use the congestion control given in the CC module */
4621 if (ecne_seen == 0) {
4622 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4623 if (net->net_ack2 > 0) {
4625 * Karn's rule applies to clearing error
4626 * count, this is optional.
4628 net->error_count = 0;
4629 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4630 /* addr came good */
4631 net->dest_state |= SCTP_ADDR_REACHABLE;
4632 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4633 0, (void *)net, SCTP_SO_NOT_LOCKED);
4635 if (net == stcb->asoc.primary_destination) {
4636 if (stcb->asoc.alternate) {
4638 * release the alternate,
4641 sctp_free_remote_addr(stcb->asoc.alternate);
4642 stcb->asoc.alternate = NULL;
4645 if (net->dest_state & SCTP_ADDR_PF) {
4646 net->dest_state &= ~SCTP_ADDR_PF;
4647 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT,
4648 stcb->sctp_ep, stcb, net,
4649 SCTP_FROM_SCTP_INDATA + SCTP_LOC_31);
4650 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4651 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4652 /* Done with this net */
4655 /* restore any doubled timers */
4656 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4657 if (net->RTO < stcb->asoc.minrto) {
4658 net->RTO = stcb->asoc.minrto;
4660 if (net->RTO > stcb->asoc.maxrto) {
4661 net->RTO = stcb->asoc.maxrto;
4665 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4667 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4668 /* nothing left in-flight */
4669 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4670 /* stop all timers */
4671 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4673 SCTP_FROM_SCTP_INDATA + SCTP_LOC_32);
4674 net->flight_size = 0;
4675 net->partial_bytes_acked = 0;
4677 asoc->total_flight = 0;
4678 asoc->total_flight_count = 0;
4680 /**********************************/
4681 /* Now what about shutdown issues */
4682 /**********************************/
4683 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4684 /* nothing left on sendqueue.. consider done */
4685 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4686 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4687 asoc->peers_rwnd, 0, 0, a_rwnd);
4689 asoc->peers_rwnd = a_rwnd;
4690 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4691 /* SWS sender side engages */
4692 asoc->peers_rwnd = 0;
4695 if ((asoc->stream_queue_cnt == 1) &&
4696 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4697 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4698 (asoc->locked_on_sending)
4700 struct sctp_stream_queue_pending *sp;
4703 * I may be in a state where we got all across.. but
4704 * cannot write more due to a shutdown... we abort
4705 * since the user did not indicate EOR in this case.
4707 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4709 if ((sp) && (sp->length == 0)) {
4710 asoc->locked_on_sending = NULL;
4711 if (sp->msg_is_complete) {
4712 asoc->stream_queue_cnt--;
4714 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4715 asoc->stream_queue_cnt--;
4719 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4720 (asoc->stream_queue_cnt == 0)) {
4721 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4722 /* Need to abort here */
4723 struct mbuf *op_err;
4728 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4729 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
4730 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4733 struct sctp_nets *netp;
4735 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4736 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4737 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4739 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4740 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4741 sctp_stop_timers_for_shutdown(stcb);
4742 if (asoc->alternate) {
4743 netp = asoc->alternate;
4745 netp = asoc->primary_destination;
4747 sctp_send_shutdown(stcb, netp);
4748 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4749 stcb->sctp_ep, stcb, netp);
4750 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4751 stcb->sctp_ep, stcb, netp);
4754 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4755 (asoc->stream_queue_cnt == 0)) {
4756 struct sctp_nets *netp;
4758 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4761 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4762 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4763 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4764 sctp_stop_timers_for_shutdown(stcb);
4765 if (asoc->alternate) {
4766 netp = asoc->alternate;
4768 netp = asoc->primary_destination;
4770 sctp_send_shutdown_ack(stcb, netp);
4771 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4772 stcb->sctp_ep, stcb, netp);
4777 * Now here we are going to recycle net_ack for a different use...
4780 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4785 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4786 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4787 * automatically ensure that.
4789 if ((asoc->sctp_cmt_on_off > 0) &&
4790 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4791 (cmt_dac_flag == 0)) {
4792 this_sack_lowest_newack = cum_ack;
4794 if ((num_seg > 0) || (num_nr_seg > 0)) {
4795 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4796 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4798 /* JRS - Use the congestion control given in the CC module */
4799 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4801 /* Now are we exiting loss recovery ? */
4802 if (will_exit_fast_recovery) {
4803 /* Ok, we must exit fast recovery */
4804 asoc->fast_retran_loss_recovery = 0;
4806 if ((asoc->sat_t3_loss_recovery) &&
4807 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4808 /* end satellite t3 loss recovery */
4809 asoc->sat_t3_loss_recovery = 0;
4814 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4815 if (net->will_exit_fast_recovery) {
4816 /* Ok, we must exit fast recovery */
4817 net->fast_retran_loss_recovery = 0;
4821 /* Adjust and set the new rwnd value */
4822 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4823 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4824 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4826 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4827 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4828 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4829 /* SWS sender side engages */
4830 asoc->peers_rwnd = 0;
4832 if (asoc->peers_rwnd > old_rwnd) {
4833 win_probe_recovery = 1;
4836 * Now we must setup so we have a timer up for anyone with
4842 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4843 if (win_probe_recovery && (net->window_probe)) {
4844 win_probe_recovered = 1;
4846 * Find first chunk that was used with
4847 * window probe and clear the event. Put
4848 * it back into the send queue as if has
4851 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4852 if (tp1->window_probe) {
4853 sctp_window_probe_recovery(stcb, asoc, tp1);
4858 if (net->flight_size) {
4860 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4861 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4862 stcb->sctp_ep, stcb, net);
4864 if (net->window_probe) {
4865 net->window_probe = 0;
4868 if (net->window_probe) {
4870 * In window probes we must assure a timer
4871 * is still running there
4873 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4874 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4875 stcb->sctp_ep, stcb, net);
4878 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4879 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4881 SCTP_FROM_SCTP_INDATA + SCTP_LOC_34);
4886 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4887 (asoc->sent_queue_retran_cnt == 0) &&
4888 (win_probe_recovered == 0) &&
4891 * huh, this should not happen unless all packets are
4892 * PR-SCTP and marked to skip of course.
4894 if (sctp_fs_audit(asoc)) {
4895 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4896 net->flight_size = 0;
4898 asoc->total_flight = 0;
4899 asoc->total_flight_count = 0;
4900 asoc->sent_queue_retran_cnt = 0;
4901 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4902 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4903 sctp_flight_size_increase(tp1);
4904 sctp_total_flight_increase(stcb, tp1);
4905 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4906 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4913 /*********************************************/
4914 /* Here we perform PR-SCTP procedures */
4916 /*********************************************/
4917 /* C1. update advancedPeerAckPoint */
4918 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4919 asoc->advanced_peer_ack_point = cum_ack;
4921 /* C2. try to further move advancedPeerAckPoint ahead */
4922 if ((asoc->prsctp_supported) && (asoc->pr_sctp_cnt > 0)) {
4923 struct sctp_tmit_chunk *lchk;
4924 uint32_t old_adv_peer_ack_point;
4926 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4927 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4928 /* C3. See if we need to send a Fwd-TSN */
4929 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4931 * ISSUE with ECN, see FWD-TSN processing.
4933 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4934 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4935 0xee, cum_ack, asoc->advanced_peer_ack_point,
4936 old_adv_peer_ack_point);
4938 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4939 send_forward_tsn(stcb, asoc);
4941 /* try to FR fwd-tsn's that get lost too */
4942 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4943 send_forward_tsn(stcb, asoc);
4948 /* Assure a timer is up */
4949 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4950 stcb->sctp_ep, stcb, lchk->whoTo);
4953 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4954 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4956 stcb->asoc.peers_rwnd,
4957 stcb->asoc.total_flight,
4958 stcb->asoc.total_output_queue_size);
/*
 * Process the cumulative TSN ack carried in a received SHUTDOWN chunk.
 *
 * A SHUTDOWN chunk conveys only a cum-ack (no gap-ack blocks), so it is
 * funneled through the express SACK handler.  The advertised window
 * passed down is computed as peers_rwnd + total_flight so that, after
 * the acked data is removed from flight, the peer's rwnd appears
 * unchanged by this synthetic "SACK".
 *
 * NOTE(review): extraction lost the return-type line and closing brace;
 * the code lines below are kept byte-identical.
 */
4963 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4966 uint32_t cum_ack, a_rwnd;
/* cum-ack arrives in network byte order */
4968 cum_ack = ntohl(cp->cumulative_tsn_ack);
4969 /* Arrange so a_rwnd does NOT change */
4970 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
4972 /* Now call the express sack handling */
/* final 0 arg: no ECN-echo seen with this "SACK" */
4973 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * Kick a single inbound stream's reorder queue after PR-SCTP (FWD-TSN)
 * processing has advanced last_sequence_delivered.
 *
 * Pass 1: everything queued with an SSN at or before the (possibly
 * skipped-ahead) last_sequence_delivered is now deliverable — move it
 * to the read queue.
 * Pass 2: resume normal in-order delivery, walking consecutive SSNs
 * starting at last_sequence_delivered + 1 and advancing the counter as
 * each message is handed up.
 *
 * Caller holds the INP read lock (deliveries use SCTP_READ_LOCK_HELD).
 * NOTE(review): extraction dropped braces, the `tt` declaration and the
 * `asoc = &stcb->asoc;` line; code lines are kept byte-identical.
 */
4977 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4978 struct sctp_stream_in *strmin)
4980 struct sctp_queued_to_read *ctl, *nctl;
4981 struct sctp_association *asoc;
/* tt = highest SSN already considered delivered on this stream */
4985 tt = strmin->last_sequence_delivered;
4987 * First deliver anything prior to and including the stream no that
/* Pass 1: flush every entry with SSN <= tt (SSN compare is serial-number
 * arithmetic via SCTP_SSN_GE) */
4990 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4991 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4992 /* this is deliverable now */
4993 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4994 /* subtract pending on streams */
4995 asoc->size_on_all_streams -= ctl->length;
4996 sctp_ucount_decr(asoc->cnt_on_all_streams);
4997 /* deliver it to at least the delivery-q */
4998 if (stcb->sctp_socket) {
4999 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5000 sctp_add_to_readq(stcb->sctp_ep, stcb,
5002 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* queue is SSN-ordered: first higher SSN ends pass 1 */
5005 /* no more delivery now. */
5010 * now we must deliver things in queue the normal way if any are
/* Pass 2: deliver strictly consecutive SSNs, bumping
 * last_sequence_delivered as each one goes up */
5013 tt = strmin->last_sequence_delivered + 1;
5014 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5015 if (tt == ctl->sinfo_ssn) {
5016 /* this is deliverable now */
5017 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5018 /* subtract pending on streams */
5019 asoc->size_on_all_streams -= ctl->length;
5020 sctp_ucount_decr(asoc->cnt_on_all_streams);
5021 /* deliver it to at least the delivery-q */
5022 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5023 if (stcb->sctp_socket) {
5024 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5025 sctp_add_to_readq(stcb->sctp_ep, stcb,
5027 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* recompute the next expected SSN and keep scanning */
5030 tt = strmin->last_sequence_delivered + 1;
/*
 * Purge the reassembly queue of all fragments belonging to a single
 * ordered message (stream, seq) that the peer has skipped via FWD-TSN.
 *
 * Unordered chunks and chunks of other streams are left alone (the SSN
 * is meaningless for unordered delivery).  For each tossed fragment we
 * roll forward the PD-API bookkeeping (tsn_last_delivered,
 * str_of_pdapi, ssn_of_pdapi, fragment_flags) and, when the tossed
 * chunk is ordered and ahead of the stream's delivery point, advance
 * last_sequence_delivered so the stream does not stall.
 *
 * NOTE(review): extraction truncated the tail of this function (the
 * early-exit for SSNs past the victim and closing braces); code lines
 * are kept byte-identical.
 */
5038 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5039 struct sctp_association *asoc,
5040 uint16_t stream, uint16_t seq)
5042 struct sctp_tmit_chunk *chk, *nchk;
5044 /* For each one on here see if we need to toss it */
5046 * For now large messages held on the reasmqueue that are complete
5047 * will be tossed too. We could in theory do more work to spin
5048 * through and stop after dumping one msg aka seeing the start of a
5049 * new msg at the head, and call the delivery function... to see if
5050 * it can be delivered... But for now we just dump everything on the
/* SAFE variant: entries are removed while iterating */
5053 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5055 * Do not toss it if on a different stream or marked for
5056 * unordered delivery in which case the stream sequence
5057 * number has no meaning.
5059 if ((chk->rec.data.stream_number != stream) ||
5060 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5063 if (chk->rec.data.stream_seq == seq) {
5064 /* It needs to be tossed */
5065 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
/* track the newest TSN we have discarded for PD-API state */
5066 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5067 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5068 asoc->str_of_pdapi = chk->rec.data.stream_number;
5069 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5070 asoc->fragment_flags = chk->rec.data.rcv_flags;
/* account for the removed fragment */
5072 asoc->size_on_reasm_queue -= chk->send_size;
5073 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5075 /* Clear up any stream problem */
5076 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5077 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5079 * We must dump forward this streams
5080 * sequence number if the chunk is not
5081 * unordered that is being skipped. There is
5082 * a chance that if the peer does not
5083 * include the last fragment in its FWD-TSN
5084 * we WILL have a problem here since you
5085 * would have a partial chunk in queue that
5086 * may not be deliverable. Also if a Partial
5087 * delivery API as started the user may get
5088 * a partial chunk. The next read returning
5089 * a new chunk... really ugly but I see no
5090 * way around it! Maybe a notify??
5092 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
/* release the fragment's mbuf chain and the chunk itself */
5095 sctp_m_freem(chk->data);
5098 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5099 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5101 * If the stream_seq is > than the purging one, we
5111 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5112 struct sctp_forward_tsn_chunk *fwd,
5113 int *abort_flag, struct mbuf *m, int offset)
5115 /* The pr-sctp fwd tsn */
5117 * here we will perform all the data receiver side steps for
5118 * processing FwdTSN, as required in by pr-sctp draft:
5120 * Assume we get FwdTSN(x):
5122 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5123 * others we have 3) examine and update re-ordering queue on
5124 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5125 * report where we are.
5127 struct sctp_association *asoc;
5128 uint32_t new_cum_tsn, gap;
5129 unsigned int i, fwd_sz, m_size;
5131 struct sctp_stream_in *strm;
5132 struct sctp_tmit_chunk *chk, *nchk;
5133 struct sctp_queued_to_read *ctl, *sv;
5136 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5137 SCTPDBG(SCTP_DEBUG_INDATA1,
5138 "Bad size too small/big fwd-tsn\n");
5141 m_size = (stcb->asoc.mapping_array_size << 3);
5142 /*************************************************************/
5143 /* 1. Here we update local cumTSN and shift the bitmap array */
5144 /*************************************************************/
5145 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5147 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5148 /* Already got there ... */
5152 * now we know the new TSN is more advanced, let's find the actual
5155 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5156 asoc->cumulative_tsn = new_cum_tsn;
5157 if (gap >= m_size) {
5158 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5159 struct mbuf *op_err;
5160 char msg[SCTP_DIAG_INFO_LEN];
5163 * out of range (of single byte chunks in the rwnd I
5164 * give out). This must be an attacker.
5167 snprintf(msg, sizeof(msg),
5168 "New cum ack %8.8x too high, highest TSN %8.8x",
5169 new_cum_tsn, asoc->highest_tsn_inside_map);
5170 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5171 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_35;
5172 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5175 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5177 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5178 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5179 asoc->highest_tsn_inside_map = new_cum_tsn;
5181 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5182 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5184 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5185 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5188 SCTP_TCB_LOCK_ASSERT(stcb);
5189 for (i = 0; i <= gap; i++) {
5190 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5191 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5192 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5193 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5194 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5199 /*************************************************************/
5200 /* 2. Clear up re-assembly queue */
5201 /*************************************************************/
5203 * First service it if pd-api is up, just in case we can progress it
5206 if (asoc->fragmented_delivery_inprogress) {
5207 sctp_service_reassembly(stcb, asoc);
5209 /* For each one on here see if we need to toss it */
5211 * For now large messages held on the reasmqueue that are complete
5212 * will be tossed too. We could in theory do more work to spin
5213 * through and stop after dumping one msg aka seeing the start of a
5214 * new msg at the head, and call the delivery function... to see if
5215 * it can be delivered... But for now we just dump everything on the
5218 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5219 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5220 /* It needs to be tossed */
5221 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5222 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5223 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5224 asoc->str_of_pdapi = chk->rec.data.stream_number;
5225 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5226 asoc->fragment_flags = chk->rec.data.rcv_flags;
5228 asoc->size_on_reasm_queue -= chk->send_size;
5229 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5231 /* Clear up any stream problem */
5232 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5233 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5235 * We must dump forward this streams
5236 * sequence number if the chunk is not
5237 * unordered that is being skipped. There is
5238 * a chance that if the peer does not
5239 * include the last fragment in its FWD-TSN
5240 * we WILL have a problem here since you
5241 * would have a partial chunk in queue that
5242 * may not be deliverable. Also if a Partial
5243 * delivery API as started the user may get
5244 * a partial chunk. The next read returning
5245 * a new chunk... really ugly but I see no
5246 * way around it! Maybe a notify??
5248 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5251 sctp_m_freem(chk->data);
5254 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5257 * Ok we have gone beyond the end of the fwd-tsn's
5263 /*******************************************************/
5264 /* 3. Update the PR-stream re-ordering queues and fix */
5265 /* delivery issues as needed. */
5266 /*******************************************************/
5267 fwd_sz -= sizeof(*fwd);
5270 unsigned int num_str;
5271 struct sctp_strseq *stseq, strseqbuf;
5273 offset += sizeof(*fwd);
5275 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5276 num_str = fwd_sz / sizeof(struct sctp_strseq);
5277 for (i = 0; i < num_str; i++) {
5280 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5281 sizeof(struct sctp_strseq),
5282 (uint8_t *) & strseqbuf);
5283 offset += sizeof(struct sctp_strseq);
5284 if (stseq == NULL) {
5288 st = ntohs(stseq->stream);
5290 st = ntohs(stseq->sequence);
5291 stseq->sequence = st;
5296 * Ok we now look for the stream/seq on the read
5297 * queue where its not all delivered. If we find it
5298 * we transmute the read entry into a PDI_ABORTED.
5300 if (stseq->stream >= asoc->streamincnt) {
5301 /* screwed up streams, stop! */
5304 if ((asoc->str_of_pdapi == stseq->stream) &&
5305 (asoc->ssn_of_pdapi == stseq->sequence)) {
5307 * If this is the one we were partially
5308 * delivering now then we no longer are.
5309 * Note this will change with the reassembly
5312 asoc->fragmented_delivery_inprogress = 0;
5314 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5315 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5316 if ((ctl->sinfo_stream == stseq->stream) &&
5317 (ctl->sinfo_ssn == stseq->sequence)) {
5318 str_seq = (stseq->stream << 16) | stseq->sequence;
5320 ctl->pdapi_aborted = 1;
5321 sv = stcb->asoc.control_pdapi;
5322 stcb->asoc.control_pdapi = ctl;
5323 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5325 SCTP_PARTIAL_DELIVERY_ABORTED,
5327 SCTP_SO_NOT_LOCKED);
5328 stcb->asoc.control_pdapi = sv;
5330 } else if ((ctl->sinfo_stream == stseq->stream) &&
5331 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5332 /* We are past our victim SSN */
5336 strm = &asoc->strmin[stseq->stream];
5337 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5338 /* Update the sequence number */
5339 strm->last_sequence_delivered = stseq->sequence;
5341 /* now kick the stream the new way */
5342 /* sa_ignore NO_NULL_CHK */
5343 sctp_kick_prsctp_reorder_queue(stcb, strm);
5345 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5348 * Now slide thing forward.
5350 sctp_slide_mapping_arrays(stcb);
5352 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5353 /* now lets kick out and check for more fragmented delivery */
5354 /* sa_ignore NO_NULL_CHK */
5355 sctp_deliver_reasm_check(stcb, &stcb->asoc);