2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send that is and will be sending it .. for bundling).
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
77 if (stcb->sctp_socket == NULL)
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even it is 0. SWS engaged
109 if (calc < stcb->asoc.my_rwnd_control_len) {
118 * Build out our readq entry based on the incoming packet.
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
128 struct sctp_queued_to_read *read_queue_e = NULL;
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
157 return (read_queue_e);
162 * Build out our readq entry based on the incoming packet.
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
168 struct sctp_queued_to_read *read_queue_e = NULL;
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
186 atomic_add_int(&chk->whoTo->ref_count, 1);
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
197 return (read_queue_e);
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
204 struct sctp_extrcvinfo *seinfo;
205 struct sctp_sndrcvinfo *outinfo;
206 struct sctp_rcvinfo *rcvinfo;
207 struct sctp_nxtinfo *nxtinfo;
214 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 /* user does not want any ancillary data */
221 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
224 seinfo = (struct sctp_extrcvinfo *)sinfo;
225 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
228 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
235 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
238 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
244 ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
249 SCTP_BUF_LEN(ret) = 0;
251 /* We need a CMSG header followed by the struct */
252 cmh = mtod(ret, struct cmsghdr *);
253 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
254 cmh->cmsg_level = IPPROTO_SCTP;
255 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
256 cmh->cmsg_type = SCTP_RCVINFO;
257 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
258 rcvinfo->rcv_sid = sinfo->sinfo_stream;
259 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
260 rcvinfo->rcv_flags = sinfo->sinfo_flags;
261 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
262 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
263 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
264 rcvinfo->rcv_context = sinfo->sinfo_context;
265 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
266 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
267 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
270 cmh->cmsg_level = IPPROTO_SCTP;
271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
272 cmh->cmsg_type = SCTP_NXTINFO;
273 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
274 nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
275 nxtinfo->nxt_flags = 0;
276 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
277 nxtinfo->nxt_flags |= SCTP_UNORDERED;
279 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
280 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
282 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
283 nxtinfo->nxt_flags |= SCTP_COMPLETE;
285 nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
286 nxtinfo->nxt_length = seinfo->sreinfo_next_length;
287 nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
288 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
289 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
291 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
292 cmh->cmsg_level = IPPROTO_SCTP;
293 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
295 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
296 cmh->cmsg_type = SCTP_EXTRCV;
297 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
298 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
300 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
301 cmh->cmsg_type = SCTP_SNDRCV;
303 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
311 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
313 uint32_t gap, i, cumackp1;
316 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
319 cumackp1 = asoc->cumulative_tsn + 1;
320 if (SCTP_TSN_GT(cumackp1, tsn)) {
322 * this tsn is behind the cum ack and thus we don't need to
323 * worry about it being moved from one to the other.
327 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
328 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
329 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
330 sctp_print_mapping_array(asoc);
332 panic("Things are really messed up now!!");
335 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
336 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
337 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
338 asoc->highest_tsn_inside_nr_map = tsn;
340 if (tsn == asoc->highest_tsn_inside_map) {
341 /* We must back down to see what the new highest is */
342 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
343 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
344 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
345 asoc->highest_tsn_inside_map = i;
351 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
358 * We are delivering currently from the reassembly queue. We must continue to
359 * deliver until we either: 1) run out of space. 2) run out of sequential
360 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
363 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
365 struct sctp_tmit_chunk *chk, *nchk;
370 struct sctp_queued_to_read *control, *ctl, *nctl;
375 cntDel = stream_no = 0;
376 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
377 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
378 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
379 /* socket above is long gone or going.. */
381 asoc->fragmented_delivery_inprogress = 0;
382 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
383 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
384 asoc->size_on_reasm_queue -= chk->send_size;
385 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
387 * Lose the data pointer, since its in the socket
391 sctp_m_freem(chk->data);
394 /* Now free the address and data */
395 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
396 /* sa_ignore FREED_MEMORY */
400 SCTP_TCB_LOCK_ASSERT(stcb);
401 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
402 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
403 /* Can't deliver more :< */
406 stream_no = chk->rec.data.stream_number;
407 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
408 if (nxt_todel != chk->rec.data.stream_seq &&
409 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
411 * Not the next sequence to deliver in its stream OR
416 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
418 control = sctp_build_readq_entry_chk(stcb, chk);
419 if (control == NULL) {
423 /* save it off for our future deliveries */
424 stcb->asoc.control_pdapi = control;
425 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
429 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
430 sctp_add_to_readq(stcb->sctp_ep,
431 stcb, control, &stcb->sctp_socket->so_rcv, end,
432 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
435 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
439 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
440 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
441 stcb->asoc.control_pdapi,
442 chk->data, end, chk->rec.data.TSN_seq,
443 &stcb->sctp_socket->so_rcv)) {
445 * something is very wrong, either
446 * control_pdapi is NULL, or the tail_mbuf
447 * is corrupt, or there is a EOM already on
450 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
454 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
455 panic("This should not happen control_pdapi NULL?");
457 /* if we did not panic, it was a EOM */
458 panic("Bad chunking ??");
460 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
461 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
463 SCTP_PRINTF("Bad chunking ??\n");
464 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
472 /* pull it we did it */
473 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
474 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
475 asoc->fragmented_delivery_inprogress = 0;
476 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
477 asoc->strmin[stream_no].last_sequence_delivered++;
479 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
480 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
482 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
484 * turn the flag back on since we just delivered
487 asoc->fragmented_delivery_inprogress = 1;
489 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
490 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
491 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
492 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
494 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
495 asoc->size_on_reasm_queue -= chk->send_size;
496 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
497 /* free up the chk */
499 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
501 if (asoc->fragmented_delivery_inprogress == 0) {
503 * Now lets see if we can deliver the next one on
506 struct sctp_stream_in *strm;
508 strm = &asoc->strmin[stream_no];
509 nxt_todel = strm->last_sequence_delivered + 1;
510 TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
511 /* Deliver more if we can. */
512 if (nxt_todel == ctl->sinfo_ssn) {
513 TAILQ_REMOVE(&strm->inqueue, ctl, next);
514 asoc->size_on_all_streams -= ctl->length;
515 sctp_ucount_decr(asoc->cnt_on_all_streams);
516 strm->last_sequence_delivered++;
517 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
518 sctp_add_to_readq(stcb->sctp_ep, stcb,
520 &stcb->sctp_socket->so_rcv, 1,
521 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
525 nxt_todel = strm->last_sequence_delivered + 1;
533 * Queue the chunk either right into the socket buffer if it is the next one
534 * to go OR put it in the correct place in the delivery queue. If we do
535 * append to the so_buf, keep doing so until we are out of order. One big
536 * question still remains, what to do when the socket buffer is FULL??
539 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
540 struct sctp_queued_to_read *control, int *abort_flag)
543 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
544 * all the data in one stream this could happen quite rapidly. One
545 * could use the TSN to keep track of things, but this scheme breaks
546 * down in the other type of stream useage that could occur. Send a
547 * single msg to stream 0, send 4Billion messages to stream 1, now
548 * send a message to stream 0. You have a situation where the TSN
549 * has wrapped but not in the stream. Is this worth worrying about
550 * or should we just change our queue sort at the bottom to be by
553 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
554 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
555 * assignment this could happen... and I don't see how this would be
556 * a violation. So for now I am undecided an will leave the sort by
557 * SSN alone. Maybe a hybred approach is the answer
560 struct sctp_stream_in *strm;
561 struct sctp_queued_to_read *at;
565 char msg[SCTP_DIAG_INFO_LEN];
568 asoc->size_on_all_streams += control->length;
569 sctp_ucount_incr(asoc->cnt_on_all_streams);
570 strm = &asoc->strmin[control->sinfo_stream];
571 nxt_todel = strm->last_sequence_delivered + 1;
572 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
573 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
575 SCTPDBG(SCTP_DEBUG_INDATA1,
576 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
577 (uint32_t) control->sinfo_stream,
578 (uint32_t) strm->last_sequence_delivered,
579 (uint32_t) nxt_todel);
580 if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
581 /* The incoming sseq is behind where we last delivered? */
582 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
583 control->sinfo_ssn, strm->last_sequence_delivered);
586 * throw it in the stream so it gets cleaned up in
587 * association destruction
589 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
590 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
591 strm->last_sequence_delivered, control->sinfo_tsn,
592 control->sinfo_stream, control->sinfo_ssn);
593 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
594 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
595 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
600 if (nxt_todel == control->sinfo_ssn) {
601 /* can be delivered right away? */
602 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
603 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
605 /* EY it wont be queued if it could be delivered directly */
607 asoc->size_on_all_streams -= control->length;
608 sctp_ucount_decr(asoc->cnt_on_all_streams);
609 strm->last_sequence_delivered++;
611 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
612 sctp_add_to_readq(stcb->sctp_ep, stcb,
614 &stcb->sctp_socket->so_rcv, 1,
615 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
616 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
618 nxt_todel = strm->last_sequence_delivered + 1;
619 if (nxt_todel == control->sinfo_ssn) {
620 TAILQ_REMOVE(&strm->inqueue, control, next);
621 asoc->size_on_all_streams -= control->length;
622 sctp_ucount_decr(asoc->cnt_on_all_streams);
623 strm->last_sequence_delivered++;
625 * We ignore the return of deliver_data here
626 * since we always can hold the chunk on the
627 * d-queue. And we have a finite number that
628 * can be delivered from the strq.
630 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
631 sctp_log_strm_del(control, NULL,
632 SCTP_STR_LOG_FROM_IMMED_DEL);
634 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
635 sctp_add_to_readq(stcb->sctp_ep, stcb,
637 &stcb->sctp_socket->so_rcv, 1,
638 SCTP_READ_LOCK_NOT_HELD,
647 * Ok, we did not deliver this guy, find the correct place
648 * to put it on the queue.
650 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
653 if (TAILQ_EMPTY(&strm->inqueue)) {
655 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
656 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
658 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
660 TAILQ_FOREACH(at, &strm->inqueue, next) {
661 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
663 * one in queue is bigger than the
664 * new one, insert before this one
666 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
667 sctp_log_strm_del(control, at,
668 SCTP_STR_LOG_FROM_INSERT_MD);
670 TAILQ_INSERT_BEFORE(at, control, next);
672 } else if (at->sinfo_ssn == control->sinfo_ssn) {
674 * Gak, He sent me a duplicate str
678 * foo bar, I guess I will just free
679 * this new guy, should we abort
680 * too? FIX ME MAYBE? Or it COULD be
681 * that the SSN's have wrapped.
682 * Maybe I should compare to TSN
683 * somehow... sigh for now just blow
688 sctp_m_freem(control->data);
689 control->data = NULL;
690 asoc->size_on_all_streams -= control->length;
691 sctp_ucount_decr(asoc->cnt_on_all_streams);
692 if (control->whoFrom) {
693 sctp_free_remote_addr(control->whoFrom);
694 control->whoFrom = NULL;
696 sctp_free_a_readq(stcb, control);
699 if (TAILQ_NEXT(at, next) == NULL) {
701 * We are at the end, insert
704 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
705 sctp_log_strm_del(control, at,
706 SCTP_STR_LOG_FROM_INSERT_TL);
708 TAILQ_INSERT_AFTER(&strm->inqueue,
719 * Returns two things: You get the total size of the deliverable parts of the
720 * first fragmented message on the reassembly queue. And you get a 1 back if
721 * all of the message is ready or a 0 back if the message is still incomplete
724 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
726 struct sctp_tmit_chunk *chk;
730 chk = TAILQ_FIRST(&asoc->reasmqueue);
732 /* nothing on the queue */
735 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
736 /* Not a first on the queue */
739 tsn = chk->rec.data.TSN_seq;
740 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
741 if (tsn != chk->rec.data.TSN_seq) {
744 *t_size += chk->send_size;
745 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
754 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
756 struct sctp_tmit_chunk *chk;
758 uint32_t tsize, pd_point;
761 chk = TAILQ_FIRST(&asoc->reasmqueue);
764 asoc->size_on_reasm_queue = 0;
765 asoc->cnt_on_reasm_queue = 0;
768 if (asoc->fragmented_delivery_inprogress == 0) {
770 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
771 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
772 (nxt_todel == chk->rec.data.stream_seq ||
773 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
775 * Yep the first one is here and its ok to deliver
778 if (stcb->sctp_socket) {
779 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
780 stcb->sctp_ep->partial_delivery_point);
782 pd_point = stcb->sctp_ep->partial_delivery_point;
784 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
786 * Yes, we setup to start reception, by
787 * backing down the TSN just in case we
788 * can't deliver. If we
790 asoc->fragmented_delivery_inprogress = 1;
791 asoc->tsn_last_delivered =
792 chk->rec.data.TSN_seq - 1;
794 chk->rec.data.stream_number;
795 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
796 asoc->pdapi_ppid = chk->rec.data.payloadtype;
797 asoc->fragment_flags = chk->rec.data.rcv_flags;
798 sctp_service_reassembly(stcb, asoc);
803 * Service re-assembly will deliver stream data queued at
804 * the end of fragmented delivery.. but it wont know to go
805 * back and call itself again... we do that here with the
808 sctp_service_reassembly(stcb, asoc);
809 if (asoc->fragmented_delivery_inprogress == 0) {
811 * finished our Fragmented delivery, could be more
820 * Dump onto the re-assembly queue, in its proper place. After dumping on the
821 * queue, see if anthing can be delivered. If so pull it off (or as much as
822 * we can. If we run out of space then we must dump what we can and set the
823 * appropriate flag to say we queued what we could.
826 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
827 struct sctp_tmit_chunk *chk, int *abort_flag)
830 char msg[SCTP_DIAG_INFO_LEN];
832 uint32_t cum_ackp1, prev_tsn, post_tsn;
833 struct sctp_tmit_chunk *at, *prev, *next;
836 cum_ackp1 = asoc->tsn_last_delivered + 1;
837 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
838 /* This is the first one on the queue */
839 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
841 * we do not check for delivery of anything when only one
844 asoc->size_on_reasm_queue = chk->send_size;
845 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
846 if (chk->rec.data.TSN_seq == cum_ackp1) {
847 if (asoc->fragmented_delivery_inprogress == 0 &&
848 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
849 SCTP_DATA_FIRST_FRAG) {
851 * An empty queue, no delivery inprogress,
852 * we hit the next one and it does NOT have
853 * a FIRST fragment mark.
855 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
856 snprintf(msg, sizeof(msg),
857 "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
858 chk->rec.data.TSN_seq,
859 chk->rec.data.stream_number,
860 chk->rec.data.stream_seq);
861 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
862 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
863 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
865 } else if (asoc->fragmented_delivery_inprogress &&
866 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
868 * We are doing a partial delivery and the
869 * NEXT chunk MUST be either the LAST or
870 * MIDDLE fragment NOT a FIRST
872 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
873 snprintf(msg, sizeof(msg),
874 "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
875 chk->rec.data.TSN_seq,
876 chk->rec.data.stream_number,
877 chk->rec.data.stream_seq);
878 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
879 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
880 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
882 } else if (asoc->fragmented_delivery_inprogress) {
884 * Here we are ok with a MIDDLE or LAST
887 if (chk->rec.data.stream_number !=
888 asoc->str_of_pdapi) {
889 /* Got to be the right STR No */
890 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
891 chk->rec.data.stream_number,
893 snprintf(msg, sizeof(msg),
894 "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
896 chk->rec.data.TSN_seq,
897 chk->rec.data.stream_number,
898 chk->rec.data.stream_seq);
899 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
900 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
901 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
903 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
904 SCTP_DATA_UNORDERED &&
905 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
906 /* Got to be the right STR Seq */
907 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
908 chk->rec.data.stream_seq,
910 snprintf(msg, sizeof(msg),
911 "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
913 chk->rec.data.TSN_seq,
914 chk->rec.data.stream_number,
915 chk->rec.data.stream_seq);
916 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
917 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
918 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
926 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
927 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
929 * one in queue is bigger than the new one, insert
933 asoc->size_on_reasm_queue += chk->send_size;
934 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
936 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
938 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
939 /* Gak, He sent me a duplicate str seq number */
941 * foo bar, I guess I will just free this new guy,
942 * should we abort too? FIX ME MAYBE? Or it COULD be
943 * that the SSN's have wrapped. Maybe I should
944 * compare to TSN somehow... sigh for now just blow
948 sctp_m_freem(chk->data);
951 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
955 if (TAILQ_NEXT(at, sctp_next) == NULL) {
957 * We are at the end, insert it after this
961 asoc->size_on_reasm_queue += chk->send_size;
962 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
963 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
970 prev_tsn = chk->rec.data.TSN_seq - 1;
971 if (prev_tsn == prev->rec.data.TSN_seq) {
973 * Ok the one I am dropping onto the end is the
974 * NEXT. A bit of valdiation here.
976 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
977 SCTP_DATA_FIRST_FRAG ||
978 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
979 SCTP_DATA_MIDDLE_FRAG) {
981 * Insert chk MUST be a MIDDLE or LAST
984 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
985 SCTP_DATA_FIRST_FRAG) {
986 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
987 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
988 snprintf(msg, sizeof(msg),
989 "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
990 chk->rec.data.TSN_seq,
991 chk->rec.data.stream_number,
992 chk->rec.data.stream_seq);
993 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
994 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
995 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
999 if (chk->rec.data.stream_number !=
1000 prev->rec.data.stream_number) {
1002 * Huh, need the correct STR here,
1003 * they must be the same.
1005 SCTP_PRINTF("Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
1006 chk->rec.data.stream_number,
1007 prev->rec.data.stream_number);
1008 snprintf(msg, sizeof(msg),
1009 "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1010 prev->rec.data.stream_number,
1011 chk->rec.data.TSN_seq,
1012 chk->rec.data.stream_number,
1013 chk->rec.data.stream_seq);
1014 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1015 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1016 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1020 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1021 chk->rec.data.stream_seq !=
1022 prev->rec.data.stream_seq) {
1024 * Huh, need the correct STR here,
1025 * they must be the same.
1027 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1028 chk->rec.data.stream_seq,
1029 prev->rec.data.stream_seq);
1030 snprintf(msg, sizeof(msg),
1031 "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1032 prev->rec.data.stream_seq,
1033 chk->rec.data.TSN_seq,
1034 chk->rec.data.stream_number,
1035 chk->rec.data.stream_seq);
1036 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1037 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1038 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1042 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1043 SCTP_DATA_LAST_FRAG) {
1044 /* Insert chk MUST be a FIRST */
1045 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1046 SCTP_DATA_FIRST_FRAG) {
1047 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1048 snprintf(msg, sizeof(msg),
1049 "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1050 chk->rec.data.TSN_seq,
1051 chk->rec.data.stream_number,
1052 chk->rec.data.stream_seq);
1053 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1054 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1055 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1063 post_tsn = chk->rec.data.TSN_seq + 1;
1064 if (post_tsn == next->rec.data.TSN_seq) {
1066 * Ok the one I am inserting ahead of is my NEXT
1067 * one. A bit of valdiation here.
1069 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1070 /* Insert chk MUST be a last fragment */
1071 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1072 != SCTP_DATA_LAST_FRAG) {
1073 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1074 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1075 snprintf(msg, sizeof(msg),
1076 "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1077 chk->rec.data.TSN_seq,
1078 chk->rec.data.stream_number,
1079 chk->rec.data.stream_seq);
1080 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1081 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1082 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1086 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1087 SCTP_DATA_MIDDLE_FRAG ||
1088 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1089 SCTP_DATA_LAST_FRAG) {
1091 * Insert chk CAN be MIDDLE or FIRST NOT
1094 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1095 SCTP_DATA_LAST_FRAG) {
1096 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1097 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1098 snprintf(msg, sizeof(msg),
1099 "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1100 chk->rec.data.TSN_seq,
1101 chk->rec.data.stream_number,
1102 chk->rec.data.stream_seq);
1103 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1104 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1105 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1109 if (chk->rec.data.stream_number !=
1110 next->rec.data.stream_number) {
1112 * Huh, need the correct STR here,
1113 * they must be the same.
1115 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1116 chk->rec.data.stream_number,
1117 next->rec.data.stream_number);
1118 snprintf(msg, sizeof(msg),
1119 "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1120 next->rec.data.stream_number,
1121 chk->rec.data.TSN_seq,
1122 chk->rec.data.stream_number,
1123 chk->rec.data.stream_seq);
1124 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1125 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1126 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1130 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1131 chk->rec.data.stream_seq !=
1132 next->rec.data.stream_seq) {
1134 * Huh, need the correct STR here,
1135 * they must be the same.
1137 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1138 chk->rec.data.stream_seq,
1139 next->rec.data.stream_seq);
1140 snprintf(msg, sizeof(msg),
1141 "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1142 next->rec.data.stream_seq,
1143 chk->rec.data.TSN_seq,
1144 chk->rec.data.stream_number,
1145 chk->rec.data.stream_seq);
1146 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1147 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1148 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1155 /* Do we need to do some delivery? check */
1156 sctp_deliver_reasm_check(stcb, asoc);
1160 * This is an unfortunate routine. It checks to make sure a evil guy is not
1161 * stuffing us full of bad packet fragments. A broken peer could also do this
1162 * but this is doubtful. It is to bad I must worry about evil crackers sigh
1166 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1169 struct sctp_tmit_chunk *at;
1172 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1173 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1174 /* is it one bigger? */
1175 tsn_est = at->rec.data.TSN_seq + 1;
1176 if (tsn_est == TSN_seq) {
1177 /* yep. It better be a last then */
1178 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1179 SCTP_DATA_LAST_FRAG) {
1181 * Ok this guy belongs next to a guy
1182 * that is NOT last, it should be a
1183 * middle/last, not a complete
1189 * This guy is ok since its a LAST
1190 * and the new chunk is a fully
1191 * self- contained one.
1196 } else if (TSN_seq == at->rec.data.TSN_seq) {
1197 /* Software error since I have a dup? */
1201 * Ok, 'at' is larger than new chunk but does it
1202 * need to be right before it.
1204 tsn_est = TSN_seq + 1;
1205 if (tsn_est == at->rec.data.TSN_seq) {
1206 /* Yep, It better be a first */
1207 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1208 SCTP_DATA_FIRST_FRAG) {
1220 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1221 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1222 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1223 int *break_flag, int last_chunk)
1225 /* Process a data chunk */
1226 /* struct sctp_tmit_chunk *chk; */
1227 struct sctp_tmit_chunk *chk;
1231 int need_reasm_check = 0;
1232 uint16_t strmno, strmseq;
1233 struct mbuf *op_err;
1234 char msg[SCTP_DIAG_INFO_LEN];
1235 struct sctp_queued_to_read *control;
1237 uint32_t protocol_id;
1238 uint8_t chunk_flags;
1239 struct sctp_stream_reset_list *liste;
1242 tsn = ntohl(ch->dp.tsn);
1243 chunk_flags = ch->ch.chunk_flags;
1244 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1245 asoc->send_sack = 1;
1247 protocol_id = ch->dp.protocol_id;
1248 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1249 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1250 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1255 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1256 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1257 /* It is a duplicate */
1258 SCTP_STAT_INCR(sctps_recvdupdata);
1259 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1260 /* Record a dup for the next outbound sack */
1261 asoc->dup_tsns[asoc->numduptsns] = tsn;
1264 asoc->send_sack = 1;
1267 /* Calculate the number of TSN's between the base and this TSN */
1268 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1269 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1270 /* Can't hold the bit in the mapping at max array, toss it */
1273 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1274 SCTP_TCB_LOCK_ASSERT(stcb);
1275 if (sctp_expand_mapping_array(asoc, gap)) {
1276 /* Can't expand, drop it */
1280 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1283 /* See if we have received this one already */
1284 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1285 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1286 SCTP_STAT_INCR(sctps_recvdupdata);
1287 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1288 /* Record a dup for the next outbound sack */
1289 asoc->dup_tsns[asoc->numduptsns] = tsn;
1292 asoc->send_sack = 1;
1296 * Check to see about the GONE flag, duplicates would cause a sack
1297 * to be sent up above
1299 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1300 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1301 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1303 * wait a minute, this guy is gone, there is no longer a
1304 * receiver. Send peer an ABORT!
1306 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1307 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1312 * Now before going further we see if there is room. If NOT then we
1313 * MAY let one through only IF this TSN is the one we are waiting
1314 * for on a partial delivery API.
1317 /* now do the tests */
1318 if (((asoc->cnt_on_all_streams +
1319 asoc->cnt_on_reasm_queue +
1320 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1321 (((int)asoc->my_rwnd) <= 0)) {
1323 * When we have NO room in the rwnd we check to make sure
1324 * the reader is doing its job...
1326 if (stcb->sctp_socket->so_rcv.sb_cc) {
1327 /* some to read, wake-up */
1328 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1331 so = SCTP_INP_SO(stcb->sctp_ep);
1332 atomic_add_int(&stcb->asoc.refcnt, 1);
1333 SCTP_TCB_UNLOCK(stcb);
1334 SCTP_SOCKET_LOCK(so, 1);
1335 SCTP_TCB_LOCK(stcb);
1336 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1337 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1338 /* assoc was freed while we were unlocked */
1339 SCTP_SOCKET_UNLOCK(so, 1);
1343 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1344 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1345 SCTP_SOCKET_UNLOCK(so, 1);
1348 /* now is it in the mapping array of what we have accepted? */
1349 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1350 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1351 /* Nope not in the valid range dump it */
1352 sctp_set_rwnd(stcb, asoc);
1353 if ((asoc->cnt_on_all_streams +
1354 asoc->cnt_on_reasm_queue +
1355 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1356 SCTP_STAT_INCR(sctps_datadropchklmt);
1358 SCTP_STAT_INCR(sctps_datadroprwnd);
1364 strmno = ntohs(ch->dp.stream_id);
1365 if (strmno >= asoc->streamincnt) {
1366 struct sctp_paramhdr *phdr;
1369 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1370 0, M_DONTWAIT, 1, MT_DATA);
1372 /* add some space up front so prepend will work well */
1373 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1374 phdr = mtod(mb, struct sctp_paramhdr *);
1376 * Error causes are just param's and this one has
1377 * two back to back phdr, one with the error type
1378 * and size, the other with the streamid and a rsvd
1380 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1381 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1382 phdr->param_length =
1383 htons(sizeof(struct sctp_paramhdr) * 2);
1385 /* We insert the stream in the type field */
1386 phdr->param_type = ch->dp.stream_id;
1387 /* And set the length to 0 for the rsvd field */
1388 phdr->param_length = 0;
1389 sctp_queue_op_err(stcb, mb);
1391 SCTP_STAT_INCR(sctps_badsid);
1392 SCTP_TCB_LOCK_ASSERT(stcb);
1393 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1394 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1395 asoc->highest_tsn_inside_nr_map = tsn;
1397 if (tsn == (asoc->cumulative_tsn + 1)) {
1398 /* Update cum-ack */
1399 asoc->cumulative_tsn = tsn;
1404 * Before we continue lets validate that we are not being fooled by
1405 * an evil attacker. We can only have 4k chunks based on our TSN
1406 * spread allowed by the mapping array 512 * 8 bits, so there is no
1407 * way our stream sequence numbers could have wrapped. We of course
1408 * only validate the FIRST fragment so the bit must be set.
1410 strmseq = ntohs(ch->dp.stream_sequence);
1411 #ifdef SCTP_ASOCLOG_OF_TSNS
1412 SCTP_TCB_LOCK_ASSERT(stcb);
1413 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1414 asoc->tsn_in_at = 0;
1415 asoc->tsn_in_wrapped = 1;
1417 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1418 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1419 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1420 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1421 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1422 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1423 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1424 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1427 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1428 (TAILQ_EMPTY(&asoc->resetHead)) &&
1429 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1430 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1431 /* The incoming sseq is behind where we last delivered? */
1432 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1433 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1434 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1435 asoc->strmin[strmno].last_sequence_delivered,
1436 tsn, strmno, strmseq);
1437 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1438 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1439 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1443 /************************************
1444 * From here down we may find ch-> invalid
1445 * so its a good idea NOT to use it.
1446 *************************************/
1448 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1449 if (last_chunk == 0) {
1450 dmbuf = SCTP_M_COPYM(*m,
1451 (offset + sizeof(struct sctp_data_chunk)),
1452 the_len, M_DONTWAIT);
1453 #ifdef SCTP_MBUF_LOGGING
1454 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1457 for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1458 if (SCTP_BUF_IS_EXTENDED(mat)) {
1459 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1465 /* We can steal the last chunk */
1469 /* lop off the top part */
1470 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1471 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1472 l_len = SCTP_BUF_LEN(dmbuf);
1475 * need to count up the size hopefully does not hit
1481 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1482 l_len += SCTP_BUF_LEN(lat);
1485 if (l_len > the_len) {
1486 /* Trim the end round bytes off too */
1487 m_adj(dmbuf, -(l_len - the_len));
1490 if (dmbuf == NULL) {
1491 SCTP_STAT_INCR(sctps_nomem);
1494 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1495 asoc->fragmented_delivery_inprogress == 0 &&
1496 TAILQ_EMPTY(&asoc->resetHead) &&
1498 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1499 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1500 /* Candidate for express delivery */
1502 * Its not fragmented, No PD-API is up, Nothing in the
1503 * delivery queue, Its un-ordered OR ordered and the next to
1504 * deliver AND nothing else is stuck on the stream queue,
1505 * And there is room for it in the socket buffer. Lets just
1506 * stuff it up the buffer....
1509 /* It would be nice to avoid this copy if we could :< */
1510 sctp_alloc_a_readq(stcb, control);
1511 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1516 if (control == NULL) {
1517 goto failed_express_del;
1519 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1520 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1521 asoc->highest_tsn_inside_nr_map = tsn;
1523 sctp_add_to_readq(stcb->sctp_ep, stcb,
1524 control, &stcb->sctp_socket->so_rcv,
1525 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1527 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1528 /* for ordered, bump what we delivered */
1529 asoc->strmin[strmno].last_sequence_delivered++;
1531 SCTP_STAT_INCR(sctps_recvexpress);
1532 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1533 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1534 SCTP_STR_LOG_FROM_EXPRS_DEL);
1538 goto finish_express_del;
1541 /* If we reach here this is a new chunk */
1544 /* Express for fragmented delivery? */
1545 if ((asoc->fragmented_delivery_inprogress) &&
1546 (stcb->asoc.control_pdapi) &&
1547 (asoc->str_of_pdapi == strmno) &&
1548 (asoc->ssn_of_pdapi == strmseq)
1550 control = stcb->asoc.control_pdapi;
1551 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1552 /* Can't be another first? */
1553 goto failed_pdapi_express_del;
1555 if (tsn == (control->sinfo_tsn + 1)) {
1556 /* Yep, we can add it on */
1559 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1562 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1564 &stcb->sctp_socket->so_rcv)) {
1565 SCTP_PRINTF("Append fails end:%d\n", end);
1566 goto failed_pdapi_express_del;
1568 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1569 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1570 asoc->highest_tsn_inside_nr_map = tsn;
1572 SCTP_STAT_INCR(sctps_recvexpressm);
1573 asoc->tsn_last_delivered = tsn;
1574 asoc->fragment_flags = chunk_flags;
1575 asoc->tsn_of_pdapi_last_delivered = tsn;
1576 asoc->last_flags_delivered = chunk_flags;
1577 asoc->last_strm_seq_delivered = strmseq;
1578 asoc->last_strm_no_delivered = strmno;
1580 /* clean up the flags and such */
1581 asoc->fragmented_delivery_inprogress = 0;
1582 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1583 asoc->strmin[strmno].last_sequence_delivered++;
1585 stcb->asoc.control_pdapi = NULL;
1586 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1588 * There could be another message
1591 need_reasm_check = 1;
1595 goto finish_express_del;
1598 failed_pdapi_express_del:
1600 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1601 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1602 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1603 asoc->highest_tsn_inside_nr_map = tsn;
1606 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1607 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1608 asoc->highest_tsn_inside_map = tsn;
1611 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1612 sctp_alloc_a_chunk(stcb, chk);
1614 /* No memory so we drop the chunk */
1615 SCTP_STAT_INCR(sctps_nomem);
1616 if (last_chunk == 0) {
1617 /* we copied it, free the copy */
1618 sctp_m_freem(dmbuf);
1622 chk->rec.data.TSN_seq = tsn;
1623 chk->no_fr_allowed = 0;
1624 chk->rec.data.stream_seq = strmseq;
1625 chk->rec.data.stream_number = strmno;
1626 chk->rec.data.payloadtype = protocol_id;
1627 chk->rec.data.context = stcb->asoc.context;
1628 chk->rec.data.doing_fast_retransmit = 0;
1629 chk->rec.data.rcv_flags = chunk_flags;
1631 chk->send_size = the_len;
1633 atomic_add_int(&net->ref_count, 1);
1636 sctp_alloc_a_readq(stcb, control);
1637 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1642 if (control == NULL) {
1643 /* No memory so we drop the chunk */
1644 SCTP_STAT_INCR(sctps_nomem);
1645 if (last_chunk == 0) {
1646 /* we copied it, free the copy */
1647 sctp_m_freem(dmbuf);
1651 control->length = the_len;
1654 /* Mark it as received */
1655 /* Now queue it where it belongs */
1656 if (control != NULL) {
1657 /* First a sanity check */
1658 if (asoc->fragmented_delivery_inprogress) {
1660 * Ok, we have a fragmented delivery in progress if
1661 * this chunk is next to deliver OR belongs in our
1662 * view to the reassembly, the peer is evil or
1665 uint32_t estimate_tsn;
1667 estimate_tsn = asoc->tsn_last_delivered + 1;
1668 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1669 (estimate_tsn == control->sinfo_tsn)) {
1670 /* Evil/Broke peer */
1671 sctp_m_freem(control->data);
1672 control->data = NULL;
1673 if (control->whoFrom) {
1674 sctp_free_remote_addr(control->whoFrom);
1675 control->whoFrom = NULL;
1677 sctp_free_a_readq(stcb, control);
1678 snprintf(msg, sizeof(msg), "Reas. queue emtpy, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1679 tsn, strmno, strmseq);
1680 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1681 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1682 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1686 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1687 sctp_m_freem(control->data);
1688 control->data = NULL;
1689 if (control->whoFrom) {
1690 sctp_free_remote_addr(control->whoFrom);
1691 control->whoFrom = NULL;
1693 sctp_free_a_readq(stcb, control);
1694 snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1695 tsn, strmno, strmseq);
1696 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1697 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1698 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1704 /* No PDAPI running */
1705 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1707 * Reassembly queue is NOT empty validate
1708 * that this tsn does not need to be in
1709 * reasembly queue. If it does then our peer
1710 * is broken or evil.
1712 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1713 sctp_m_freem(control->data);
1714 control->data = NULL;
1715 if (control->whoFrom) {
1716 sctp_free_remote_addr(control->whoFrom);
1717 control->whoFrom = NULL;
1719 sctp_free_a_readq(stcb, control);
1720 snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1721 tsn, strmno, strmseq);
1722 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1723 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1724 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1730 /* ok, if we reach here we have passed the sanity checks */
1731 if (chunk_flags & SCTP_DATA_UNORDERED) {
1732 /* queue directly into socket buffer */
1733 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1734 sctp_add_to_readq(stcb->sctp_ep, stcb,
1736 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1739 * Special check for when streams are resetting. We
1740 * could be more smart about this and check the
1741 * actual stream to see if it is not being reset..
1742 * that way we would not create a HOLB when amongst
1743 * streams being reset and those not being reset.
1745 * We take complete messages that have a stream reset
1746 * intervening (aka the TSN is after where our
1747 * cum-ack needs to be) off and put them on a
1748 * pending_reply_queue. The reassembly ones we do
1749 * not have to worry about since they are all sorted
1750 * and proceessed by TSN order. It is only the
1751 * singletons I must worry about.
1753 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1754 SCTP_TSN_GT(tsn, liste->tsn)) {
1756 * yep its past where we need to reset... go
1757 * ahead and queue it.
1759 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1761 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1763 struct sctp_queued_to_read *ctlOn,
1765 unsigned char inserted = 0;
1767 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1768 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1772 TAILQ_INSERT_BEFORE(ctlOn, control, next);
1777 if (inserted == 0) {
1779 * must be put at end, use
1780 * prevP (all setup from
1781 * loop) to setup nextP.
1783 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1787 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1794 /* Into the re-assembly queue */
1795 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1798 * the assoc is now gone and chk was put onto the
1799 * reasm queue, which has all been freed.
1806 if (tsn == (asoc->cumulative_tsn + 1)) {
1807 /* Update cum-ack */
1808 asoc->cumulative_tsn = tsn;
1814 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1816 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1818 SCTP_STAT_INCR(sctps_recvdata);
1819 /* Set it present please */
1820 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1821 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1823 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1824 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1825 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
1827 /* check the special flag for stream resets */
1828 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1829 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1831 * we have finished working through the backlogged TSN's now
1832 * time to reset streams. 1: call reset function. 2: free
1833 * pending_reply space 3: distribute any chunks in
1834 * pending_reply_queue.
1836 struct sctp_queued_to_read *ctl, *nctl;
1838 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1839 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1840 SCTP_FREE(liste, SCTP_M_STRESET);
1841 /* sa_ignore FREED_MEMORY */
1842 liste = TAILQ_FIRST(&asoc->resetHead);
1843 if (TAILQ_EMPTY(&asoc->resetHead)) {
1844 /* All can be removed */
1845 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1846 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1847 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1853 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1854 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1858 * if ctl->sinfo_tsn is <= liste->tsn we can
1859 * process it which is the NOT of
1860 * ctl->sinfo_tsn > liste->tsn
1862 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1863 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1870 * Now service re-assembly to pick up anything that has been
1871 * held on reassembly queue?
1873 sctp_deliver_reasm_check(stcb, asoc);
1874 need_reasm_check = 0;
1876 if (need_reasm_check) {
1877 /* Another one waits ? */
1878 sctp_deliver_reasm_check(stcb, asoc);
1883 int8_t sctp_map_lookup_tab[256] = {
1884 0, 1, 0, 2, 0, 1, 0, 3,
1885 0, 1, 0, 2, 0, 1, 0, 4,
1886 0, 1, 0, 2, 0, 1, 0, 3,
1887 0, 1, 0, 2, 0, 1, 0, 5,
1888 0, 1, 0, 2, 0, 1, 0, 3,
1889 0, 1, 0, 2, 0, 1, 0, 4,
1890 0, 1, 0, 2, 0, 1, 0, 3,
1891 0, 1, 0, 2, 0, 1, 0, 6,
1892 0, 1, 0, 2, 0, 1, 0, 3,
1893 0, 1, 0, 2, 0, 1, 0, 4,
1894 0, 1, 0, 2, 0, 1, 0, 3,
1895 0, 1, 0, 2, 0, 1, 0, 5,
1896 0, 1, 0, 2, 0, 1, 0, 3,
1897 0, 1, 0, 2, 0, 1, 0, 4,
1898 0, 1, 0, 2, 0, 1, 0, 3,
1899 0, 1, 0, 2, 0, 1, 0, 7,
1900 0, 1, 0, 2, 0, 1, 0, 3,
1901 0, 1, 0, 2, 0, 1, 0, 4,
1902 0, 1, 0, 2, 0, 1, 0, 3,
1903 0, 1, 0, 2, 0, 1, 0, 5,
1904 0, 1, 0, 2, 0, 1, 0, 3,
1905 0, 1, 0, 2, 0, 1, 0, 4,
1906 0, 1, 0, 2, 0, 1, 0, 3,
1907 0, 1, 0, 2, 0, 1, 0, 6,
1908 0, 1, 0, 2, 0, 1, 0, 3,
1909 0, 1, 0, 2, 0, 1, 0, 4,
1910 0, 1, 0, 2, 0, 1, 0, 3,
1911 0, 1, 0, 2, 0, 1, 0, 5,
1912 0, 1, 0, 2, 0, 1, 0, 3,
1913 0, 1, 0, 2, 0, 1, 0, 4,
1914 0, 1, 0, 2, 0, 1, 0, 3,
1915 0, 1, 0, 2, 0, 1, 0, 8
1920 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1923 * Now we also need to check the mapping array in a couple of ways.
1924 * 1) Did we move the cum-ack point?
1926 * When you first glance at this you might think that all entries that
1927 * make up the postion of the cum-ack would be in the nr-mapping
1928 * array only.. i.e. things up to the cum-ack are always
1929 * deliverable. Thats true with one exception, when its a fragmented
1930 * message we may not deliver the data until some threshold (or all
1931 * of it) is in place. So we must OR the nr_mapping_array and
1932 * mapping_array to get a true picture of the cum-ack.
1934 struct sctp_association *asoc;
1937 int slide_from, slide_end, lgap, distance;
1938 uint32_t old_cumack, old_base, old_highest, highest_tsn;
1942 old_cumack = asoc->cumulative_tsn;
1943 old_base = asoc->mapping_array_base_tsn;
1944 old_highest = asoc->highest_tsn_inside_map;
1946 * We could probably improve this a small bit by calculating the
1947 * offset of the current cum-ack as the starting point.
1950 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
1951 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
1955 /* there is a 0 bit */
1956 at += sctp_map_lookup_tab[val];
1960 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
1962 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
1963 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
1965 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
1966 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
1968 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
1969 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
1970 sctp_print_mapping_array(asoc);
1971 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1972 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
1974 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
1975 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
1978 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
1979 highest_tsn = asoc->highest_tsn_inside_nr_map;
1981 highest_tsn = asoc->highest_tsn_inside_map;
1983 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
1984 /* The complete array was completed by a single FR */
1985 /* highest becomes the cum-ack */
1993 /* clear the array */
1994 clr = ((at + 7) >> 3);
1995 if (clr > asoc->mapping_array_size) {
1996 clr = asoc->mapping_array_size;
1998 memset(asoc->mapping_array, 0, clr);
1999 memset(asoc->nr_mapping_array, 0, clr);
2001 for (i = 0; i < asoc->mapping_array_size; i++) {
2002 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2003 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2004 sctp_print_mapping_array(asoc);
2008 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2009 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2010 } else if (at >= 8) {
2011 /* we can slide the mapping array down */
2012 /* slide_from holds where we hit the first NON 0xff byte */
2015 * now calculate the ceiling of the move using our highest
2018 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2019 slide_end = (lgap >> 3);
2020 if (slide_end < slide_from) {
2021 sctp_print_mapping_array(asoc);
2023 panic("impossible slide");
2025 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2026 lgap, slide_end, slide_from, at);
2030 if (slide_end > asoc->mapping_array_size) {
2032 panic("would overrun buffer");
2034 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2035 asoc->mapping_array_size, slide_end);
2036 slide_end = asoc->mapping_array_size;
2039 distance = (slide_end - slide_from) + 1;
2040 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2041 sctp_log_map(old_base, old_cumack, old_highest,
2042 SCTP_MAP_PREPARE_SLIDE);
2043 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2044 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2046 if (distance + slide_from > asoc->mapping_array_size ||
2049 * Here we do NOT slide forward the array so that
2050 * hopefully when more data comes in to fill it up
2051 * we will be able to slide it forward. Really I
2052 * don't think this should happen :-0
2055 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2056 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2057 (uint32_t) asoc->mapping_array_size,
2058 SCTP_MAP_SLIDE_NONE);
2063 for (ii = 0; ii < distance; ii++) {
2064 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2065 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2068 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2069 asoc->mapping_array[ii] = 0;
2070 asoc->nr_mapping_array[ii] = 0;
2072 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2073 asoc->highest_tsn_inside_map += (slide_from << 3);
2075 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2076 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2078 asoc->mapping_array_base_tsn += (slide_from << 3);
2079 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2080 sctp_log_map(asoc->mapping_array_base_tsn,
2081 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2082 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide whether to send a SACK immediately or (re)start the delayed-ack
 * (RECV) timer.  Called after inbound DATA processing; was_a_gap says
 * whether a gap existed in the mapping array BEFORE this packet arrived.
 * Special case: in SHUTDOWN-SENT state we skip delayed acks entirely and
 * bundle a SHUTDOWN with an immediate SACK.
 * NOTE(review): this extraction elides some lines (braces/else arms);
 * comments below describe only the visible statements.
 */
2089 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2091 struct sctp_association *asoc;
2092 uint32_t highest_tsn;
/* Highest TSN seen is the max of the renegable and non-renegable maps. */
2095 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2096 highest_tsn = asoc->highest_tsn_inside_nr_map;
2098 highest_tsn = asoc->highest_tsn_inside_map;
2102 * Now we need to see if we need to queue a sack or just start the
2103 * timer (if allowed).
2105 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2107 * Ok special case, in SHUTDOWN-SENT case. here we make
2108 * sure SACK timer is off and instead send a SHUTDOWN and a
2111 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2112 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2113 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
/* Send SHUTDOWN to the alternate if one is set, else the primary. */
2115 sctp_send_shutdown(stcb,
2116 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2117 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2121 /* is there a gap now ? */
2122 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2125 * CMT DAC algorithm: increase number of packets received
2128 stcb->asoc.cmt_dac_pkts_rcvd++;
/*
 * Immediate-SACK conditions: an explicit request, a gap just closed,
 * duplicates pending, a gap still open, delayed ack disabled, or the
 * configured packet-count threshold (sack_freq) reached.
 */
2130 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2132 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2134 (stcb->asoc.numduptsns) || /* we have dup's */
2135 (is_a_gap) || /* is still a gap */
2136 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2137 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/*
 * CMT DAC exception: when DAC is enabled and there is nothing that
 * forces an immediate SACK (no explicit request, no dups, delayed ack
 * allowed, no timer already pending), delay the ack instead.
 */
2140 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2141 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2142 (stcb->asoc.send_sack == 0) &&
2143 (stcb->asoc.numduptsns == 0) &&
2144 (stcb->asoc.delayed_ack) &&
2145 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2148 * CMT DAC algorithm: With CMT, delay acks
2149 * even in the face of
2151 * reordering. Therefore, if acks that do not
2152 * have to be sent because of the above
2153 * reasons, will be delayed. That is, acks
2154 * that would have been sent due to gap
2155 * reports will be delayed with DAC. Start
2156 * the delayed ack timer.
2158 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2159 stcb->sctp_ep, stcb, NULL);
2162 * Ok we must build a SACK since the timer
2163 * is pending, we got our first packet OR
2164 * there are gaps or duplicates.
2166 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2167 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* No immediate SACK needed: arm the delayed-ack timer if not running. */
2170 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2171 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2172 stcb->sctp_ep, stcb, NULL);
/*
 * Service the reassembly queue.  If a partial-delivery (PD-API) run is in
 * progress, continue it; otherwise look at the head of the reassembly
 * queue and, if the first fragment of a deliverable message is present
 * (in stream-sequence order, or unordered), start a new PD-API run once
 * either the whole message is queued or enough bytes (pd_point) are
 * available.
 * NOTE(review): extraction elides some lines (braces/else/returns);
 * comments describe visible statements only.
 */
2179 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2181 struct sctp_tmit_chunk *chk;
2182 uint32_t tsize, pd_point;
2185 if (asoc->fragmented_delivery_inprogress) {
2186 sctp_service_reassembly(stcb, asoc);
2188 /* Can we proceed further, i.e. the PD-API is complete */
2189 if (asoc->fragmented_delivery_inprogress) {
2194 * Now is there some other chunk I can deliver from the reassembly
2198 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: reset the reassembly accounting to a known-clean state. */
2200 asoc->size_on_reasm_queue = 0;
2201 asoc->cnt_on_reasm_queue = 0;
/* Next expected stream sequence number for the head chunk's stream. */
2204 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2205 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2206 ((nxt_todel == chk->rec.data.stream_seq) ||
2207 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2209 * Yep the first one is here. We setup to start reception,
2210 * by backing down the TSN just in case we can't deliver.
2214 * Before we start though either all of the message should
2215 * be here or the socket buffer max or nothing on the
2216 * delivery queue and something can be delivered.
2218 if (stcb->sctp_socket) {
/* pd_point = min(rcv socket buffer limit >> shift, ep's PD point). */
2219 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2220 stcb->sctp_ep->partial_delivery_point);
2222 pd_point = stcb->sctp_ep->partial_delivery_point;
2224 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
/* Latch the PD-API state from the head chunk, then deliver. */
2225 asoc->fragmented_delivery_inprogress = 1;
2226 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2227 asoc->str_of_pdapi = chk->rec.data.stream_number;
2228 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2229 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2230 asoc->fragment_flags = chk->rec.data.rcv_flags;
2231 sctp_service_reassembly(stcb, asoc);
2232 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * Walk every chunk in an inbound packet's DATA region and process it.
 * DATA chunks are handed to sctp_process_a_data_chunk(); known control
 * chunk types found here (out of place) are ignored or, under the
 * strict-data-order sysctl, cause an ABORT; unknown chunk types follow
 * the 0x40/0x80 bit rules (report and/or skip vs. discard packet).
 * Afterwards the reassembly queue is serviced and a SACK is sent or
 * scheduled via sctp_sack_check().
 * NOTE(review): extraction elides some lines (braces/else/labels);
 * comments describe visible statements only.
 */
2240 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2241 struct sockaddr *src, struct sockaddr *dst,
2242 struct sctphdr *sh, struct sctp_inpcb *inp,
2243 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2244 uint8_t use_mflowid, uint32_t mflowid,
2245 uint32_t vrf_id, uint16_t port)
2247 struct sctp_data_chunk *ch, chunk_buf;
2248 struct sctp_association *asoc;
2249 int num_chunks = 0; /* number of control chunks processed */
2251 int chk_length, break_flag, last_chunk;
2252 int abort_flag = 0, was_a_gap;
2254 uint32_t highest_tsn;
/* Refresh our advertised receive window before processing. */
2257 sctp_set_rwnd(stcb, &stcb->asoc);
2260 SCTP_TCB_LOCK_ASSERT(stcb);
/* Record whether a gap exists BEFORE this packet, for sctp_sack_check(). */
2262 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2263 highest_tsn = asoc->highest_tsn_inside_nr_map;
2265 highest_tsn = asoc->highest_tsn_inside_map;
2267 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2269 * setup where we got the last DATA packet from for any SACK that
2270 * may need to go out. Don't bump the net. This is done ONLY when a
2271 * chunk is assigned.
2273 asoc->last_data_chunk_from = net;
2276 * Now before we proceed we must figure out if this is a wasted
2277 * cluster... i.e. it is a small packet sent in and yet the driver
2278 * underneath allocated a full cluster for it. If so we must copy it
2279 * to a smaller mbuf and free up the cluster mbuf. This will help
2280 * with cluster starvation. Note for __Panda__ we don't do this
2281 * since it has clusters all the way down to 64 bytes.
2283 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2284 /* we only handle mbufs that are singletons.. not chains */
2285 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2287 /* ok lets see if we can copy the data up */
2290 /* get the pointers and copy */
2291 to = mtod(m, caddr_t *);
2292 from = mtod((*mm), caddr_t *);
2293 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2294 /* copy the length and free up the old */
2295 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2297 /* success, back copy */
2300 /* We are in trouble in the mbuf world .. yikes */
2304 /* get pointer to the first chunk header */
2305 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2306 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2311 * process all DATA chunks...
2313 *high_tsn = asoc->cumulative_tsn;
2315 asoc->data_pkts_seen++;
2316 while (stop_proc == 0) {
2317 /* validate chunk length */
2318 chk_length = ntohs(ch->ch.chunk_length);
2319 if (length - *offset < chk_length) {
2320 /* all done, mutilated chunk */
2324 if (ch->ch.chunk_type == SCTP_DATA) {
/* A DATA chunk needs at least a header plus one payload byte. */
2325 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2327 * Need to send an abort since we had a
2328 * invalid data chunk.
2330 struct mbuf *op_err;
2331 char msg[SCTP_DIAG_INFO_LEN];
2333 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2335 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2336 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2337 sctp_abort_association(inp, stcb, m, iphlen,
2338 src, dst, sh, op_err,
2339 use_mflowid, mflowid,
2343 #ifdef SCTP_AUDITING_ENABLED
2344 sctp_audit_log(0xB1, 0);
/* Is this (padded) chunk the last one in the packet? */
2346 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2351 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2352 chk_length, net, high_tsn, &abort_flag, &break_flag,
2361 * Set because of out of rwnd space and no
2362 * drop rep space left.
2368 /* not a data chunk in the data region */
2369 switch (ch->ch.chunk_type) {
2370 case SCTP_INITIATION:
2371 case SCTP_INITIATION_ACK:
2372 case SCTP_SELECTIVE_ACK:
2373 case SCTP_NR_SELECTIVE_ACK:
2374 case SCTP_HEARTBEAT_REQUEST:
2375 case SCTP_HEARTBEAT_ACK:
2376 case SCTP_ABORT_ASSOCIATION:
2378 case SCTP_SHUTDOWN_ACK:
2379 case SCTP_OPERATION_ERROR:
2380 case SCTP_COOKIE_ECHO:
2381 case SCTP_COOKIE_ACK:
2384 case SCTP_SHUTDOWN_COMPLETE:
2385 case SCTP_AUTHENTICATION:
2386 case SCTP_ASCONF_ACK:
2387 case SCTP_PACKET_DROPPED:
2388 case SCTP_STREAM_RESET:
2389 case SCTP_FORWARD_CUM_TSN:
2392 * Now, what do we do with KNOWN chunks that
2393 * are NOT in the right place?
2395 * For now, I do nothing but ignore them. We
2396 * may later want to add sysctl stuff to
2397 * switch out and do either an ABORT() or
2398 * possibly process them.
2400 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2401 struct mbuf *op_err;
2403 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
2404 sctp_abort_association(inp, stcb,
2408 use_mflowid, mflowid,
2414 /* unknown chunk type, use bit rules */
2415 if (ch->ch.chunk_type & 0x40) {
/* 0x40 bit set: report the unrecognized chunk in an ERROR chunk. */
2416 /* Add a error report to the queue */
2418 struct sctp_paramhdr *phd;
2420 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2422 phd = mtod(merr, struct sctp_paramhdr *);
2424 * We cheat and use param
2425 * type since we did not
2426 * bother to define a error
2427 * cause struct. They are
2428 * the same basic format
2429 * with different names.
2432 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2434 htons(chk_length + sizeof(*phd));
2435 SCTP_BUF_LEN(merr) = sizeof(*phd);
2436 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT);
2437 if (SCTP_BUF_NEXT(merr)) {
2438 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2441 sctp_queue_op_err(stcb, merr);
/* 0x80 bit clear: stop processing the remainder of this packet. */
2448 if ((ch->ch.chunk_type & 0x80) == 0) {
2449 /* discard the rest of this packet */
2451 } /* else skip this bad chunk and
2454 } /* switch of chunk type */
2456 *offset += SCTP_SIZE32(chk_length);
2457 if ((*offset >= length) || stop_proc) {
2458 /* no more data left in the mbuf chain */
2462 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2463 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2472 * we need to report rwnd overrun drops.
2474 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2478 * Did we get data, if so update the time for auto-close and
2479 * give peer credit for being alive.
2481 SCTP_STAT_INCR(sctps_recvpktwithdata);
2482 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2483 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2484 stcb->asoc.overall_error_count,
2486 SCTP_FROM_SCTP_INDATA,
/* Packet with data received: reset the association error counter. */
2489 stcb->asoc.overall_error_count = 0;
2490 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2492 /* now service all of the reassm queue if needed */
2493 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2494 sctp_service_queues(stcb, asoc);
2496 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2497 /* Assure that we ack right away */
2498 stcb->asoc.send_sack = 1;
2500 /* Start a sack timer or QUEUE a SACK for sending */
2501 sctp_sack_check(stcb, was_a_gap);
/*
 * Mark every TSN in one gap-ack block [last_tsn+frag_strt, last_tsn+frag_end]
 * as acked on the sent queue.  Updates biggest_newly_acked_tsn,
 * per-destination (rtx-)pseudo-cumack trackers (CMT CUCv2), SFR/HTNA
 * newack state, flight size, RTO samples, and — for nr_sacking — frees
 * chunk data and marks chunks NR_ACKED.  *p_tp1 carries the scan position
 * across calls so consecutive in-order blocks resume where the last left
 * off; the list is walked circularly at most once per TSN (see 'circled').
 * Returns wake_him (nonzero when data was freed; used for nr-sack only).
 * NOTE(review): extraction elides some lines; comments describe visible
 * statements only.
 */
2506 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2507 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2509 uint32_t * biggest_newly_acked_tsn,
2510 uint32_t * this_sack_lowest_newack,
2513 struct sctp_tmit_chunk *tp1;
2514 unsigned int theTSN;
2515 int j, wake_him = 0, circled = 0;
2517 /* Recover the tp1 we last saw */
2520 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2522 for (j = frag_strt; j <= frag_end; j++) {
/* Absolute TSN covered by this offset of the gap-ack block. */
2523 theTSN = j + last_tsn;
2525 if (tp1->rec.data.doing_fast_retransmit)
2529 * CMT: CUCv2 algorithm. For each TSN being
2530 * processed from the sent queue, track the
2531 * next expected pseudo-cumack, or
2532 * rtx_pseudo_cumack, if required. Separate
2533 * cumack trackers for first transmissions,
2534 * and retransmissions.
2536 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2537 (tp1->snd_count == 1)) {
2538 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2539 tp1->whoTo->find_pseudo_cumack = 0;
2541 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2542 (tp1->snd_count > 1)) {
2543 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2544 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2546 if (tp1->rec.data.TSN_seq == theTSN) {
2547 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2549 * must be held until
2552 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2554 * If it is less than RESEND, it is
2555 * now no-longer in flight.
2556 * Higher values may already be set
2557 * via previous Gap Ack Blocks...
2558 * i.e. ACKED or RESEND.
2560 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2561 *biggest_newly_acked_tsn)) {
2562 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2565 * CMT: SFR algo (and HTNA) - set
2566 * saw_newack to 1 for dest being
2567 * newly acked. update
2568 * this_sack_highest_newack if
2571 if (tp1->rec.data.chunk_was_revoked == 0)
2572 tp1->whoTo->saw_newack = 1;
2574 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2575 tp1->whoTo->this_sack_highest_newack)) {
2576 tp1->whoTo->this_sack_highest_newack =
2577 tp1->rec.data.TSN_seq;
2580 * CMT DAC algo: also update
2581 * this_sack_lowest_newack
2583 if (*this_sack_lowest_newack == 0) {
2584 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2585 sctp_log_sack(*this_sack_lowest_newack,
2587 tp1->rec.data.TSN_seq,
2590 SCTP_LOG_TSN_ACKED);
2592 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2595 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2596 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2597 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2598 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2599 * Separate pseudo_cumack trackers for first transmissions and
2602 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2603 if (tp1->rec.data.chunk_was_revoked == 0) {
2604 tp1->whoTo->new_pseudo_cumack = 1;
2606 tp1->whoTo->find_pseudo_cumack = 1;
2608 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2609 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2611 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2612 if (tp1->rec.data.chunk_was_revoked == 0) {
2613 tp1->whoTo->new_pseudo_cumack = 1;
2615 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2617 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2618 sctp_log_sack(*biggest_newly_acked_tsn,
2620 tp1->rec.data.TSN_seq,
2623 SCTP_LOG_TSN_ACKED);
2625 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2626 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2627 tp1->whoTo->flight_size,
2629 (uintptr_t) tp1->whoTo,
2630 tp1->rec.data.TSN_seq);
/* Newly acked: remove from flight and credit net_ack for cwnd growth. */
2632 sctp_flight_size_decrease(tp1);
2633 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2634 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2637 sctp_total_flight_decrease(stcb, tp1);
2639 tp1->whoTo->net_ack += tp1->send_size;
2640 if (tp1->snd_count < 2) {
2642 * True non-retransmitted chunk
2644 tp1->whoTo->net_ack2 += tp1->send_size;
/* Use this chunk's send time for an RTO measurement. */
2652 sctp_calculate_rto(stcb,
2655 &tp1->sent_rcv_time,
2656 sctp_align_safe_nocopy,
2657 SCTP_RTT_FROM_DATA);
2660 if (tp1->whoTo->rto_needed == 0) {
2661 tp1->whoTo->rto_needed = 1;
2667 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2668 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2669 stcb->asoc.this_sack_highest_gap)) {
2670 stcb->asoc.this_sack_highest_gap =
2671 tp1->rec.data.TSN_seq;
2673 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2674 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2675 #ifdef SCTP_AUDITING_ENABLED
2676 sctp_audit_log(0xB2,
2677 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2682 * All chunks NOT UNSENT fall through here and are marked
2683 * (leave PR-SCTP ones that are to skip alone though)
2685 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2686 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2687 tp1->sent = SCTP_DATAGRAM_MARKED;
2689 if (tp1->rec.data.chunk_was_revoked) {
2690 /* deflate the cwnd */
2691 tp1->whoTo->cwnd -= tp1->book_size;
2692 tp1->rec.data.chunk_was_revoked = 0;
2694 /* NR Sack code here */
2696 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2697 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2698 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2701 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
/* nr-sacked: the peer will never renege, so the data can be freed. */
2704 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2710 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2711 sctp_m_freem(tp1->data);
2718 } /* if (tp1->TSN_seq == theTSN) */
/* Sent queue is TSN-ordered: once past theTSN, stop scanning for it. */
2719 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
2722 tp1 = TAILQ_NEXT(tp1, sctp_next);
2723 if ((tp1 == NULL) && (circled == 0)) {
/* Wrap to the head once, in case fragments were out of order. */
2725 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2727 } /* end while (tp1) */
2730 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2732 /* In case the fragments were not in order we must reset */
2733 } /* end for (j = fragStart */
2735 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate over all gap-ack blocks in a (NR-)SACK chunk.  Each block is
 * pulled from the mbuf chain, validated (start <= end; restart the sent
 * queue scan when blocks arrive out of order), folded into
 * *biggest_tsn_acked, and handed to sctp_process_segment_range() for
 * marking.  Returns nonzero when segment processing freed chunk data.
 * NOTE(review): extraction elides some lines; comments describe visible
 * statements only.
 */
2740 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2741 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2742 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2743 int num_seg, int num_nr_seg, int *rto_ok)
2745 struct sctp_gap_ack_block *frag, block;
2746 struct sctp_tmit_chunk *tp1;
2751 uint16_t frag_strt, frag_end, prev_frag_end;
2753 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* The first num_seg blocks are renegable; the rest are nr (non-renegable). */
2757 for (i = 0; i < (num_seg + num_nr_seg); i++) {
2760 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2762 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2763 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2764 *offset += sizeof(block);
/* Could not read the block from the mbuf chain: stop here. */
2766 return (chunk_freed);
2768 frag_strt = ntohs(frag->start);
2769 frag_end = ntohs(frag->end);
2771 if (frag_strt > frag_end) {
2772 /* This gap report is malformed, skip it. */
2775 if (frag_strt <= prev_frag_end) {
2776 /* This gap report is not in order, so restart. */
2777 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2779 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2780 *biggest_tsn_acked = last_tsn + frag_end;
2787 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2788 non_revocable, &num_frs, biggest_newly_acked_tsn,
2789 this_sack_lowest_newack, rto_ok)) {
2792 prev_frag_end = frag_end;
2794 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2796 sctp_log_fr(*biggest_tsn_acked,
2797 *biggest_newly_acked_tsn,
2798 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2800 return (chunk_freed);
/*
 * Scan the sent queue for chunks above the cumulative ack that were
 * previously gap-acked (ACKED) but are NOT covered by this SACK — the
 * peer has revoked (reneged on) them.  Revoked chunks go back to SENT,
 * are put back into flight, and the cwnd is inflated by book_size to
 * compensate (deflated again when re-acked).  Chunks MARKED in this same
 * SACK are promoted to ACKED.
 * NOTE(review): extraction elides some lines; comments describe visible
 * statements only.
 */
2804 sctp_check_for_revoked(struct sctp_tcb *stcb,
2805 struct sctp_association *asoc, uint32_t cumack,
2806 uint32_t biggest_tsn_acked)
2808 struct sctp_tmit_chunk *tp1;
2810 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2811 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2813 * ok this guy is either ACK or MARKED. If it is
2814 * ACKED it has been previously acked but not this
2815 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Beyond the highest TSN this SACK covers: nothing more to check. */
2818 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2821 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2822 /* it has been revoked */
2823 tp1->sent = SCTP_DATAGRAM_SENT;
2824 tp1->rec.data.chunk_was_revoked = 1;
2826 * We must add this stuff back in to assure
2827 * timers and such get started.
2829 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2830 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2831 tp1->whoTo->flight_size,
2833 (uintptr_t) tp1->whoTo,
2834 tp1->rec.data.TSN_seq);
2836 sctp_flight_size_increase(tp1);
2837 sctp_total_flight_increase(stcb, tp1);
2839 * We inflate the cwnd to compensate for our
2840 * artificial inflation of the flight_size.
2842 tp1->whoTo->cwnd += tp1->book_size;
2843 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2844 sctp_log_sack(asoc->last_acked_seq,
2846 tp1->rec.data.TSN_seq,
2849 SCTP_LOG_TSN_REVOKED);
2851 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2852 /* it has been re-acked in this SACK */
2853 tp1->sent = SCTP_DATAGRAM_ACKED;
/* Sent queue is ordered; UNSENT marks the end of transmitted chunks. */
2856 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
/*
 * Walk the sent queue after a SACK and "strike" (increment the miss
 * indication of) chunks the SACK implies were lost, applying the HTNA
 * rule, CMT's SFR/DAC rules, and fast-recovery exceptions.  Chunks that
 * reach SCTP_DATAGRAM_RESEND are taken out of flight, returned to the
 * peer's rwnd accounting, possibly dropped under PR-SCTP RTX policy,
 * moved to an alternate destination where policy allows, and tagged with
 * fast_retran_tsn for subsequent-FR detection.
 * NOTE(review): extraction elides many lines (braces/else/#endif);
 * comments describe visible statements only.
 */
2863 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2864 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2866 struct sctp_tmit_chunk *tp1;
2867 int strike_flag = 0;
2869 int tot_retrans = 0;
2870 uint32_t sending_seq;
2871 struct sctp_nets *net;
2872 int num_dests_sacked = 0;
2875 * select the sending_seq, this is either the next thing ready to be
2876 * sent but not transmitted, OR, the next seq we assign.
2878 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2880 sending_seq = asoc->sending_seq;
2882 sending_seq = tp1->rec.data.TSN_seq;
2885 /* CMT DAC algo: finding out if SACK is a mixed SACK */
2886 if ((asoc->sctp_cmt_on_off > 0) &&
2887 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2888 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2889 if (net->saw_newack)
/* Snapshot current time once for PR-SCTP TTL expiry checks below. */
2893 if (stcb->asoc.peer_supports_prsctp) {
2894 (void)SCTP_GETTIME_TIMEVAL(&now);
2896 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2898 if (tp1->no_fr_allowed) {
2899 /* this one had a timeout or something */
2902 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2903 if (tp1->sent < SCTP_DATAGRAM_RESEND)
2904 sctp_log_fr(biggest_tsn_newly_acked,
2905 tp1->rec.data.TSN_seq,
2907 SCTP_FR_LOG_CHECK_STRIKE);
/* Past the SACK's coverage, or never sent: no chunk after this matters. */
2909 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2910 tp1->sent == SCTP_DATAGRAM_UNSENT) {
2914 if (stcb->asoc.peer_supports_prsctp) {
2915 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2916 /* Is it expired? */
2917 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2918 /* Yes so drop it */
2919 if (tp1->data != NULL) {
2920 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
2921 SCTP_SO_NOT_LOCKED);
2927 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
2928 /* we are beyond the tsn in the sack */
2931 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
2932 /* either a RESEND, ACKED, or MARKED */
2934 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
2935 /* Continue striking FWD-TSN chunks */
2936 tp1->rec.data.fwd_tsn_cnt++;
2941 * CMT : SFR algo (covers part of DAC and HTNA as well)
2943 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
2945 * No new acks were received for data sent to this
2946 * dest. Therefore, according to the SFR algo for
2947 * CMT, no data sent to this dest can be marked for
2948 * FR using this SACK.
2951 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2952 tp1->whoTo->this_sack_highest_newack)) {
2954 * CMT: New acks were received for data sent to
2955 * this dest. But no new acks were seen for data
2956 * sent after tp1. Therefore, according to the SFR
2957 * algo for CMT, tp1 cannot be marked for FR using
2958 * this SACK. This step covers part of the DAC algo
2959 * and the HTNA algo as well.
2964 * Here we check to see if we were have already done a FR
2965 * and if so we see if the biggest TSN we saw in the sack is
2966 * smaller than the recovery point. If so we don't strike
2967 * the tsn... otherwise we CAN strike the TSN.
2970 * @@@ JRI: Check for CMT if (accum_moved &&
2971 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
2974 if (accum_moved && asoc->fast_retran_loss_recovery) {
2976 * Strike the TSN if in fast-recovery and cum-ack
2979 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2980 sctp_log_fr(biggest_tsn_newly_acked,
2981 tp1->rec.data.TSN_seq,
2983 SCTP_FR_LOG_STRIKE_CHUNK);
2985 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2988 if ((asoc->sctp_cmt_on_off > 0) &&
2989 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2991 * CMT DAC algorithm: If SACK flag is set to
2992 * 0, then lowest_newack test will not pass
2993 * because it would have been set to the
2994 * cumack earlier. If not already to be
2995 * rtx'd, If not a mixed sack and if tp1 is
2996 * not between two sacked TSNs, then mark by
2997 * one more. NOTE that we are marking by one
2998 * additional time since the SACK DAC flag
2999 * indicates that two packets have been
3000 * received after this missing TSN.
3002 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3003 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3004 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3005 sctp_log_fr(16 + num_dests_sacked,
3006 tp1->rec.data.TSN_seq,
3008 SCTP_FR_LOG_STRIKE_CHUNK);
3013 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3014 (asoc->sctp_cmt_on_off == 0)) {
3016 * For those that have done a FR we must take
3017 * special consideration if we strike. I.e the
3018 * biggest_newly_acked must be higher than the
3019 * sending_seq at the time we did the FR.
3022 #ifdef SCTP_FR_TO_ALTERNATE
3024 * If FR's go to new networks, then we must only do
3025 * this for singly homed asoc's. However if the FR's
3026 * go to the same network (Armando's work) then its
3027 * ok to FR multiple times.
3035 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3036 tp1->rec.data.fast_retran_tsn)) {
3038 * Strike the TSN, since this ack is
3039 * beyond where things were when we
3042 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3043 sctp_log_fr(biggest_tsn_newly_acked,
3044 tp1->rec.data.TSN_seq,
3046 SCTP_FR_LOG_STRIKE_CHUNK);
3048 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3052 if ((asoc->sctp_cmt_on_off > 0) &&
3053 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3055 * CMT DAC algorithm: If
3056 * SACK flag is set to 0,
3057 * then lowest_newack test
3058 * will not pass because it
3059 * would have been set to
3060 * the cumack earlier. If
3061 * not already to be rtx'd,
3062 * If not a mixed sack and
3063 * if tp1 is not between two
3064 * sacked TSNs, then mark by
3065 * one more. NOTE that we
3066 * are marking by one
3067 * additional time since the
3068 * SACK DAC flag indicates
3069 * that two packets have
3070 * been received after this
3073 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3074 (num_dests_sacked == 1) &&
3075 SCTP_TSN_GT(this_sack_lowest_newack,
3076 tp1->rec.data.TSN_seq)) {
3077 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3078 sctp_log_fr(32 + num_dests_sacked,
3079 tp1->rec.data.TSN_seq,
3081 SCTP_FR_LOG_STRIKE_CHUNK);
3083 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3091 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3094 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3095 biggest_tsn_newly_acked)) {
3097 * We don't strike these: This is the HTNA
3098 * algorithm i.e. we don't strike If our TSN is
3099 * larger than the Highest TSN Newly Acked.
3103 /* Strike the TSN */
3104 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3105 sctp_log_fr(biggest_tsn_newly_acked,
3106 tp1->rec.data.TSN_seq,
3108 SCTP_FR_LOG_STRIKE_CHUNK);
3110 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3113 if ((asoc->sctp_cmt_on_off > 0) &&
3114 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3116 * CMT DAC algorithm: If SACK flag is set to
3117 * 0, then lowest_newack test will not pass
3118 * because it would have been set to the
3119 * cumack earlier. If not already to be
3120 * rtx'd, If not a mixed sack and if tp1 is
3121 * not between two sacked TSNs, then mark by
3122 * one more. NOTE that we are marking by one
3123 * additional time since the SACK DAC flag
3124 * indicates that two packets have been
3125 * received after this missing TSN.
3127 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3128 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3129 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3130 sctp_log_fr(48 + num_dests_sacked,
3131 tp1->rec.data.TSN_seq,
3133 SCTP_FR_LOG_STRIKE_CHUNK);
/*
 * A chunk that just reached RESEND: pull it out of flight, give its
 * bytes back to the peer's rwnd, and prepare the retransmission.
 */
3139 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3140 struct sctp_nets *alt;
3142 /* fix counts and things */
3143 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3144 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3145 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3147 (uintptr_t) tp1->whoTo,
3148 tp1->rec.data.TSN_seq);
3151 tp1->whoTo->net_ack++;
3152 sctp_flight_size_decrease(tp1);
3153 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3154 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3158 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3159 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3160 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3162 /* add back to the rwnd */
3163 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3165 /* remove from the total flight */
3166 sctp_total_flight_decrease(stcb, tp1);
3168 if ((stcb->asoc.peer_supports_prsctp) &&
3169 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3171 * Has it been retransmitted tv_sec times? -
3172 * we store the retran count there.
3174 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3175 /* Yes, so drop it */
3176 if (tp1->data != NULL) {
3177 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3178 SCTP_SO_NOT_LOCKED);
3180 /* Make sure to flag we had a FR */
3181 tp1->whoTo->net_ack++;
3186 * SCTP_PRINTF("OK, we are now ready to FR this
3189 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3190 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3194 /* This is a subsequent FR */
3195 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3197 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3198 if (asoc->sctp_cmt_on_off > 0) {
3200 * CMT: Using RTX_SSTHRESH policy for CMT.
3201 * If CMT is being used, then pick dest with
3202 * largest ssthresh for any retransmission.
3204 tp1->no_fr_allowed = 1;
3206 /* sa_ignore NO_NULL_CHK */
3207 if (asoc->sctp_cmt_pf > 0) {
3209 * JRS 5/18/07 - If CMT PF is on,
3210 * use the PF version of
3213 alt = sctp_find_alternate_net(stcb, alt, 2);
3216 * JRS 5/18/07 - If only CMT is on,
3217 * use the CMT version of
3220 /* sa_ignore NO_NULL_CHK */
3221 alt = sctp_find_alternate_net(stcb, alt, 1);
3227 * CUCv2: If a different dest is picked for
3228 * the retransmission, then new
3229 * (rtx-)pseudo_cumack needs to be tracked
3230 * for orig dest. Let CUCv2 track new (rtx-)
3231 * pseudo-cumack always.
3234 tp1->whoTo->find_pseudo_cumack = 1;
3235 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3237 } else {/* CMT is OFF */
3239 #ifdef SCTP_FR_TO_ALTERNATE
3240 /* Can we find an alternate? */
3241 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3244 * default behavior is to NOT retransmit
3245 * FR's to an alternate. Armando Caro's
3246 * paper details why.
3252 tp1->rec.data.doing_fast_retransmit = 1;
3254 /* mark the sending seq for possible subsequent FR's */
3256 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3257 * (uint32_t)tpi->rec.data.TSN_seq);
3259 if (TAILQ_EMPTY(&asoc->send_queue)) {
3261 * If the queue of send is empty then its
3262 * the next sequence number that will be
3263 * assigned so we subtract one from this to
3264 * get the one we last sent.
3266 tp1->rec.data.fast_retran_tsn = sending_seq;
3269 * If there are chunks on the send queue
3270 * (unsent data that has made it from the
3271 * stream queues but not out the door, we
3272 * take the first one (which will have the
3273 * lowest TSN) and subtract one to get the
3276 struct sctp_tmit_chunk *ttt;
3278 ttt = TAILQ_FIRST(&asoc->send_queue);
3279 tp1->rec.data.fast_retran_tsn =
3280 ttt->rec.data.TSN_seq;
3285 * this guy had a RTO calculation pending on
3288 if ((tp1->whoTo != NULL) &&
3289 (tp1->whoTo->rto_needed == 0)) {
3290 tp1->whoTo->rto_needed = 1;
/* Move the chunk to the alternate destination, fixing refcounts. */
3294 if (alt != tp1->whoTo) {
3295 /* yes, there is an alternate. */
3296 sctp_free_remote_addr(tp1->whoTo);
3297 /* sa_ignore FREED_MEMORY */
3299 atomic_add_int(&alt->ref_count, 1);
/*
 * PR-SCTP: walk the sent queue from the front and advance
 * asoc->advanced_peer_ack_point over chunks that are marked
 * FORWARD_TSN_SKIP or NR_ACKED (and over TTL-expired RESENDs, which are
 * dropped here).  Stops at the first chunk that cannot be skipped
 * (reliable, or a RESEND whose TTL has not expired).  Returns the last
 * chunk the ack point advanced over (a_adv), for FORWARD-TSN building.
 * NOTE(review): extraction elides some lines; comments describe visible
 * statements only.
 */
3305 struct sctp_tmit_chunk *
3306 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3307 struct sctp_association *asoc)
3309 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated PR-SCTP. */
3313 if (asoc->peer_supports_prsctp == 0) {
3316 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3317 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3318 tp1->sent != SCTP_DATAGRAM_RESEND &&
3319 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3320 /* no chance to advance, out of here */
3323 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3324 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3325 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3326 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3327 asoc->advanced_peer_ack_point,
3328 tp1->rec.data.TSN_seq, 0, 0);
3331 if (!PR_SCTP_ENABLED(tp1->flags)) {
3333 * We can't fwd-tsn past any that are reliable aka
3334 * retransmitted until the asoc fails.
3339 (void)SCTP_GETTIME_TIMEVAL(&now);
3343 * now we got a chunk which is marked for another
3344 * retransmission to a PR-stream but has run out its chances
3345 * already maybe OR has been marked to skip now. Can we skip
3346 * it if its a resend?
3348 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3349 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3351 * Now is this one marked for resend and its time is
3354 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3355 /* Yes so drop it */
3357 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3358 1, SCTP_SO_NOT_LOCKED);
3362 * No, we are done when we hit one for resend
3363 * whose time has not expired.
3369 * Ok now if this chunk is marked to drop it we can clean up
3370 * the chunk, advance our peer ack point and we can check
3373 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3374 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3375 /* advance PeerAckPoint goes forward */
3376 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3377 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3379 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3380 /* No update but we do save the chk */
3385 * If it is still in RESEND we can advance no
/*
 * sctp_fs_audit() -- sanity audit of the association's flight-size
 * accounting.  Classifies every sent_queue chunk by its ->sent state
 * and reports when chunks below RESEND ("inflight") or between RESEND
 * and ACKED ("inbetween") are still present at a point where the
 * caller believed nothing was in flight.  The panic() presumably sits
 * under an elided debug #ifdef (INVARIANTS-style) -- confirm against
 * the full file.
 * NOTE(review): this listing elides lines (the "static int" return-type
 * line, the per-bucket counter increments, return statements and
 * closing braces); comments describe only the visible code.
 */
3395 sctp_fs_audit(struct sctp_association *asoc)
3397 struct sctp_tmit_chunk *chk;
3398 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3399 int entry_flight, entry_cnt, ret;
/* Snapshot entry totals for the diagnostic printout at the bottom. */
3401 entry_flight = asoc->total_flight;
3402 entry_cnt = asoc->total_flight_count;
/* Queue entirely (or over-) counted as PR-SCTP: skip the audit. */
3405 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Bucket every sent-queue chunk by its ->sent marking. */
3408 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3409 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3410 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3411 chk->rec.data.TSN_seq,
3415 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3417 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3419 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything still counted as in flight here is an accounting bug. */
3426 if ((inflight > 0) || (inbetween > 0)) {
3428 panic("Flight size-express incorrect? \n");
3430 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3431 entry_flight, entry_cnt);
3433 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3434 inflight, inbetween, resend, above, acked);
/*
 * sctp_window_probe_recovery() -- once the peer's receive window has
 * reopened, undo the in-flight accounting for a chunk that had been
 * sent as a zero-window probe and mark it SCTP_DATAGRAM_RESEND so it
 * goes out again through the normal retransmission path.
 * NOTE(review): this listing elides lines (the return-type line,
 * early-return statements and closing braces); comments describe only
 * the visible code.
 */
3443 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3444 struct sctp_association *asoc,
3445 struct sctp_tmit_chunk *tp1)
3447 tp1->window_probe = 0;
/* Chunk already acked (or its data is gone): log and do not touch
 * the flight accounting. */
3448 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3449 /* TSN's skipped we do NOT move back. */
3450 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3451 tp1->whoTo->flight_size,
3453 (uintptr_t) tp1->whoTo,
3454 tp1->rec.data.TSN_seq);
3457 /* First setup this by shrinking flight */
/* Optional CC-module hook: notify that this TSN left the network. */
3458 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3459 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3462 sctp_flight_size_decrease(tp1);
3463 sctp_total_flight_decrease(stcb, tp1);
3464 /* Now mark for resend */
3465 tp1->sent = SCTP_DATAGRAM_RESEND;
3466 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3468 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3469 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3470 tp1->whoTo->flight_size,
3472 (uintptr_t) tp1->whoTo,
3473 tp1->rec.data.TSN_seq);
/*
 * sctp_express_handle_sack() -- fast-path handler for a SACK that
 * carries only a new cumulative ack (no gap-ack blocks).  Advances
 * last_acked_seq, frees every sent_queue chunk covered by the cum-ack,
 * updates per-net congestion/RTO state via the pluggable CC module,
 * recomputes the peer's rwnd, repairs window-probe accounting, manages
 * the T3 retransmission timers, drives the SHUTDOWN state machine when
 * the queues drain, and performs the PR-SCTP advanced-peer-ack-point /
 * FORWARD-TSN procedures.  Called with the TCB lock held
 * (SCTP_TCB_LOCK_ASSERT below).
 * NOTE(review): the embedded numbering in this listing is
 * discontinuous -- declarations (old_rwnd, send_s, to_ticks, so, ...),
 * returns, gotos and many closing braces have been elided.  Comments
 * below annotate only the visible lines; confirm details against the
 * full file.
 */
3478 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3479 uint32_t rwnd, int *abort_now, int ecne_seen)
3481 struct sctp_nets *net;
3482 struct sctp_association *asoc;
3483 struct sctp_tmit_chunk *tp1, *tp2;
3485 int win_probe_recovery = 0;
3486 int win_probe_recovered = 0;
3487 int j, done_once = 0;
/* Optional arrival logging of the express (cum-ack only) SACK. */
3490 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3491 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3492 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3494 SCTP_TCB_LOCK_ASSERT(stcb);
/* Debug ring buffer of received cum-acks. */
3495 #ifdef SCTP_ASOCLOG_OF_TSNS
3496 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3497 stcb->asoc.cumack_log_at++;
3498 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3499 stcb->asoc.cumack_log_at = 0;
3503 old_rwnd = asoc->peers_rwnd;
/* Old (behind) cum-ack vs. pure window-update SACK. */
3504 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3507 } else if (asoc->last_acked_seq == cumack) {
3508 /* Window update sack */
3509 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3510 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3511 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3512 /* SWS sender side engages */
3513 asoc->peers_rwnd = 0;
3515 if (asoc->peers_rwnd > old_rwnd) {
3520 /* First setup for CC stuff */
3521 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3522 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3523 /* Drag along the window_tsn for cwr's */
3524 net->cwr_window_tsn = cumack;
3526 net->prev_cwnd = net->cwnd;
3531 * CMT: Reset CUC and Fast recovery algo variables before
3534 net->new_pseudo_cumack = 0;
3535 net->will_exit_fast_recovery = 0;
3536 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3537 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Strict validation: a cum-ack at/above our next sending TSN means
 * the peer acked data we never sent -> protocol-violation abort. */
3540 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3543 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3544 tp1 = TAILQ_LAST(&asoc->sent_queue,
3545 sctpchunk_listhead);
3546 send_s = tp1->rec.data.TSN_seq + 1;
3548 send_s = asoc->sending_seq;
3550 if (SCTP_TSN_GE(cumack, send_s)) {
3552 struct mbuf *op_err;
3553 char msg[SCTP_DIAG_INFO_LEN];
3557 panic("Impossible sack 1");
3562 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal then TSN %8.8x",
3564 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3565 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3566 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3571 asoc->this_sack_highest_gap = cumack;
3572 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3573 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3574 stcb->asoc.overall_error_count,
3576 SCTP_FROM_SCTP_INDATA,
/* Any valid SACK clears the association-wide error counter. */
3579 stcb->asoc.overall_error_count = 0;
/* Cum-ack moved forward: free every covered chunk off sent_queue,
 * adjusting flight size, net_ack/net_ack2, RTO samples and the
 * CMT pseudo-cumack state as we go. */
3580 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3581 /* process the new consecutive TSN first */
3582 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3583 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3584 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3585 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3587 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3589 * If it is less than ACKED, it is
3590 * now no-longer in flight. Higher
3591 * values may occur during marking
3593 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3594 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3595 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3596 tp1->whoTo->flight_size,
3598 (uintptr_t) tp1->whoTo,
3599 tp1->rec.data.TSN_seq);
3601 sctp_flight_size_decrease(tp1);
3602 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3603 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3606 /* sa_ignore NO_NULL_CHK */
3607 sctp_total_flight_decrease(stcb, tp1);
3609 tp1->whoTo->net_ack += tp1->send_size;
/* snd_count < 2: never retransmitted, so the ack is unambiguous
 * (Karn's rule) and usable for an RTT sample. */
3610 if (tp1->snd_count < 2) {
3612 * True non-retransmited
3615 tp1->whoTo->net_ack2 +=
3618 /* update RTO too? */
3627 sctp_calculate_rto(stcb,
3629 &tp1->sent_rcv_time,
3630 sctp_align_safe_nocopy,
3631 SCTP_RTT_FROM_DATA);
3634 if (tp1->whoTo->rto_needed == 0) {
3635 tp1->whoTo->rto_needed = 1;
3641 * CMT: CUCv2 algorithm. From the
3642 * cumack'd TSNs, for each TSN being
3643 * acked for the first time, set the
3644 * following variables for the
3645 * corresp destination.
3646 * new_pseudo_cumack will trigger a
3648 * find_(rtx_)pseudo_cumack will
3649 * trigger search for the next
3650 * expected (rtx-)pseudo-cumack.
3652 tp1->whoTo->new_pseudo_cumack = 1;
3653 tp1->whoTo->find_pseudo_cumack = 1;
3654 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3656 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3657 /* sa_ignore NO_NULL_CHK */
3658 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3661 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3662 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3664 if (tp1->rec.data.chunk_was_revoked) {
3665 /* deflate the cwnd */
3666 tp1->whoTo->cwnd -= tp1->book_size;
3667 tp1->rec.data.chunk_was_revoked = 0;
/* NR-acked chunks were already taken off the per-stream count. */
3669 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3670 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3671 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3674 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
/* Unlink and free the fully-acked chunk. */
3678 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3680 /* sa_ignore NO_NULL_CHK */
3681 sctp_free_bufspace(stcb, asoc, tp1, 1);
3682 sctp_m_freem(tp1->data);
3685 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3686 sctp_log_sack(asoc->last_acked_seq,
3688 tp1->rec.data.TSN_seq,
3691 SCTP_LOG_FREE_SENT);
3693 asoc->sent_queue_cnt--;
3694 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Send-buffer space was freed above: wake any blocked writer.  The
 * Apple/lock-testing variant must drop the TCB lock to take the
 * socket lock, then re-validate the association. */
3701 /* sa_ignore NO_NULL_CHK */
3702 if (stcb->sctp_socket) {
3703 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3707 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3708 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3709 /* sa_ignore NO_NULL_CHK */
3710 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3712 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3713 so = SCTP_INP_SO(stcb->sctp_ep);
3714 atomic_add_int(&stcb->asoc.refcnt, 1);
3715 SCTP_TCB_UNLOCK(stcb);
3716 SCTP_SOCKET_LOCK(so, 1);
3717 SCTP_TCB_LOCK(stcb);
3718 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3719 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3720 /* assoc was freed while we were unlocked */
3721 SCTP_SOCKET_UNLOCK(so, 1);
3725 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3726 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3727 SCTP_SOCKET_UNLOCK(so, 1);
3730 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3731 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3735 /* JRS - Use the congestion control given in the CC module */
3736 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3737 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3738 if (net->net_ack2 > 0) {
3740 * Karn's rule applies to clearing error
3741 * count, this is optional.
3743 net->error_count = 0;
3744 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3745 /* addr came good */
3746 net->dest_state |= SCTP_ADDR_REACHABLE;
3747 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3748 0, (void *)net, SCTP_SO_NOT_LOCKED);
3750 if (net == stcb->asoc.primary_destination) {
3751 if (stcb->asoc.alternate) {
3753 * release the alternate,
3756 sctp_free_remote_addr(stcb->asoc.alternate);
3757 stcb->asoc.alternate = NULL;
/* Leaving potentially-failed (PF) state: restart heartbeats and let
 * the CC module readjust cwnd. */
3760 if (net->dest_state & SCTP_ADDR_PF) {
3761 net->dest_state &= ~SCTP_ADDR_PF;
3762 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
3763 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3764 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3765 /* Done with this net */
3768 /* restore any doubled timers */
3769 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3770 if (net->RTO < stcb->asoc.minrto) {
3771 net->RTO = stcb->asoc.minrto;
3773 if (net->RTO > stcb->asoc.maxrto) {
3774 net->RTO = stcb->asoc.maxrto;
3778 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3780 asoc->last_acked_seq = cumack;
/* Everything acked: zero all flight accounting outright. */
3782 if (TAILQ_EMPTY(&asoc->sent_queue)) {
3783 /* nothing left in-flight */
3784 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3785 net->flight_size = 0;
3786 net->partial_bytes_acked = 0;
3788 asoc->total_flight = 0;
3789 asoc->total_flight_count = 0;
/* Recompute peer rwnd from the advertised window minus bytes still
 * in flight (plus per-chunk overhead); clamp for SWS avoidance. */
3792 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3793 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3794 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3795 /* SWS sender side engages */
3796 asoc->peers_rwnd = 0;
3798 if (asoc->peers_rwnd > old_rwnd) {
3799 win_probe_recovery = 1;
3801 /* Now assure a timer where data is queued at */
/* Per-destination pass: undo window-probe accounting if the window
 * reopened, then start/stop the T3-rtx timer per flight state. */
3804 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3807 if (win_probe_recovery && (net->window_probe)) {
3808 win_probe_recovered = 1;
3810 * Find first chunk that was used with window probe
3811 * and clear the sent
3813 /* sa_ignore FREED_MEMORY */
3814 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3815 if (tp1->window_probe) {
3816 /* move back to data send queue */
3817 sctp_window_probe_recovery(stcb, asoc, tp1);
3822 if (net->RTO == 0) {
3823 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3825 to_ticks = MSEC_TO_TICKS(net->RTO);
3827 if (net->flight_size) {
3829 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3830 sctp_timeout_handler, &net->rxt_timer);
3831 if (net->window_probe) {
3832 net->window_probe = 0;
3835 if (net->window_probe) {
3837 * In window probes we must assure a timer
3838 * is still running there
3840 net->window_probe = 0;
3841 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3842 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3843 sctp_timeout_handler, &net->rxt_timer);
3845 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3846 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3848 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
/* Flight claims empty but chunks remain: audit and, if the audit
 * flags an inconsistency, rebuild the accounting from the queue. */
3853 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3854 (asoc->sent_queue_retran_cnt == 0) &&
3855 (win_probe_recovered == 0) &&
3858 * huh, this should not happen unless all packets are
3859 * PR-SCTP and marked to skip of course.
3861 if (sctp_fs_audit(asoc)) {
3862 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3863 net->flight_size = 0;
3865 asoc->total_flight = 0;
3866 asoc->total_flight_count = 0;
3867 asoc->sent_queue_retran_cnt = 0;
3868 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3869 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3870 sctp_flight_size_increase(tp1);
3871 sctp_total_flight_increase(stcb, tp1);
3872 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3873 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3880 /**********************************/
3881 /* Now what about shutdown issues */
3882 /**********************************/
3883 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3884 /* nothing left on sendqueue.. consider done */
/* Writer blocked on a shutdown with a partially-written message:
 * flag PARTIAL_MSG_LEFT so the abort below fires. */
3886 if ((asoc->stream_queue_cnt == 1) &&
3887 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3888 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3889 (asoc->locked_on_sending)
3891 struct sctp_stream_queue_pending *sp;
3894 * I may be in a state where we got all across.. but
3895 * cannot write more due to a shutdown... we abort
3896 * since the user did not indicate EOR in this case.
3897 * The sp will be cleaned during free of the asoc.
3899 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3901 if ((sp) && (sp->length == 0)) {
3902 /* Let cleanup code purge it */
3903 if (sp->msg_is_complete) {
3904 asoc->stream_queue_cnt--;
3906 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3907 asoc->locked_on_sending = NULL;
3908 asoc->stream_queue_cnt--;
/* SHUTDOWN-PENDING and drained: abort on a partial message, else
 * move to SHUTDOWN-SENT and send the SHUTDOWN chunk. */
3912 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3913 (asoc->stream_queue_cnt == 0)) {
3914 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3915 /* Need to abort here */
3916 struct mbuf *op_err;
3921 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
3922 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
3923 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3925 struct sctp_nets *netp;
3927 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
3928 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3929 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3931 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
3932 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
3933 sctp_stop_timers_for_shutdown(stcb);
3934 if (asoc->alternate) {
3935 netp = asoc->alternate;
3937 netp = asoc->primary_destination;
3939 sctp_send_shutdown(stcb, netp);
3940 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
3941 stcb->sctp_ep, stcb, netp);
3942 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
3943 stcb->sctp_ep, stcb, netp);
/* Peer initiated shutdown and we drained: answer SHUTDOWN-ACK. */
3945 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
3946 (asoc->stream_queue_cnt == 0)) {
3947 struct sctp_nets *netp;
3949 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3952 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3953 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
3954 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
3955 sctp_stop_timers_for_shutdown(stcb);
3956 if (asoc->alternate) {
3957 netp = asoc->alternate;
3959 netp = asoc->primary_destination;
3961 sctp_send_shutdown_ack(stcb, netp);
3962 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
3963 stcb->sctp_ep, stcb, netp);
3966 /*********************************************/
3967 /* Here we perform PR-SCTP procedures */
3969 /*********************************************/
3970 /* C1. update advancedPeerAckPoint */
3971 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
3972 asoc->advanced_peer_ack_point = cumack;
3974 /* PR-Sctp issues need to be addressed too */
3975 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
3976 struct sctp_tmit_chunk *lchk;
3977 uint32_t old_adv_peer_ack_point;
3979 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
3980 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
3981 /* C3. See if we need to send a Fwd-TSN */
3982 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
3984 * ISSUE with ECN, see FWD-TSN processing.
3986 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
3987 send_forward_tsn(stcb, asoc);
3989 /* try to FR fwd-tsn's that get lost too */
3990 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
3991 send_forward_tsn(stcb, asoc);
3996 /* Assure a timer is up */
3997 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
3998 stcb->sctp_ep, stcb, lchk->whoTo);
/* Final rwnd bookkeeping log before returning. */
4001 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4002 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4004 stcb->asoc.peers_rwnd,
4005 stcb->asoc.total_flight,
4006 stcb->asoc.total_output_queue_size);
4011 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4012 struct sctp_tcb *stcb,
4013 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4014 int *abort_now, uint8_t flags,
4015 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4017 struct sctp_association *asoc;
4018 struct sctp_tmit_chunk *tp1, *tp2;
4019 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4020 uint16_t wake_him = 0;
4021 uint32_t send_s = 0;
4023 int accum_moved = 0;
4024 int will_exit_fast_recovery = 0;
4025 uint32_t a_rwnd, old_rwnd;
4026 int win_probe_recovery = 0;
4027 int win_probe_recovered = 0;
4028 struct sctp_nets *net = NULL;
4031 uint8_t reneged_all = 0;
4032 uint8_t cmt_dac_flag;
4035 * we take any chance we can to service our queues since we cannot
4036 * get awoken when the socket is read from :<
4039 * Now perform the actual SACK handling: 1) Verify that it is not an
4040 * old sack, if so discard. 2) If there is nothing left in the send
4041 * queue (cum-ack is equal to last acked) then you have a duplicate
4042 * too, update any rwnd change and verify no timers are running.
4043 * then return. 3) Process any new consequtive data i.e. cum-ack
4044 * moved process these first and note that it moved. 4) Process any
4045 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4046 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4047 * sync up flightsizes and things, stop all timers and also check
4048 * for shutdown_pending state. If so then go ahead and send off the
4049 * shutdown. If in shutdown recv, send off the shutdown-ack and
4050 * start that timer, Ret. 9) Strike any non-acked things and do FR
4051 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4052 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4053 * if in shutdown_recv state.
4055 SCTP_TCB_LOCK_ASSERT(stcb);
4057 this_sack_lowest_newack = 0;
4058 SCTP_STAT_INCR(sctps_slowpath_sack);
4060 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4061 #ifdef SCTP_ASOCLOG_OF_TSNS
4062 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4063 stcb->asoc.cumack_log_at++;
4064 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4065 stcb->asoc.cumack_log_at = 0;
4070 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4071 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4072 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4074 old_rwnd = stcb->asoc.peers_rwnd;
4075 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4076 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4077 stcb->asoc.overall_error_count,
4079 SCTP_FROM_SCTP_INDATA,
4082 stcb->asoc.overall_error_count = 0;
4084 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4085 sctp_log_sack(asoc->last_acked_seq,
4092 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4094 uint32_t *dupdata, dblock;
4096 for (i = 0; i < num_dup; i++) {
4097 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4098 sizeof(uint32_t), (uint8_t *) & dblock);
4099 if (dupdata == NULL) {
4102 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4105 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4107 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4108 tp1 = TAILQ_LAST(&asoc->sent_queue,
4109 sctpchunk_listhead);
4110 send_s = tp1->rec.data.TSN_seq + 1;
4113 send_s = asoc->sending_seq;
4115 if (SCTP_TSN_GE(cum_ack, send_s)) {
4116 struct mbuf *op_err;
4117 char msg[SCTP_DIAG_INFO_LEN];
4120 * no way, we have not even sent this TSN out yet.
4121 * Peer is hopelessly messed up with us.
4123 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4126 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4127 tp1->rec.data.TSN_seq, (void *)tp1);
4132 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal then TSN %8.8x",
4134 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4135 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4136 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4140 /**********************/
4141 /* 1) check the range */
4142 /**********************/
4143 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4144 /* acking something behind */
4147 /* update the Rwnd of the peer */
4148 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4149 TAILQ_EMPTY(&asoc->send_queue) &&
4150 (asoc->stream_queue_cnt == 0)) {
4151 /* nothing left on send/sent and strmq */
4152 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4153 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4154 asoc->peers_rwnd, 0, 0, a_rwnd);
4156 asoc->peers_rwnd = a_rwnd;
4157 if (asoc->sent_queue_retran_cnt) {
4158 asoc->sent_queue_retran_cnt = 0;
4160 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4161 /* SWS sender side engages */
4162 asoc->peers_rwnd = 0;
4164 /* stop any timers */
4165 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4166 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4167 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4168 net->partial_bytes_acked = 0;
4169 net->flight_size = 0;
4171 asoc->total_flight = 0;
4172 asoc->total_flight_count = 0;
4176 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4177 * things. The total byte count acked is tracked in netAckSz AND
4178 * netAck2 is used to track the total bytes acked that are un-
4179 * amibguious and were never retransmitted. We track these on a per
4180 * destination address basis.
4182 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4183 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4184 /* Drag along the window_tsn for cwr's */
4185 net->cwr_window_tsn = cum_ack;
4187 net->prev_cwnd = net->cwnd;
4192 * CMT: Reset CUC and Fast recovery algo variables before
4195 net->new_pseudo_cumack = 0;
4196 net->will_exit_fast_recovery = 0;
4197 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4198 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4201 /* process the new consecutive TSN first */
4202 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4203 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4204 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4206 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4208 * If it is less than ACKED, it is
4209 * now no-longer in flight. Higher
4210 * values may occur during marking
4212 if ((tp1->whoTo->dest_state &
4213 SCTP_ADDR_UNCONFIRMED) &&
4214 (tp1->snd_count < 2)) {
4216 * If there was no retran
4217 * and the address is
4218 * un-confirmed and we sent
4220 * sacked.. its confirmed,
4223 tp1->whoTo->dest_state &=
4224 ~SCTP_ADDR_UNCONFIRMED;
4226 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4227 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4228 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4229 tp1->whoTo->flight_size,
4231 (uintptr_t) tp1->whoTo,
4232 tp1->rec.data.TSN_seq);
4234 sctp_flight_size_decrease(tp1);
4235 sctp_total_flight_decrease(stcb, tp1);
4236 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4237 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4241 tp1->whoTo->net_ack += tp1->send_size;
4243 /* CMT SFR and DAC algos */
4244 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4245 tp1->whoTo->saw_newack = 1;
4247 if (tp1->snd_count < 2) {
4249 * True non-retransmited
4252 tp1->whoTo->net_ack2 +=
4255 /* update RTO too? */
4259 sctp_calculate_rto(stcb,
4261 &tp1->sent_rcv_time,
4262 sctp_align_safe_nocopy,
4263 SCTP_RTT_FROM_DATA);
4266 if (tp1->whoTo->rto_needed == 0) {
4267 tp1->whoTo->rto_needed = 1;
4273 * CMT: CUCv2 algorithm. From the
4274 * cumack'd TSNs, for each TSN being
4275 * acked for the first time, set the
4276 * following variables for the
4277 * corresp destination.
4278 * new_pseudo_cumack will trigger a
4280 * find_(rtx_)pseudo_cumack will
4281 * trigger search for the next
4282 * expected (rtx-)pseudo-cumack.
4284 tp1->whoTo->new_pseudo_cumack = 1;
4285 tp1->whoTo->find_pseudo_cumack = 1;
4286 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4289 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4290 sctp_log_sack(asoc->last_acked_seq,
4292 tp1->rec.data.TSN_seq,
4295 SCTP_LOG_TSN_ACKED);
4297 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4298 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4301 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4302 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4303 #ifdef SCTP_AUDITING_ENABLED
4304 sctp_audit_log(0xB3,
4305 (asoc->sent_queue_retran_cnt & 0x000000ff));
4308 if (tp1->rec.data.chunk_was_revoked) {
4309 /* deflate the cwnd */
4310 tp1->whoTo->cwnd -= tp1->book_size;
4311 tp1->rec.data.chunk_was_revoked = 0;
4313 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4314 tp1->sent = SCTP_DATAGRAM_ACKED;
4321 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4322 /* always set this up to cum-ack */
4323 asoc->this_sack_highest_gap = last_tsn;
4325 if ((num_seg > 0) || (num_nr_seg > 0)) {
4328 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4329 * to be greater than the cumack. Also reset saw_newack to 0
4332 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4333 net->saw_newack = 0;
4334 net->this_sack_highest_newack = last_tsn;
4338 * thisSackHighestGap will increase while handling NEW
4339 * segments this_sack_highest_newack will increase while
4340 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4341 * used for CMT DAC algo. saw_newack will also change.
4343 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4344 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4345 num_seg, num_nr_seg, &rto_ok)) {
4348 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4350 * validate the biggest_tsn_acked in the gap acks if
4351 * strict adherence is wanted.
4353 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4355 * peer is either confused or we are under
4356 * attack. We must abort.
4358 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4359 biggest_tsn_acked, send_s);
4364 /*******************************************/
4365 /* cancel ALL T3-send timer if accum moved */
4366 /*******************************************/
4367 if (asoc->sctp_cmt_on_off > 0) {
4368 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4369 if (net->new_pseudo_cumack)
4370 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4372 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4377 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4378 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4379 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4383 /********************************************/
4384 /* drop the acked chunks from the sentqueue */
4385 /********************************************/
4386 asoc->last_acked_seq = cum_ack;
4388 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4389 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4392 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4393 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4394 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4397 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4401 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4402 if (PR_SCTP_ENABLED(tp1->flags)) {
4403 if (asoc->pr_sctp_cnt != 0)
4404 asoc->pr_sctp_cnt--;
4406 asoc->sent_queue_cnt--;
4408 /* sa_ignore NO_NULL_CHK */
4409 sctp_free_bufspace(stcb, asoc, tp1, 1);
4410 sctp_m_freem(tp1->data);
4412 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4413 asoc->sent_queue_cnt_removeable--;
4416 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4417 sctp_log_sack(asoc->last_acked_seq,
4419 tp1->rec.data.TSN_seq,
4422 SCTP_LOG_FREE_SENT);
4424 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4427 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4429 panic("Warning flight size is postive and should be 0");
4431 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4432 asoc->total_flight);
4434 asoc->total_flight = 0;
4436 /* sa_ignore NO_NULL_CHK */
4437 if ((wake_him) && (stcb->sctp_socket)) {
4438 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4442 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4443 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4444 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4446 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4447 so = SCTP_INP_SO(stcb->sctp_ep);
4448 atomic_add_int(&stcb->asoc.refcnt, 1);
4449 SCTP_TCB_UNLOCK(stcb);
4450 SCTP_SOCKET_LOCK(so, 1);
4451 SCTP_TCB_LOCK(stcb);
4452 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4453 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4454 /* assoc was freed while we were unlocked */
4455 SCTP_SOCKET_UNLOCK(so, 1);
4459 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4460 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4461 SCTP_SOCKET_UNLOCK(so, 1);
4464 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4465 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4469 if (asoc->fast_retran_loss_recovery && accum_moved) {
4470 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4471 /* Setup so we will exit RFC2582 fast recovery */
4472 will_exit_fast_recovery = 1;
4476 * Check for revoked fragments:
4478 * if Previous sack - Had no frags then we can't have any revoked if
4479 * Previous sack - Had frag's then - If we now have frags aka
4480 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4481 * some of them. else - The peer revoked all ACKED fragments, since
4482 * we had some before and now we have NONE.
4486 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4487 asoc->saw_sack_with_frags = 1;
4488 } else if (asoc->saw_sack_with_frags) {
4489 int cnt_revoked = 0;
4491 /* Peer revoked all dg's marked or acked */
4492 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4493 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4494 tp1->sent = SCTP_DATAGRAM_SENT;
4495 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4496 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4497 tp1->whoTo->flight_size,
4499 (uintptr_t) tp1->whoTo,
4500 tp1->rec.data.TSN_seq);
4502 sctp_flight_size_increase(tp1);
4503 sctp_total_flight_increase(stcb, tp1);
4504 tp1->rec.data.chunk_was_revoked = 1;
4506 * To ensure that this increase in
4507 * flightsize, which is artificial, does not
4508 * throttle the sender, we also increase the
4509 * cwnd artificially.
4511 tp1->whoTo->cwnd += tp1->book_size;
4518 asoc->saw_sack_with_frags = 0;
4521 asoc->saw_sack_with_nr_frags = 1;
4523 asoc->saw_sack_with_nr_frags = 0;
4525 /* JRS - Use the congestion control given in the CC module */
4526 if (ecne_seen == 0) {
4527 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4528 if (net->net_ack2 > 0) {
4530 * Karn's rule applies to clearing error
4531 * count, this is optional.
4533 net->error_count = 0;
4534 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4535 /* addr came good */
4536 net->dest_state |= SCTP_ADDR_REACHABLE;
4537 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4538 0, (void *)net, SCTP_SO_NOT_LOCKED);
4540 if (net == stcb->asoc.primary_destination) {
4541 if (stcb->asoc.alternate) {
4543 * release the alternate,
4546 sctp_free_remote_addr(stcb->asoc.alternate);
4547 stcb->asoc.alternate = NULL;
4550 if (net->dest_state & SCTP_ADDR_PF) {
4551 net->dest_state &= ~SCTP_ADDR_PF;
4552 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4553 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4554 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4555 /* Done with this net */
4558 /* restore any doubled timers */
4559 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4560 if (net->RTO < stcb->asoc.minrto) {
4561 net->RTO = stcb->asoc.minrto;
4563 if (net->RTO > stcb->asoc.maxrto) {
4564 net->RTO = stcb->asoc.maxrto;
4568 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4570 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4571 /* nothing left in-flight */
4572 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4573 /* stop all timers */
4574 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4575 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4576 net->flight_size = 0;
4577 net->partial_bytes_acked = 0;
4579 asoc->total_flight = 0;
4580 asoc->total_flight_count = 0;
4582 /**********************************/
4583 /* Now what about shutdown issues */
4584 /**********************************/
4585 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4586 /* nothing left on sendqueue.. consider done */
4587 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4588 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4589 asoc->peers_rwnd, 0, 0, a_rwnd);
4591 asoc->peers_rwnd = a_rwnd;
4592 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4593 /* SWS sender side engages */
4594 asoc->peers_rwnd = 0;
4597 if ((asoc->stream_queue_cnt == 1) &&
4598 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4599 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4600 (asoc->locked_on_sending)
4602 struct sctp_stream_queue_pending *sp;
4605 * I may be in a state where we got all across.. but
4606 * cannot write more due to a shutdown... we abort
4607 * since the user did not indicate EOR in this case.
4609 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4611 if ((sp) && (sp->length == 0)) {
4612 asoc->locked_on_sending = NULL;
4613 if (sp->msg_is_complete) {
4614 asoc->stream_queue_cnt--;
4616 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4617 asoc->stream_queue_cnt--;
4621 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4622 (asoc->stream_queue_cnt == 0)) {
4623 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4624 /* Need to abort here */
4625 struct mbuf *op_err;
4630 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4631 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4632 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4635 struct sctp_nets *netp;
4637 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4638 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4639 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4641 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4642 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4643 sctp_stop_timers_for_shutdown(stcb);
4644 if (asoc->alternate) {
4645 netp = asoc->alternate;
4647 netp = asoc->primary_destination;
4649 sctp_send_shutdown(stcb, netp);
4650 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4651 stcb->sctp_ep, stcb, netp);
4652 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4653 stcb->sctp_ep, stcb, netp);
4656 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4657 (asoc->stream_queue_cnt == 0)) {
4658 struct sctp_nets *netp;
4660 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4663 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4664 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4665 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4666 sctp_stop_timers_for_shutdown(stcb);
4667 if (asoc->alternate) {
4668 netp = asoc->alternate;
4670 netp = asoc->primary_destination;
4672 sctp_send_shutdown_ack(stcb, netp);
4673 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4674 stcb->sctp_ep, stcb, netp);
4679 * Now here we are going to recycle net_ack for a different use...
4682 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4687 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4688 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4689 * automatically ensure that.
4691 if ((asoc->sctp_cmt_on_off > 0) &&
4692 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4693 (cmt_dac_flag == 0)) {
4694 this_sack_lowest_newack = cum_ack;
4696 if ((num_seg > 0) || (num_nr_seg > 0)) {
4697 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4698 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4700 /* JRS - Use the congestion control given in the CC module */
4701 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4703 /* Now are we exiting loss recovery ? */
4704 if (will_exit_fast_recovery) {
4705 /* Ok, we must exit fast recovery */
4706 asoc->fast_retran_loss_recovery = 0;
4708 if ((asoc->sat_t3_loss_recovery) &&
4709 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4710 /* end satellite t3 loss recovery */
4711 asoc->sat_t3_loss_recovery = 0;
4716 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4717 if (net->will_exit_fast_recovery) {
4718 /* Ok, we must exit fast recovery */
4719 net->fast_retran_loss_recovery = 0;
4723 /* Adjust and set the new rwnd value */
4724 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4725 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4726 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4728 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4729 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4730 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4731 /* SWS sender side engages */
4732 asoc->peers_rwnd = 0;
4734 if (asoc->peers_rwnd > old_rwnd) {
4735 win_probe_recovery = 1;
4738 * Now we must setup so we have a timer up for anyone with
4744 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4745 if (win_probe_recovery && (net->window_probe)) {
4746 win_probe_recovered = 1;
4748 * Find first chunk that was used with
4749 * window probe and clear the event. Put
4750 * it back into the send queue as if has
4753 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4754 if (tp1->window_probe) {
4755 sctp_window_probe_recovery(stcb, asoc, tp1);
4760 if (net->flight_size) {
4762 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4763 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4764 stcb->sctp_ep, stcb, net);
4766 if (net->window_probe) {
4767 net->window_probe = 0;
4770 if (net->window_probe) {
4772 * In window probes we must assure a timer
4773 * is still running there
4775 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4776 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4777 stcb->sctp_ep, stcb, net);
4780 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4781 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4783 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4788 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4789 (asoc->sent_queue_retran_cnt == 0) &&
4790 (win_probe_recovered == 0) &&
4793 * huh, this should not happen unless all packets are
4794 * PR-SCTP and marked to skip of course.
4796 if (sctp_fs_audit(asoc)) {
4797 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4798 net->flight_size = 0;
4800 asoc->total_flight = 0;
4801 asoc->total_flight_count = 0;
4802 asoc->sent_queue_retran_cnt = 0;
4803 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4804 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4805 sctp_flight_size_increase(tp1);
4806 sctp_total_flight_increase(stcb, tp1);
4807 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4808 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4815 /*********************************************/
4816 /* Here we perform PR-SCTP procedures */
4818 /*********************************************/
4819 /* C1. update advancedPeerAckPoint */
4820 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4821 asoc->advanced_peer_ack_point = cum_ack;
4823 /* C2. try to further move advancedPeerAckPoint ahead */
4824 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4825 struct sctp_tmit_chunk *lchk;
4826 uint32_t old_adv_peer_ack_point;
4828 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4829 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4830 /* C3. See if we need to send a Fwd-TSN */
4831 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4833 * ISSUE with ECN, see FWD-TSN processing.
4835 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4836 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4837 0xee, cum_ack, asoc->advanced_peer_ack_point,
4838 old_adv_peer_ack_point);
4840 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4841 send_forward_tsn(stcb, asoc);
4843 /* try to FR fwd-tsn's that get lost too */
4844 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4845 send_forward_tsn(stcb, asoc);
4850 /* Assure a timer is up */
4851 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4852 stcb->sctp_ep, stcb, lchk->whoTo);
4855 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4856 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4858 stcb->asoc.peers_rwnd,
4859 stcb->asoc.total_flight,
4860 stcb->asoc.total_output_queue_size);
4865 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4868 uint32_t cum_ack, a_rwnd;
4870 cum_ack = ntohl(cp->cumulative_tsn_ack);
4871 /* Arrange so a_rwnd does NOT change */
4872 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
4874 /* Now call the express sack handling */
4875 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4879 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4880 struct sctp_stream_in *strmin)
4882 struct sctp_queued_to_read *ctl, *nctl;
4883 struct sctp_association *asoc;
4887 tt = strmin->last_sequence_delivered;
4889 * First deliver anything prior to and including the stream no that
4892 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4893 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4894 /* this is deliverable now */
4895 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4896 /* subtract pending on streams */
4897 asoc->size_on_all_streams -= ctl->length;
4898 sctp_ucount_decr(asoc->cnt_on_all_streams);
4899 /* deliver it to at least the delivery-q */
4900 if (stcb->sctp_socket) {
4901 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4902 sctp_add_to_readq(stcb->sctp_ep, stcb,
4904 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4907 /* no more delivery now. */
4912 * now we must deliver things in queue the normal way if any are
4915 tt = strmin->last_sequence_delivered + 1;
4916 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4917 if (tt == ctl->sinfo_ssn) {
4918 /* this is deliverable now */
4919 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4920 /* subtract pending on streams */
4921 asoc->size_on_all_streams -= ctl->length;
4922 sctp_ucount_decr(asoc->cnt_on_all_streams);
4923 /* deliver it to at least the delivery-q */
4924 strmin->last_sequence_delivered = ctl->sinfo_ssn;
4925 if (stcb->sctp_socket) {
4926 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4927 sctp_add_to_readq(stcb->sctp_ep, stcb,
4929 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4932 tt = strmin->last_sequence_delivered + 1;
4940 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
4941 struct sctp_association *asoc,
4942 uint16_t stream, uint16_t seq)
4944 struct sctp_tmit_chunk *chk, *nchk;
4946 /* For each one on here see if we need to toss it */
4948 * For now large messages held on the reasmqueue that are complete
4949 * will be tossed too. We could in theory do more work to spin
4950 * through and stop after dumping one msg aka seeing the start of a
4951 * new msg at the head, and call the delivery function... to see if
4952 * it can be delivered... But for now we just dump everything on the
4955 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
4957 * Do not toss it if on a different stream or marked for
4958 * unordered delivery in which case the stream sequence
4959 * number has no meaning.
4961 if ((chk->rec.data.stream_number != stream) ||
4962 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
4965 if (chk->rec.data.stream_seq == seq) {
4966 /* It needs to be tossed */
4967 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
4968 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
4969 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
4970 asoc->str_of_pdapi = chk->rec.data.stream_number;
4971 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
4972 asoc->fragment_flags = chk->rec.data.rcv_flags;
4974 asoc->size_on_reasm_queue -= chk->send_size;
4975 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
4977 /* Clear up any stream problem */
4978 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
4979 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
4981 * We must dump forward this streams
4982 * sequence number if the chunk is not
4983 * unordered that is being skipped. There is
4984 * a chance that if the peer does not
4985 * include the last fragment in its FWD-TSN
4986 * we WILL have a problem here since you
4987 * would have a partial chunk in queue that
4988 * may not be deliverable. Also if a Partial
4989 * delivery API as started the user may get
4990 * a partial chunk. The next read returning
4991 * a new chunk... really ugly but I see no
4992 * way around it! Maybe a notify??
4994 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
4997 sctp_m_freem(chk->data);
5000 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5001 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5003 * If the stream_seq is > than the purging one, we
5013 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5014 struct sctp_forward_tsn_chunk *fwd,
5015 int *abort_flag, struct mbuf *m, int offset)
5017 /* The pr-sctp fwd tsn */
5019 * here we will perform all the data receiver side steps for
5020 * processing FwdTSN, as required in by pr-sctp draft:
5022 * Assume we get FwdTSN(x):
5024 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5025 * others we have 3) examine and update re-ordering queue on
5026 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5027 * report where we are.
5029 struct sctp_association *asoc;
5030 uint32_t new_cum_tsn, gap;
5031 unsigned int i, fwd_sz, m_size;
5033 struct sctp_stream_in *strm;
5034 struct sctp_tmit_chunk *chk, *nchk;
5035 struct sctp_queued_to_read *ctl, *sv;
5038 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5039 SCTPDBG(SCTP_DEBUG_INDATA1,
5040 "Bad size too small/big fwd-tsn\n");
5043 m_size = (stcb->asoc.mapping_array_size << 3);
5044 /*************************************************************/
5045 /* 1. Here we update local cumTSN and shift the bitmap array */
5046 /*************************************************************/
5047 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5049 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5050 /* Already got there ... */
5054 * now we know the new TSN is more advanced, let's find the actual
5057 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5058 asoc->cumulative_tsn = new_cum_tsn;
5059 if (gap >= m_size) {
5060 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5061 struct mbuf *op_err;
5062 char msg[SCTP_DIAG_INFO_LEN];
5065 * out of range (of single byte chunks in the rwnd I
5066 * give out). This must be an attacker.
5069 snprintf(msg, sizeof(msg),
5070 "New cum ack %8.8x too high, highest TSN %8.8x",
5071 new_cum_tsn, asoc->highest_tsn_inside_map);
5072 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5073 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5074 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5077 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5079 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5080 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5081 asoc->highest_tsn_inside_map = new_cum_tsn;
5083 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5084 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5086 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5087 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5090 SCTP_TCB_LOCK_ASSERT(stcb);
5091 for (i = 0; i <= gap; i++) {
5092 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5093 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5094 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5095 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5096 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5101 /*************************************************************/
5102 /* 2. Clear up re-assembly queue */
5103 /*************************************************************/
5105 * First service it if pd-api is up, just in case we can progress it
5108 if (asoc->fragmented_delivery_inprogress) {
5109 sctp_service_reassembly(stcb, asoc);
5111 /* For each one on here see if we need to toss it */
5113 * For now large messages held on the reasmqueue that are complete
5114 * will be tossed too. We could in theory do more work to spin
5115 * through and stop after dumping one msg aka seeing the start of a
5116 * new msg at the head, and call the delivery function... to see if
5117 * it can be delivered... But for now we just dump everything on the
5120 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5121 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5122 /* It needs to be tossed */
5123 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5124 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5125 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5126 asoc->str_of_pdapi = chk->rec.data.stream_number;
5127 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5128 asoc->fragment_flags = chk->rec.data.rcv_flags;
5130 asoc->size_on_reasm_queue -= chk->send_size;
5131 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5133 /* Clear up any stream problem */
5134 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5135 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5137 * We must dump forward this streams
5138 * sequence number if the chunk is not
5139 * unordered that is being skipped. There is
5140 * a chance that if the peer does not
5141 * include the last fragment in its FWD-TSN
5142 * we WILL have a problem here since you
5143 * would have a partial chunk in queue that
5144 * may not be deliverable. Also if a Partial
5145 * delivery API as started the user may get
5146 * a partial chunk. The next read returning
5147 * a new chunk... really ugly but I see no
5148 * way around it! Maybe a notify??
5150 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5153 sctp_m_freem(chk->data);
5156 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5159 * Ok we have gone beyond the end of the fwd-tsn's
5165 /*******************************************************/
5166 /* 3. Update the PR-stream re-ordering queues and fix */
5167 /* delivery issues as needed. */
5168 /*******************************************************/
5169 fwd_sz -= sizeof(*fwd);
5172 unsigned int num_str;
5173 struct sctp_strseq *stseq, strseqbuf;
5175 offset += sizeof(*fwd);
5177 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5178 num_str = fwd_sz / sizeof(struct sctp_strseq);
5179 for (i = 0; i < num_str; i++) {
5182 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5183 sizeof(struct sctp_strseq),
5184 (uint8_t *) & strseqbuf);
5185 offset += sizeof(struct sctp_strseq);
5186 if (stseq == NULL) {
5190 st = ntohs(stseq->stream);
5192 st = ntohs(stseq->sequence);
5193 stseq->sequence = st;
5198 * Ok we now look for the stream/seq on the read
5199 * queue where its not all delivered. If we find it
5200 * we transmute the read entry into a PDI_ABORTED.
5202 if (stseq->stream >= asoc->streamincnt) {
5203 /* screwed up streams, stop! */
5206 if ((asoc->str_of_pdapi == stseq->stream) &&
5207 (asoc->ssn_of_pdapi == stseq->sequence)) {
5209 * If this is the one we were partially
5210 * delivering now then we no longer are.
5211 * Note this will change with the reassembly
5214 asoc->fragmented_delivery_inprogress = 0;
5216 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5217 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5218 if ((ctl->sinfo_stream == stseq->stream) &&
5219 (ctl->sinfo_ssn == stseq->sequence)) {
5220 str_seq = (stseq->stream << 16) | stseq->sequence;
5222 ctl->pdapi_aborted = 1;
5223 sv = stcb->asoc.control_pdapi;
5224 stcb->asoc.control_pdapi = ctl;
5225 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5227 SCTP_PARTIAL_DELIVERY_ABORTED,
5229 SCTP_SO_NOT_LOCKED);
5230 stcb->asoc.control_pdapi = sv;
5232 } else if ((ctl->sinfo_stream == stseq->stream) &&
5233 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5234 /* We are past our victim SSN */
5238 strm = &asoc->strmin[stseq->stream];
5239 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5240 /* Update the sequence number */
5241 strm->last_sequence_delivered = stseq->sequence;
5243 /* now kick the stream the new way */
5244 /* sa_ignore NO_NULL_CHK */
5245 sctp_kick_prsctp_reorder_queue(stcb, strm);
5247 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5250 * Now slide thing forward.
5252 sctp_slide_mapping_arrays(stcb);
5254 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5255 /* now lets kick out and check for more fragmented delivery */
5256 /* sa_ignore NO_NULL_CHK */
5257 sctp_deliver_reasm_check(stcb, &stcb->asoc);