2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send, that is) and will be sending it, for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Recompute and cache this association's advertised receive window
 * (asoc->my_rwnd) from the current socket-buffer and queue state,
 * delegating the arithmetic to sctp_calc_rwnd().
 * NOTE(review): surrounding lines (return type, braces) were lost in
 * extraction; code lines kept byte-identical.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
/*
 * Compute the receive window (rwnd) to advertise for this association.
 * Grants the full socket-buffer limit when nothing is queued anywhere;
 * otherwise starts from the actual socket-buffer space and subtracts
 * data (and per-mbuf MSIZE overhead) still held on the reassembly and
 * stream queues, plus the control-chunk overhead, clamping via
 * sctp_sbspace_sub() along the way.
 * NOTE(review): several lines (returns, else branches, closing braces)
 * were lost in extraction; code lines kept byte-identical.
 */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone has put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
77 if (stcb->sctp_socket == NULL)
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even if it is 0. (SWS avoidance engaged.)
109 if (calc < stcb->asoc.my_rwnd_control_len) {
118 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and initialize a read-queue entry (sctp_queued_to_read)
 * describing one incoming message, from explicit per-packet values.
 * Takes a reference on 'net' (whoFrom) via atomic_add_int; the caller
 * owns the returned entry (NULL on allocation failure).
 * NOTE(review): the trailing parameter line (presumably the data mbuf
 * 'dm') and the braces were lost in extraction — confirm against the
 * original signature. Code lines kept byte-identical.
 */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
128 struct sctp_queued_to_read *read_queue_e = NULL;
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
157 return (read_queue_e);
162 * Build out our readq entry based on the incoming packet.
/*
 * Like sctp_build_readq_entry(), but populate the read-queue entry
 * from a transmit chunk (chk->rec.data.*) sitting on the reassembly
 * queue. Takes a reference on chk->whoTo; steals chk->data as the
 * entry's data mbuf. Returns NULL on allocation failure.
 * NOTE(review): braces and the early-return line were lost in
 * extraction; code lines kept byte-identical.
 */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
168 struct sctp_queued_to_read *read_queue_e = NULL;
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
186 atomic_add_int(&chk->whoTo->ref_count, 1);
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
197 return (read_queue_e);
/*
 * Build an mbuf chain of ancillary (cmsg) data for a received message,
 * based on which socket options the endpoint has enabled:
 *   - SCTP_PCB_FLAGS_RECVRCVINFO   -> SCTP_RCVINFO cmsg
 *   - SCTP_PCB_FLAGS_RECVNXTINFO   -> SCTP_NXTINFO cmsg (only when the
 *     extended receive info says a next message is available)
 *   - SCTP_PCB_FLAGS_RECVDATAIOEVNT-> SCTP_SNDRCV, or SCTP_EXTRCV when
 *     SCTP_PCB_FLAGS_EXT_RCVINFO is also on
 * First computes the total CMSG_SPACE needed, then allocates one mbuf
 * and lays the cmsg headers + payloads out back to back, bumping
 * SCTP_BUF_LEN as it goes.
 * NOTE(review): declarations of 'len', 'ret' and 'cmh', several early
 * returns and else branches were lost in extraction; code lines kept
 * byte-identical.
 */
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
204 struct sctp_extrcvinfo *seinfo;
205 struct sctp_sndrcvinfo *outinfo;
206 struct sctp_rcvinfo *rcvinfo;
207 struct sctp_nxtinfo *nxtinfo;
214 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 /* user does not want any ancillary data */
221 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
224 seinfo = (struct sctp_extrcvinfo *)sinfo;
225 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
228 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
235 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
238 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
244 ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
249 SCTP_BUF_LEN(ret) = 0;
251 /* We need a CMSG header followed by the struct */
252 cmh = mtod(ret, struct cmsghdr *);
253 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
254 cmh->cmsg_level = IPPROTO_SCTP;
255 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
256 cmh->cmsg_type = SCTP_RCVINFO;
257 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
258 rcvinfo->rcv_sid = sinfo->sinfo_stream;
259 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
260 rcvinfo->rcv_flags = sinfo->sinfo_flags;
261 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
262 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
263 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
264 rcvinfo->rcv_context = sinfo->sinfo_context;
265 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
266 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
267 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
270 cmh->cmsg_level = IPPROTO_SCTP;
271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
272 cmh->cmsg_type = SCTP_NXTINFO;
273 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
274 nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
275 nxtinfo->nxt_flags = 0;
276 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
277 nxtinfo->nxt_flags |= SCTP_UNORDERED;
279 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
280 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
282 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
283 nxtinfo->nxt_flags |= SCTP_COMPLETE;
285 nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
286 nxtinfo->nxt_length = seinfo->sreinfo_next_length;
287 nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
288 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
289 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
291 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
292 cmh->cmsg_level = IPPROTO_SCTP;
293 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
295 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
296 cmh->cmsg_type = SCTP_EXTRCV;
297 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
298 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
300 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
301 cmh->cmsg_type = SCTP_SNDRCV;
303 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move 'tsn' from the renegable mapping array to the non-renegable
 * (nr_mapping_array) one, i.e. mark it as a TSN we will never revoke.
 * No-op when draining (reneging) is disabled via the sctp_do_drain
 * sysctl, or when the TSN is already at/behind the cumulative ack.
 * Updates highest_tsn_inside_nr_map, and if 'tsn' was the highest bit
 * in the regular map, walks backwards to find the new highest (falling
 * back to base_tsn - 1 when the map is now empty).
 * NOTE(review): braces/returns were lost in extraction; code lines
 * kept byte-identical.
 */
311 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
313 uint32_t gap, i, cumackp1;
316 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
319 cumackp1 = asoc->cumulative_tsn + 1;
320 if (SCTP_TSN_GT(cumackp1, tsn)) {
322 * this tsn is behind the cum ack and thus we don't need to
323 * worry about it being moved from one to the other.
327 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
328 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
329 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
330 sctp_print_mapping_array(asoc);
332 panic("Things are really messed up now!!");
335 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
336 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
337 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
338 asoc->highest_tsn_inside_nr_map = tsn;
340 if (tsn == asoc->highest_tsn_inside_map) {
341 /* We must back down to see what the new highest is */
342 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
343 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
344 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
345 asoc->highest_tsn_inside_map = i;
351 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
358 * We are delivering currently from the reassembly queue. We must continue to
359 * deliver until we either: 1) run out of space. 2) run out of sequential
360 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * Deliver as much as possible from the reassembly queue to the socket
 * receive buffer. If the socket is gone/closing, drop everything on
 * the reassembly queue instead. Otherwise walk the queue in TSN order:
 * a FIRST fragment starts a new read-queue entry (control_pdapi),
 * subsequent fragments are appended to it, and a LAST fragment closes
 * out the partial-delivery state. After fragmented delivery completes,
 * drain any now-in-order messages queued on the same stream.
 * NOTE(review): many lines (breaks, returns, else branches, closing
 * braces, some arguments) were lost in extraction; code lines kept
 * byte-identical.
 */
363 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
365 struct sctp_tmit_chunk *chk, *nchk;
370 struct sctp_queued_to_read *control, *ctl, *nctl;
375 cntDel = stream_no = 0;
376 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
377 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
378 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
379 /* socket above is long gone or going.. */
381 asoc->fragmented_delivery_inprogress = 0;
382 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
383 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
384 asoc->size_on_reasm_queue -= chk->send_size;
385 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
387 * Lose the data pointer, since it's in the socket
391 sctp_m_freem(chk->data);
394 /* Now free the address and data */
395 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
396 /* sa_ignore FREED_MEMORY */
400 SCTP_TCB_LOCK_ASSERT(stcb);
401 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
402 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
403 /* Can't deliver more :< */
406 stream_no = chk->rec.data.stream_number;
407 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
408 if (nxt_todel != chk->rec.data.stream_seq &&
409 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
411 * Not the next sequence to deliver in its stream OR
416 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
418 control = sctp_build_readq_entry_chk(stcb, chk);
419 if (control == NULL) {
423 /* save it off for our future deliveries */
424 stcb->asoc.control_pdapi = control;
425 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
429 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
430 sctp_add_to_readq(stcb->sctp_ep,
431 stcb, control, &stcb->sctp_socket->so_rcv, end,
432 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
435 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
439 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
440 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
441 stcb->asoc.control_pdapi,
442 chk->data, end, chk->rec.data.TSN_seq,
443 &stcb->sctp_socket->so_rcv)) {
445 * something is very wrong, either
446 * control_pdapi is NULL, or the tail_mbuf
447 * is corrupt, or there is a EOM already on
450 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
454 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
455 panic("This should not happen control_pdapi NULL?");
457 /* if we did not panic, it was a EOM */
458 panic("Bad chunking ??");
460 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
461 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
463 SCTP_PRINTF("Bad chunking ??\n");
464 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
472 /* pull it we did it */
473 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
474 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
475 asoc->fragmented_delivery_inprogress = 0;
476 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
477 asoc->strmin[stream_no].last_sequence_delivered++;
479 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
480 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
482 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
484 * turn the flag back on since we just delivered
487 asoc->fragmented_delivery_inprogress = 1;
489 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
490 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
491 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
492 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
494 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
495 asoc->size_on_reasm_queue -= chk->send_size;
496 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
497 /* free up the chk */
499 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
501 if (asoc->fragmented_delivery_inprogress == 0) {
503 * Now lets see if we can deliver the next one on
506 struct sctp_stream_in *strm;
508 strm = &asoc->strmin[stream_no];
509 nxt_todel = strm->last_sequence_delivered + 1;
510 TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
511 /* Deliver more if we can. */
512 if (nxt_todel == ctl->sinfo_ssn) {
513 TAILQ_REMOVE(&strm->inqueue, ctl, next);
514 asoc->size_on_all_streams -= ctl->length;
515 sctp_ucount_decr(asoc->cnt_on_all_streams);
516 strm->last_sequence_delivered++;
517 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
518 sctp_add_to_readq(stcb->sctp_ep, stcb,
520 &stcb->sctp_socket->so_rcv, 1,
521 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
525 nxt_todel = strm->last_sequence_delivered + 1;
533 * Queue the chunk either right into the socket buffer if it is the next one
534 * to go OR put it in the correct place in the delivery queue. If we do
535 * append to the so_buf, keep doing so until we are out of order. One big
536 * question still remains, what to do when the socket buffer is FULL??
/*
 * Place an incoming ordered message ('control') either directly on the
 * socket receive buffer (when its SSN is the next expected for the
 * stream, then drain any messages made deliverable by it) or at the
 * correct SSN-sorted position in the stream's inqueue. A message whose
 * SSN is at/behind what was already delivered is a protocol violation:
 * the association is aborted (presumably *abort_flag is set on a path
 * lost in extraction — TODO confirm). Duplicate SSNs already queued
 * are freed rather than inserted.
 * NOTE(review): many lines (returns, else branches, some arguments,
 * closing braces) were lost in extraction; code lines kept
 * byte-identical.
 */
539 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
540 struct sctp_queued_to_read *control, int *abort_flag)
543 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
544 * all the data in one stream this could happen quite rapidly. One
545 * could use the TSN to keep track of things, but this scheme breaks
546 * down in the other type of stream useage that could occur. Send a
547 * single msg to stream 0, send 4Billion messages to stream 1, now
548 * send a message to stream 0. You have a situation where the TSN
549 * has wrapped but not in the stream. Is this worth worrying about
550 * or should we just change our queue sort at the bottom to be by
553 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
554 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
555 * assignment this could happen... and I don't see how this would be
556 * a violation. So for now I am undecided an will leave the sort by
557 * SSN alone. Maybe a hybred approach is the answer
560 struct sctp_stream_in *strm;
561 struct sctp_queued_to_read *at;
565 char msg[SCTP_DIAG_INFO_LEN];
568 asoc->size_on_all_streams += control->length;
569 sctp_ucount_incr(asoc->cnt_on_all_streams);
570 strm = &asoc->strmin[control->sinfo_stream];
571 nxt_todel = strm->last_sequence_delivered + 1;
572 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
573 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
575 SCTPDBG(SCTP_DEBUG_INDATA1,
576 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
577 (uint32_t) control->sinfo_stream,
578 (uint32_t) strm->last_sequence_delivered,
579 (uint32_t) nxt_todel);
580 if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
581 /* The incoming sseq is behind where we last delivered? */
582 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
583 control->sinfo_ssn, strm->last_sequence_delivered);
586 * throw it in the stream so it gets cleaned up in
587 * association destruction
589 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
590 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
591 strm->last_sequence_delivered, control->sinfo_tsn,
592 control->sinfo_stream, control->sinfo_ssn);
593 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
594 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
595 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
600 if (nxt_todel == control->sinfo_ssn) {
601 /* can be delivered right away? */
602 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
603 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
605 /* EY it wont be queued if it could be delivered directly */
607 asoc->size_on_all_streams -= control->length;
608 sctp_ucount_decr(asoc->cnt_on_all_streams);
609 strm->last_sequence_delivered++;
611 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
612 sctp_add_to_readq(stcb->sctp_ep, stcb,
614 &stcb->sctp_socket->so_rcv, 1,
615 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
616 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
618 nxt_todel = strm->last_sequence_delivered + 1;
619 if (nxt_todel == control->sinfo_ssn) {
620 TAILQ_REMOVE(&strm->inqueue, control, next);
621 asoc->size_on_all_streams -= control->length;
622 sctp_ucount_decr(asoc->cnt_on_all_streams);
623 strm->last_sequence_delivered++;
625 * We ignore the return of deliver_data here
626 * since we always can hold the chunk on the
627 * d-queue. And we have a finite number that
628 * can be delivered from the strq.
630 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
631 sctp_log_strm_del(control, NULL,
632 SCTP_STR_LOG_FROM_IMMED_DEL);
634 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
635 sctp_add_to_readq(stcb->sctp_ep, stcb,
637 &stcb->sctp_socket->so_rcv, 1,
638 SCTP_READ_LOCK_NOT_HELD,
647 * Ok, we did not deliver this guy, find the correct place
648 * to put it on the queue.
650 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
653 if (TAILQ_EMPTY(&strm->inqueue)) {
655 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
656 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
658 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
660 TAILQ_FOREACH(at, &strm->inqueue, next) {
661 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
663 * one in queue is bigger than the
664 * new one, insert before this one
666 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
667 sctp_log_strm_del(control, at,
668 SCTP_STR_LOG_FROM_INSERT_MD);
670 TAILQ_INSERT_BEFORE(at, control, next);
672 } else if (at->sinfo_ssn == control->sinfo_ssn) {
674 * Gak, He sent me a duplicate str
678 * foo bar, I guess I will just free
679 * this new guy, should we abort
680 * too? FIX ME MAYBE? Or it COULD be
681 * that the SSN's have wrapped.
682 * Maybe I should compare to TSN
683 * somehow... sigh for now just blow
688 sctp_m_freem(control->data);
689 control->data = NULL;
690 asoc->size_on_all_streams -= control->length;
691 sctp_ucount_decr(asoc->cnt_on_all_streams);
692 if (control->whoFrom) {
693 sctp_free_remote_addr(control->whoFrom);
694 control->whoFrom = NULL;
696 sctp_free_a_readq(stcb, control);
699 if (TAILQ_NEXT(at, next) == NULL) {
701 * We are at the end, insert
704 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
705 sctp_log_strm_del(control, at,
706 SCTP_STR_LOG_FROM_INSERT_TL);
708 TAILQ_INSERT_AFTER(&strm->inqueue,
719 * Returns two things: You get the total size of the deliverable parts of the
720 * first fragmented message on the reassembly queue. And you get a 1 back if
721 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * Report on the first fragmented message on the reassembly queue:
 * accumulate into *t_size the sizes of its sequential fragments, and
 * (per the comment above this function in the original file) indicate
 * whether the whole message is present. Requires the queue head to be
 * a FIRST fragment; stops at the first TSN gap or at the LAST fragment.
 * NOTE(review): the returns, the loop's expected tsn advance, and the
 * closing braces were lost in extraction — the visible comparison only
 * makes sense if 'tsn' is incremented each iteration; confirm against
 * the original. Code lines kept byte-identical.
 */
724 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
726 struct sctp_tmit_chunk *chk;
730 chk = TAILQ_FIRST(&asoc->reasmqueue);
732 /* nothing on the queue */
735 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
736 /* Not a first on the queue */
739 tsn = chk->rec.data.TSN_seq;
740 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
741 if (tsn != chk->rec.data.TSN_seq) {
744 *t_size += chk->send_size;
745 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/*
 * Check whether fragmented delivery can start or continue. When no
 * partial delivery is in progress: if the queue head is a FIRST
 * fragment that is in-order (or unordered) and either the whole
 * message is present or at least pd_point bytes are queued, arm the
 * partial-delivery state (tsn_last_delivered backed down by one so the
 * service loop picks up the head) and run sctp_service_reassembly().
 * When one is already in progress, just keep servicing it. The
 * pd_point threshold is the smaller of a fraction of the socket buffer
 * (SCTP_PARTIAL_DELIVERY_SHIFT) and the endpoint's configured
 * partial_delivery_point.
 * NOTE(review): returns, else branches and braces were lost in
 * extraction; code lines kept byte-identical.
 */
754 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
756 struct sctp_tmit_chunk *chk;
758 uint32_t tsize, pd_point;
761 chk = TAILQ_FIRST(&asoc->reasmqueue);
764 asoc->size_on_reasm_queue = 0;
765 asoc->cnt_on_reasm_queue = 0;
768 if (asoc->fragmented_delivery_inprogress == 0) {
770 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
771 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
772 (nxt_todel == chk->rec.data.stream_seq ||
773 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
775 * Yep the first one is here and its ok to deliver
778 if (stcb->sctp_socket) {
779 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
780 stcb->sctp_ep->partial_delivery_point);
782 pd_point = stcb->sctp_ep->partial_delivery_point;
784 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
786 * Yes, we setup to start reception, by
787 * backing down the TSN just in case we
788 * can't deliver. If we
790 asoc->fragmented_delivery_inprogress = 1;
791 asoc->tsn_last_delivered =
792 chk->rec.data.TSN_seq - 1;
794 chk->rec.data.stream_number;
795 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
796 asoc->pdapi_ppid = chk->rec.data.payloadtype;
797 asoc->fragment_flags = chk->rec.data.rcv_flags;
798 sctp_service_reassembly(stcb, asoc);
803 * Service re-assembly will deliver stream data queued at
804 * the end of fragmented delivery.. but it wont know to go
805 * back and call itself again... we do that here with the
808 sctp_service_reassembly(stcb, asoc);
809 if (asoc->fragmented_delivery_inprogress == 0) {
811 * finished our Fragmented delivery, could be more
820 * Dump onto the re-assembly queue, in its proper place. After dumping on the
821 * queue, see if anthing can be delivered. If so pull it off (or as much as
822 * we can. If we run out of space then we must dump what we can and set the
823 * appropriate flag to say we queued what we could.
826 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
827 struct sctp_tmit_chunk *chk, int *abort_flag)
830 char msg[SCTP_DIAG_INFO_LEN];
831 uint32_t cum_ackp1, prev_tsn, post_tsn;
832 struct sctp_tmit_chunk *at, *prev, *next;
835 cum_ackp1 = asoc->tsn_last_delivered + 1;
836 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
837 /* This is the first one on the queue */
838 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
840 * we do not check for delivery of anything when only one
843 asoc->size_on_reasm_queue = chk->send_size;
844 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
845 if (chk->rec.data.TSN_seq == cum_ackp1) {
846 if (asoc->fragmented_delivery_inprogress == 0 &&
847 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
848 SCTP_DATA_FIRST_FRAG) {
850 * An empty queue, no delivery inprogress,
851 * we hit the next one and it does NOT have
852 * a FIRST fragment mark.
854 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
855 snprintf(msg, sizeof(msg),
856 "Expected B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
857 chk->rec.data.TSN_seq,
858 chk->rec.data.stream_number,
859 chk->rec.data.stream_seq);
860 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
861 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
862 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
864 } else if (asoc->fragmented_delivery_inprogress &&
865 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
867 * We are doing a partial delivery and the
868 * NEXT chunk MUST be either the LAST or
869 * MIDDLE fragment NOT a FIRST
871 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
872 snprintf(msg, sizeof(msg),
873 "Didn't expect B-bit for TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
874 chk->rec.data.TSN_seq,
875 chk->rec.data.stream_number,
876 chk->rec.data.stream_seq);
877 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
878 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
879 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
881 } else if (asoc->fragmented_delivery_inprogress) {
883 * Here we are ok with a MIDDLE or LAST
886 if (chk->rec.data.stream_number !=
887 asoc->str_of_pdapi) {
888 /* Got to be the right STR No */
889 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
890 chk->rec.data.stream_number,
892 snprintf(msg, sizeof(msg),
893 "Expected SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
895 chk->rec.data.TSN_seq,
896 chk->rec.data.stream_number,
897 chk->rec.data.stream_seq);
898 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
899 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
900 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
902 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
903 SCTP_DATA_UNORDERED &&
904 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
905 /* Got to be the right STR Seq */
906 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
907 chk->rec.data.stream_seq,
909 snprintf(msg, sizeof(msg),
910 "Expected SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
912 chk->rec.data.TSN_seq,
913 chk->rec.data.stream_number,
914 chk->rec.data.stream_seq);
915 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
916 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
917 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
925 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
926 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
928 * one in queue is bigger than the new one, insert
932 asoc->size_on_reasm_queue += chk->send_size;
933 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
935 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
937 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
938 /* Gak, He sent me a duplicate str seq number */
940 * foo bar, I guess I will just free this new guy,
941 * should we abort too? FIX ME MAYBE? Or it COULD be
942 * that the SSN's have wrapped. Maybe I should
943 * compare to TSN somehow... sigh for now just blow
947 sctp_m_freem(chk->data);
950 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
954 if (TAILQ_NEXT(at, sctp_next) == NULL) {
956 * We are at the end, insert it after this
960 asoc->size_on_reasm_queue += chk->send_size;
961 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
962 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
969 prev_tsn = chk->rec.data.TSN_seq - 1;
970 if (prev_tsn == prev->rec.data.TSN_seq) {
972 * Ok the one I am dropping onto the end is the
973 * NEXT. A bit of valdiation here.
975 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
976 SCTP_DATA_FIRST_FRAG ||
977 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
978 SCTP_DATA_MIDDLE_FRAG) {
980 * Insert chk MUST be a MIDDLE or LAST
983 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
984 SCTP_DATA_FIRST_FRAG) {
985 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
986 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
987 snprintf(msg, sizeof(msg),
988 "Can't handle B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
989 chk->rec.data.TSN_seq,
990 chk->rec.data.stream_number,
991 chk->rec.data.stream_seq);
992 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
993 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
994 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
998 if (chk->rec.data.stream_number !=
999 prev->rec.data.stream_number) {
1001 * Huh, need the correct STR here,
1002 * they must be the same.
1004 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sid:%d not the same as at:%d\n",
1005 chk->rec.data.stream_number,
1006 prev->rec.data.stream_number);
1007 snprintf(msg, sizeof(msg),
1008 "Expect SID=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1009 prev->rec.data.stream_number,
1010 chk->rec.data.TSN_seq,
1011 chk->rec.data.stream_number,
1012 chk->rec.data.stream_seq);
1013 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1014 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1015 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1019 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1020 (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1022 * Huh, need the same ordering here,
1023 * they must be the same.
1025 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, U-bit not constant\n");
1026 snprintf(msg, sizeof(msg),
1027 "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1028 (prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1029 chk->rec.data.TSN_seq,
1030 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1031 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1032 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1033 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1037 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1038 chk->rec.data.stream_seq !=
1039 prev->rec.data.stream_seq) {
1041 * Huh, need the correct STR here,
1042 * they must be the same.
1044 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1045 chk->rec.data.stream_seq,
1046 prev->rec.data.stream_seq);
1047 snprintf(msg, sizeof(msg),
1048 "Expect SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1049 prev->rec.data.stream_seq,
1050 chk->rec.data.TSN_seq,
1051 chk->rec.data.stream_number,
1052 chk->rec.data.stream_seq);
1053 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1054 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1055 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1059 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1060 SCTP_DATA_LAST_FRAG) {
1061 /* Insert chk MUST be a FIRST */
1062 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1063 SCTP_DATA_FIRST_FRAG) {
1064 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1065 snprintf(msg, sizeof(msg),
1066 "Expect B-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1067 chk->rec.data.TSN_seq,
1068 chk->rec.data.stream_number,
1069 chk->rec.data.stream_seq);
1070 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1071 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1072 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1080 post_tsn = chk->rec.data.TSN_seq + 1;
1081 if (post_tsn == next->rec.data.TSN_seq) {
1083 * Ok the one I am inserting ahead of is my NEXT
1084 * one. A bit of valdiation here.
1086 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1087 /* Insert chk MUST be a last fragment */
1088 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1089 != SCTP_DATA_LAST_FRAG) {
1090 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1091 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1092 snprintf(msg, sizeof(msg),
1093 "Expect only E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1094 chk->rec.data.TSN_seq,
1095 chk->rec.data.stream_number,
1096 chk->rec.data.stream_seq);
1097 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1098 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1099 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1103 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1104 SCTP_DATA_MIDDLE_FRAG ||
1105 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1106 SCTP_DATA_LAST_FRAG) {
1108 * Insert chk CAN be MIDDLE or FIRST NOT
1111 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1112 SCTP_DATA_LAST_FRAG) {
1113 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1114 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1115 snprintf(msg, sizeof(msg),
1116 "Didn't expect E-bit, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1117 chk->rec.data.TSN_seq,
1118 chk->rec.data.stream_number,
1119 chk->rec.data.stream_seq);
1120 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1121 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1122 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1126 if (chk->rec.data.stream_number !=
1127 next->rec.data.stream_number) {
1129 * Huh, need the correct STR here,
1130 * they must be the same.
1132 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1133 chk->rec.data.stream_number,
1134 next->rec.data.stream_number);
1135 snprintf(msg, sizeof(msg),
1136 "Required SID %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1137 next->rec.data.stream_number,
1138 chk->rec.data.TSN_seq,
1139 chk->rec.data.stream_number,
1140 chk->rec.data.stream_seq);
1141 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1142 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1143 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1147 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) !=
1148 (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED)) {
1150 * Huh, need the same ordering here,
1151 * they must be the same.
1153 SCTPDBG(SCTP_DEBUG_INDATA1, "Next check - Gak, Evil plot, U-bit not constant\n");
1154 snprintf(msg, sizeof(msg),
1155 "Expect U-bit=%d for TSN=%8.8x, got U-bit=%d",
1156 (next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0,
1157 chk->rec.data.TSN_seq,
1158 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) ? 1 : 0);
1159 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1160 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1161 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1165 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1166 chk->rec.data.stream_seq !=
1167 next->rec.data.stream_seq) {
1169 * Huh, need the correct STR here,
1170 * they must be the same.
1172 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1173 chk->rec.data.stream_seq,
1174 next->rec.data.stream_seq);
1175 snprintf(msg, sizeof(msg),
1176 "Required SSN %4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1177 next->rec.data.stream_seq,
1178 chk->rec.data.TSN_seq,
1179 chk->rec.data.stream_number,
1180 chk->rec.data.stream_seq);
1181 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1182 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1183 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1190 /* Do we need to do some delivery? check */
1191 sctp_deliver_reasm_check(stcb, asoc);
1195 * This is an unfortunate routine. It checks to make sure a evil guy is not
1196 * stuffing us full of bad packet fragments. A broken peer could also do this
1197 * but this is doubtful. It is to bad I must worry about evil crackers sigh
/*
 * Walk the association's reassembly queue and decide whether a chunk
 * carrying TSN_seq would have to be part of a fragmented message that is
 * already queued there, i.e. whether it is TSN-adjacent to a queued
 * fragment whose B/E-bit pattern says the message continues across it.
 * NOTE(review): interior lines (returns and closing braces) are elided in
 * this excerpt; the comments below annotate only the visible logic.
 */
1201 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1204 struct sctp_tmit_chunk *at;
/* Scan every fragment currently held for reassembly. */
1207 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1208 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1209 /* is it one bigger? */
1210 tsn_est = at->rec.data.TSN_seq + 1;
1211 if (tsn_est == TSN_seq) {
1212 /* yep. It better be a last then */
1213 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1214 SCTP_DATA_LAST_FRAG) {
1216 * Ok this guy belongs next to a guy
1217 * that is NOT last, it should be a
1218 * middle/last, not a complete
1224 * This guy is ok since its a LAST
1225 * and the new chunk is a fully
1226 * self- contained one.
/*
 * Equal TSNs: a duplicate that earlier de-dup checks should have
 * caught before this routine was reached.
 */
1231 } else if (TSN_seq == at->rec.data.TSN_seq) {
1232 /* Software error since I have a dup? */
1236 * Ok, 'at' is larger than new chunk but does it
1237 * need to be right before it.
1239 tsn_est = TSN_seq + 1;
1240 if (tsn_est == at->rec.data.TSN_seq) {
1241 /* Yep, It better be a first */
1242 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1243 SCTP_DATA_FIRST_FRAG) {
/*
 * sctp_process_a_data_chunk() - handle one received DATA chunk for this
 * association: validate it (duplicate TSN, receive window, stream id,
 * stream sequence), record it in the (nr_)mapping arrays, and hand the
 * payload to one of several delivery paths: directly to the socket
 * buffer (express), appended to an in-progress partial-delivery read
 * entry, queued to the stream in-queue, or placed on the reassembly
 * queue.  On protocol violations the association is aborted and
 * *abort_flag is expected to be raised.
 * NOTE(review): many interior lines are elided in this excerpt; the
 * comments below annotate only the visible logic.
 */
1255 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1256 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1257 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1258 int *break_flag, int last_chunk)
1260 /* Process a data chunk */
1261 /* struct sctp_tmit_chunk *chk; */
1262 struct sctp_tmit_chunk *chk;
1266 int need_reasm_check = 0;
1267 uint16_t strmno, strmseq;
1268 struct mbuf *op_err;
1269 char msg[SCTP_DIAG_INFO_LEN];
1270 struct sctp_queued_to_read *control;
1272 uint32_t protocol_id;
1273 uint8_t chunk_flags;
1274 struct sctp_stream_reset_list *liste;
/* Pull TSN and flags from the chunk header (TSN is network byte order). */
1277 tsn = ntohl(ch->dp.tsn);
1278 chunk_flags = ch->ch.chunk_flags;
/* I-bit set: the peer requested an immediate SACK. */
1279 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1280 asoc->send_sack = 1;
1282 protocol_id = ch->dp.protocol_id;
1283 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1284 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1285 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1290 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
/*
 * TSN at or below the cumulative ack point: a duplicate.  Remember it
 * for the dup-TSN list of the next outbound SACK and force a SACK.
 */
1291 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1292 /* It is a duplicate */
1293 SCTP_STAT_INCR(sctps_recvdupdata);
1294 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1295 /* Record a dup for the next outbound sack */
1296 asoc->dup_tsns[asoc->numduptsns] = tsn;
1299 asoc->send_sack = 1;
1302 /* Calculate the number of TSN's between the base and this TSN */
1303 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1304 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1305 /* Can't hold the bit in the mapping at max array, toss it */
/* Grow the mapping array on demand if the gap exceeds its current span. */
1308 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1309 SCTP_TCB_LOCK_ASSERT(stcb);
1310 if (sctp_expand_mapping_array(asoc, gap)) {
1311 /* Can't expand, drop it */
1315 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1318 /* See if we have received this one already */
1319 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1320 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1321 SCTP_STAT_INCR(sctps_recvdupdata);
1322 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1323 /* Record a dup for the next outbound sack */
1324 asoc->dup_tsns[asoc->numduptsns] = tsn;
1327 asoc->send_sack = 1;
1331 * Check to see about the GONE flag, duplicates would cause a sack
1332 * to be sent up above
/* Socket already torn down: no receiver exists, abort the association. */
1334 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1335 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1336 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
1338 * wait a minute, this guy is gone, there is no longer a
1339 * receiver. Send peer an ABORT!
1341 op_err = sctp_generate_cause(SCTP_CAUSE_OUT_OF_RESC, "");
1342 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1347 * Now before going further we see if there is room. If NOT then we
1348 * MAY let one through only IF this TSN is the one we are waiting
1349 * for on a partial delivery API.
/* Resource limits: queued-chunk cap reached or receive window exhausted. */
1352 /* now do the tests */
1353 if (((asoc->cnt_on_all_streams +
1354 asoc->cnt_on_reasm_queue +
1355 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1356 (((int)asoc->my_rwnd) <= 0)) {
1358 * When we have NO room in the rwnd we check to make sure
1359 * the reader is doing its job...
1361 if (stcb->sctp_socket->so_rcv.sb_cc) {
1362 /* some to read, wake-up */
1363 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/*
 * Lock-order dance: drop the TCB lock to take the socket lock, then
 * re-take the TCB lock; a refcount pins the assoc while unlocked.
 */
1366 so = SCTP_INP_SO(stcb->sctp_ep);
1367 atomic_add_int(&stcb->asoc.refcnt, 1);
1368 SCTP_TCB_UNLOCK(stcb);
1369 SCTP_SOCKET_LOCK(so, 1);
1370 SCTP_TCB_LOCK(stcb);
1371 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1372 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1373 /* assoc was freed while we were unlocked */
1374 SCTP_SOCKET_UNLOCK(so, 1);
1378 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1379 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1380 SCTP_SOCKET_UNLOCK(so, 1);
/* While constrained, accept only TSNs already inside the tracked range. */
1383 /* now is it in the mapping array of what we have accepted? */
1384 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1385 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1386 /* Nope not in the valid range dump it */
1387 sctp_set_rwnd(stcb, asoc);
1388 if ((asoc->cnt_on_all_streams +
1389 asoc->cnt_on_reasm_queue +
1390 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1391 SCTP_STAT_INCR(sctps_datadropchklmt);
1393 SCTP_STAT_INCR(sctps_datadroprwnd);
/*
 * Invalid stream id: report an INVALID_STREAM error cause to the peer,
 * but still mark the TSN as received (nr map) so it gets acked.
 */
1399 strmno = ntohs(ch->dp.stream_id);
1400 if (strmno >= asoc->streamincnt) {
1401 struct sctp_paramhdr *phdr;
1404 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1405 0, M_DONTWAIT, 1, MT_DATA);
1407 /* add some space up front so prepend will work well */
1408 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1409 phdr = mtod(mb, struct sctp_paramhdr *);
1411 * Error causes are just param's and this one has
1412 * two back to back phdr, one with the error type
1413 * and size, the other with the streamid and a rsvd
1415 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1416 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1417 phdr->param_length =
1418 htons(sizeof(struct sctp_paramhdr) * 2);
1420 /* We insert the stream in the type field */
1421 phdr->param_type = ch->dp.stream_id;
1422 /* And set the length to 0 for the rsvd field */
1423 phdr->param_length = 0;
1424 sctp_queue_op_err(stcb, mb);
1426 SCTP_STAT_INCR(sctps_badsid);
1427 SCTP_TCB_LOCK_ASSERT(stcb);
1428 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1429 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1430 asoc->highest_tsn_inside_nr_map = tsn;
1432 if (tsn == (asoc->cumulative_tsn + 1)) {
1433 /* Update cum-ack */
1434 asoc->cumulative_tsn = tsn;
1439 * Before we continue lets validate that we are not being fooled by
1440 * an evil attacker. We can only have 4k chunks based on our TSN
1441 * spread allowed by the mapping array 512 * 8 bits, so there is no
1442 * way our stream sequence numbers could have wrapped. We of course
1443 * only validate the FIRST fragment so the bit must be set.
1445 strmseq = ntohs(ch->dp.stream_sequence);
1446 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Optional debug ring buffer of inbound TSN/stream records. */
1447 SCTP_TCB_LOCK_ASSERT(stcb);
1448 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1449 asoc->tsn_in_at = 0;
1450 asoc->tsn_in_wrapped = 1;
1452 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1453 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1454 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1455 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1456 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1457 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1458 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1459 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
/*
 * Ordered FIRST fragment whose SSN is at or behind the last delivered
 * SSN (and no stream reset pending): protocol violation -> abort.
 */
1462 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1463 (TAILQ_EMPTY(&asoc->resetHead)) &&
1464 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1465 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1466 /* The incoming sseq is behind where we last delivered? */
1467 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1468 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1469 snprintf(msg, sizeof(msg), "Delivered SSN=%4.4x, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1470 asoc->strmin[strmno].last_sequence_delivered,
1471 tsn, strmno, strmseq);
1472 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1473 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1474 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1478 /************************************
1479 * From here down we may find ch-> invalid
1480 * so its a good idea NOT to use it.
1481 *************************************/
/*
 * Extract the payload: copy it out of the packet mbuf chain, or for the
 * last chunk of the packet steal the chain and trim it in place.
 */
1483 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1484 if (last_chunk == 0) {
1485 dmbuf = SCTP_M_COPYM(*m,
1486 (offset + sizeof(struct sctp_data_chunk)),
1487 the_len, M_DONTWAIT);
1488 #ifdef SCTP_MBUF_LOGGING
1489 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1492 for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1493 if (SCTP_BUF_IS_EXTENDED(mat)) {
1494 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1500 /* We can steal the last chunk */
1504 /* lop off the top part */
1505 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1506 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1507 l_len = SCTP_BUF_LEN(dmbuf);
1510 * need to count up the size hopefully does not hit
1516 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1517 l_len += SCTP_BUF_LEN(lat);
1520 if (l_len > the_len) {
1521 /* Trim the end round bytes off too */
1522 m_adj(dmbuf, -(l_len - the_len));
1525 if (dmbuf == NULL) {
1526 SCTP_STAT_INCR(sctps_nomem);
/*
 * Express-delivery fast path: an unfragmented chunk, no PD-API active,
 * no stream reset pending, and (for ordered) it is the next SSN with an
 * empty stream queue - push it straight into the socket buffer.
 */
1529 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1530 asoc->fragmented_delivery_inprogress == 0 &&
1531 TAILQ_EMPTY(&asoc->resetHead) &&
1533 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1534 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1535 /* Candidate for express delivery */
1537 * Its not fragmented, No PD-API is up, Nothing in the
1538 * delivery queue, Its un-ordered OR ordered and the next to
1539 * deliver AND nothing else is stuck on the stream queue,
1540 * And there is room for it in the socket buffer. Lets just
1541 * stuff it up the buffer....
1544 /* It would be nice to avoid this copy if we could :< */
1545 sctp_alloc_a_readq(stcb, control);
1546 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1551 if (control == NULL) {
1552 goto failed_express_del;
1554 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1555 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1556 asoc->highest_tsn_inside_nr_map = tsn;
1558 sctp_add_to_readq(stcb->sctp_ep, stcb,
1559 control, &stcb->sctp_socket->so_rcv,
1560 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1562 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1563 /* for ordered, bump what we delivered */
1564 asoc->strmin[strmno].last_sequence_delivered++;
1566 SCTP_STAT_INCR(sctps_recvexpress);
1567 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1568 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1569 SCTP_STR_LOG_FROM_EXPRS_DEL);
1573 goto finish_express_del;
1576 /* If we reach here this is a new chunk */
/*
 * PD-API express path: a fragment continuing the in-progress partial
 * delivery (same stream/SSN, TSN exactly one past the last appended one)
 * is appended directly to the existing read entry.
 */
1579 /* Express for fragmented delivery? */
1580 if ((asoc->fragmented_delivery_inprogress) &&
1581 (stcb->asoc.control_pdapi) &&
1582 (asoc->str_of_pdapi == strmno) &&
1583 (asoc->ssn_of_pdapi == strmseq)
1585 control = stcb->asoc.control_pdapi;
1586 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1587 /* Can't be another first? */
1588 goto failed_pdapi_express_del;
1590 if (tsn == (control->sinfo_tsn + 1)) {
1591 /* Yep, we can add it on */
1594 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1597 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1599 &stcb->sctp_socket->so_rcv)) {
1600 SCTP_PRINTF("Append fails end:%d\n", end);
1601 goto failed_pdapi_express_del;
1603 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1604 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1605 asoc->highest_tsn_inside_nr_map = tsn;
1607 SCTP_STAT_INCR(sctps_recvexpressm);
1608 asoc->tsn_last_delivered = tsn;
1609 asoc->fragment_flags = chunk_flags;
1610 asoc->tsn_of_pdapi_last_delivered = tsn;
1611 asoc->last_flags_delivered = chunk_flags;
1612 asoc->last_strm_seq_delivered = strmseq;
1613 asoc->last_strm_no_delivered = strmno;
1615 /* clean up the flags and such */
1616 asoc->fragmented_delivery_inprogress = 0;
1617 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1618 asoc->strmin[strmno].last_sequence_delivered++;
1620 stcb->asoc.control_pdapi = NULL;
1621 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1623 * There could be another message
1626 need_reasm_check = 1;
1630 goto finish_express_del;
/*
 * Slow path.  Mark the TSN in the appropriate mapping array (nr map
 * when draining is disabled, renegeable map otherwise).
 */
1633 failed_pdapi_express_del:
1635 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1636 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1637 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1638 asoc->highest_tsn_inside_nr_map = tsn;
1641 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1642 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1643 asoc->highest_tsn_inside_map = tsn;
/* Fragmented chunk: build a tmit_chunk destined for the reassembly queue. */
1646 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1647 sctp_alloc_a_chunk(stcb, chk);
1649 /* No memory so we drop the chunk */
1650 SCTP_STAT_INCR(sctps_nomem);
1651 if (last_chunk == 0) {
1652 /* we copied it, free the copy */
1653 sctp_m_freem(dmbuf);
1657 chk->rec.data.TSN_seq = tsn;
1658 chk->no_fr_allowed = 0;
1659 chk->rec.data.stream_seq = strmseq;
1660 chk->rec.data.stream_number = strmno;
1661 chk->rec.data.payloadtype = protocol_id;
1662 chk->rec.data.context = stcb->asoc.context;
1663 chk->rec.data.doing_fast_retransmit = 0;
1664 chk->rec.data.rcv_flags = chunk_flags;
1666 chk->send_size = the_len;
1668 atomic_add_int(&net->ref_count, 1);
/* Unfragmented chunk: build a read-queue entry instead. */
1671 sctp_alloc_a_readq(stcb, control);
1672 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1677 if (control == NULL) {
1678 /* No memory so we drop the chunk */
1679 SCTP_STAT_INCR(sctps_nomem);
1680 if (last_chunk == 0) {
1681 /* we copied it, free the copy */
1682 sctp_m_freem(dmbuf);
1686 control->length = the_len;
/*
 * Queueing decision for a complete (unfragmented) message.  First make
 * sure it does not conflict with an ongoing partial delivery or with
 * fragments already sitting on the reassembly queue; if it does, the
 * peer is misbehaving and the association is aborted.
 */
1689 /* Mark it as received */
1690 /* Now queue it where it belongs */
1691 if (control != NULL) {
1692 /* First a sanity check */
1693 if (asoc->fragmented_delivery_inprogress) {
1695 * Ok, we have a fragmented delivery in progress if
1696 * this chunk is next to deliver OR belongs in our
1697 * view to the reassembly, the peer is evil or
1700 uint32_t estimate_tsn;
1702 estimate_tsn = asoc->tsn_last_delivered + 1;
1703 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1704 (estimate_tsn == control->sinfo_tsn)) {
1705 /* Evil/Broke peer */
1706 sctp_m_freem(control->data);
1707 control->data = NULL;
1708 if (control->whoFrom) {
1709 sctp_free_remote_addr(control->whoFrom);
1710 control->whoFrom = NULL;
1712 sctp_free_a_readq(stcb, control);
1713 snprintf(msg, sizeof(msg), "Reas. queue emtpy, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1714 tsn, strmno, strmseq);
1715 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1716 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1717 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1724 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1725 sctp_m_freem(control->data);
1726 control->data = NULL;
1727 if (control->whoFrom) {
1728 sctp_free_remote_addr(control->whoFrom);
1729 control->whoFrom = NULL;
1731 sctp_free_a_readq(stcb, control);
1732 snprintf(msg, sizeof(msg), "PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1733 tsn, strmno, strmseq);
1734 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1735 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1736 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1745 /* No PDAPI running */
1746 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1748 * Reassembly queue is NOT empty validate
1749 * that this tsn does not need to be in
1750 * reasembly queue. If it does then our peer
1751 * is broken or evil.
1753 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1754 sctp_m_freem(control->data);
1755 control->data = NULL;
1756 if (control->whoFrom) {
1757 sctp_free_remote_addr(control->whoFrom);
1758 control->whoFrom = NULL;
1760 sctp_free_a_readq(stcb, control);
1761 snprintf(msg, sizeof(msg), "No PD ongoing, got TSN=%8.8x, SID=%4.4x, SSN=%4.4x",
1762 tsn, strmno, strmseq);
1763 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
1764 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1765 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1774 /* ok, if we reach here we have passed the sanity checks */
1775 if (chunk_flags & SCTP_DATA_UNORDERED) {
1776 /* queue directly into socket buffer */
1777 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
1778 sctp_add_to_readq(stcb->sctp_ep, stcb,
1780 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1783 * Special check for when streams are resetting. We
1784 * could be more smart about this and check the
1785 * actual stream to see if it is not being reset..
1786 * that way we would not create a HOLB when amongst
1787 * streams being reset and those not being reset.
1789 * We take complete messages that have a stream reset
1790 * intervening (aka the TSN is after where our
1791 * cum-ack needs to be) off and put them on a
1792 * pending_reply_queue. The reassembly ones we do
1793 * not have to worry about since they are all sorted
1794 * and proceessed by TSN order. It is only the
1795 * singletons I must worry about.
1797 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1798 SCTP_TSN_GT(tsn, liste->tsn)) {
1800 * yep its past where we need to reset... go
1801 * ahead and queue it.
/* Keep pending_reply_queue sorted by sinfo_tsn while inserting. */
1803 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
1805 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1807 struct sctp_queued_to_read *ctlOn,
1809 unsigned char inserted = 0;
1811 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
1812 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
1816 TAILQ_INSERT_BEFORE(ctlOn, control, next);
1821 if (inserted == 0) {
1823 * must be put at end, use
1824 * prevP (all setup from
1825 * loop) to setup nextP.
1827 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
1831 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
1841 /* Into the re-assembly queue */
1842 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
1845 * the assoc is now gone and chk was put onto the
1846 * reasm queue, which has all been freed.
/* Common tail: advance cum-ack if this filled the next slot, bump stats. */
1855 if (tsn == (asoc->cumulative_tsn + 1)) {
1856 /* Update cum-ack */
1857 asoc->cumulative_tsn = tsn;
1863 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
1865 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
1867 SCTP_STAT_INCR(sctps_recvdata);
1868 /* Set it present please */
1869 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1870 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
1872 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1873 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
1874 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
/*
 * A pending stream reset whose TSN the cum-ack has now reached:
 * perform the reset and release any messages parked on
 * pending_reply_queue that are now deliverable.
 */
1876 /* check the special flag for stream resets */
1877 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
1878 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
1880 * we have finished working through the backlogged TSN's now
1881 * time to reset streams. 1: call reset function. 2: free
1882 * pending_reply space 3: distribute any chunks in
1883 * pending_reply_queue.
1885 struct sctp_queued_to_read *ctl, *nctl;
1887 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
1888 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
1889 SCTP_FREE(liste, SCTP_M_STRESET);
1890 /* sa_ignore FREED_MEMORY */
1891 liste = TAILQ_FIRST(&asoc->resetHead);
1892 if (TAILQ_EMPTY(&asoc->resetHead)) {
1893 /* All can be removed */
1894 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1895 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1896 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1902 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
1903 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
1907 * if ctl->sinfo_tsn is <= liste->tsn we can
1908 * process it which is the NOT of
1909 * ctl->sinfo_tsn > liste->tsn
1911 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
1912 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
1919 * Now service re-assembly to pick up anything that has been
1920 * held on reassembly queue?
1922 sctp_deliver_reasm_check(stcb, asoc);
1923 need_reasm_check = 0;
1925 if (need_reasm_check) {
1926 /* Another one waits ? */
1927 sctp_deliver_reasm_check(stcb, asoc);
/*
 * Lookup table used when sliding the TSN mapping arrays: for each
 * possible byte value v, sctp_map_lookup_tab[v] is the number of
 * consecutive 1 bits in v counted from bit 0 (the least-significant
 * bit).  E.g. tab[0x01] == 1, tab[0x03] == 2, tab[0x02] == 0,
 * tab[0xFF] == 8.  sctp_slide_mapping_arrays() adds this value to its
 * running bit count when it hits the first byte that is not all ones.
 */
1932 int8_t sctp_map_lookup_tab[256] = {
1933 0, 1, 0, 2, 0, 1, 0, 3,
1934 0, 1, 0, 2, 0, 1, 0, 4,
1935 0, 1, 0, 2, 0, 1, 0, 3,
1936 0, 1, 0, 2, 0, 1, 0, 5,
1937 0, 1, 0, 2, 0, 1, 0, 3,
1938 0, 1, 0, 2, 0, 1, 0, 4,
1939 0, 1, 0, 2, 0, 1, 0, 3,
1940 0, 1, 0, 2, 0, 1, 0, 6,
1941 0, 1, 0, 2, 0, 1, 0, 3,
1942 0, 1, 0, 2, 0, 1, 0, 4,
1943 0, 1, 0, 2, 0, 1, 0, 3,
1944 0, 1, 0, 2, 0, 1, 0, 5,
1945 0, 1, 0, 2, 0, 1, 0, 3,
1946 0, 1, 0, 2, 0, 1, 0, 4,
1947 0, 1, 0, 2, 0, 1, 0, 3,
1948 0, 1, 0, 2, 0, 1, 0, 7,
1949 0, 1, 0, 2, 0, 1, 0, 3,
1950 0, 1, 0, 2, 0, 1, 0, 4,
1951 0, 1, 0, 2, 0, 1, 0, 3,
1952 0, 1, 0, 2, 0, 1, 0, 5,
1953 0, 1, 0, 2, 0, 1, 0, 3,
1954 0, 1, 0, 2, 0, 1, 0, 4,
1955 0, 1, 0, 2, 0, 1, 0, 3,
1956 0, 1, 0, 2, 0, 1, 0, 6,
1957 0, 1, 0, 2, 0, 1, 0, 3,
1958 0, 1, 0, 2, 0, 1, 0, 4,
1959 0, 1, 0, 2, 0, 1, 0, 3,
1960 0, 1, 0, 2, 0, 1, 0, 5,
1961 0, 1, 0, 2, 0, 1, 0, 3,
1962 0, 1, 0, 2, 0, 1, 0, 4,
1963 0, 1, 0, 2, 0, 1, 0, 3,
1964 0, 1, 0, 2, 0, 1, 0, 8
/*
 * sctp_slide_mapping_arrays() - recompute the cumulative TSN point from
 * the OR of mapping_array and nr_mapping_array, then reclaim space:
 * either clear both arrays entirely (everything up to the highest TSN is
 * accounted for) or slide their contents down and advance
 * mapping_array_base_tsn.
 * NOTE(review): interior lines are elided in this excerpt; the comments
 * below annotate only the visible logic.
 */
1969 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
1972 * Now we also need to check the mapping array in a couple of ways.
1973 * 1) Did we move the cum-ack point?
1975 * When you first glance at this you might think that all entries that
1976 * make up the postion of the cum-ack would be in the nr-mapping
1977 * array only.. i.e. things up to the cum-ack are always
1978 * deliverable. Thats true with one exception, when its a fragmented
1979 * message we may not deliver the data until some threshold (or all
1980 * of it) is in place. So we must OR the nr_mapping_array and
1981 * mapping_array to get a true picture of the cum-ack.
1983 struct sctp_association *asoc;
1986 int slide_from, slide_end, lgap, distance;
1987 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot current state for the map-logging calls further down. */
1991 old_cumack = asoc->cumulative_tsn;
1992 old_base = asoc->mapping_array_base_tsn;
1993 old_highest = asoc->highest_tsn_inside_map;
1995 * We could probably improve this a small bit by calculating the
1996 * offset of the current cum-ack as the starting point.
/*
 * Scan bytes of the combined (renegeable OR non-renegeable) map,
 * accumulating in 'at' the count of contiguous received TSNs from the
 * base; sctp_map_lookup_tab supplies the trailing-ones count of the
 * first byte that is not all ones.
 */
1999 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2000 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2004 /* there is a 0 bit */
2005 at += sctp_map_lookup_tab[val];
/* New cum-ack is the last TSN of the contiguous run ('at' bits long). */
2009 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity: cum-ack must never pass both highest-TSN trackers. */
2011 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2012 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2014 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2015 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2017 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2018 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2019 sctp_print_mapping_array(asoc);
2020 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2021 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2023 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2024 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2027 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2028 highest_tsn = asoc->highest_tsn_inside_nr_map;
2030 highest_tsn = asoc->highest_tsn_inside_map;
/*
 * Fast path: the whole tracked range is contiguous up to highest_tsn,
 * so both arrays can simply be zeroed and the base re-anchored.
 */
2032 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2033 /* The complete array was completed by a single FR */
2034 /* highest becomes the cum-ack */
2042 /* clear the array */
2043 clr = ((at + 7) >> 3);
2044 if (clr > asoc->mapping_array_size) {
2045 clr = asoc->mapping_array_size;
2047 memset(asoc->mapping_array, 0, clr);
2048 memset(asoc->nr_mapping_array, 0, clr);
/* Debug sweep: after the clear no stale bit may remain in either map. */
2050 for (i = 0; i < asoc->mapping_array_size; i++) {
2051 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2052 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2053 sctp_print_mapping_array(asoc);
2057 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2058 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2059 } else if (at >= 8) {
2060 /* we can slide the mapping array down */
2061 /* slide_from holds where we hit the first NON 0xff byte */
2064 * now calculate the ceiling of the move using our highest
/* slide_end = byte index of highest_tsn relative to the array base. */
2067 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2068 slide_end = (lgap >> 3);
2069 if (slide_end < slide_from) {
2070 sctp_print_mapping_array(asoc);
2072 panic("impossible slide");
2074 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2075 lgap, slide_end, slide_from, at);
2079 if (slide_end > asoc->mapping_array_size) {
2081 panic("would overrun buffer");
2083 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2084 asoc->mapping_array_size, slide_end);
2085 slide_end = asoc->mapping_array_size;
2088 distance = (slide_end - slide_from) + 1;
2089 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2090 sctp_log_map(old_base, old_cumack, old_highest,
2091 SCTP_MAP_PREPARE_SLIDE);
2092 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2093 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
/* If the live span would not fit after the move, skip sliding for now. */
2095 if (distance + slide_from > asoc->mapping_array_size ||
2098 * Here we do NOT slide forward the array so that
2099 * hopefully when more data comes in to fill it up
2100 * we will be able to slide it forward. Really I
2101 * don't think this should happen :-0
2104 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2105 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2106 (uint32_t) asoc->mapping_array_size,
2107 SCTP_MAP_SLIDE_NONE);
/* Copy the live bytes down to the front, then zero the vacated tail. */
2112 for (ii = 0; ii < distance; ii++) {
2113 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2114 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2117 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2118 asoc->mapping_array[ii] = 0;
2119 asoc->nr_mapping_array[ii] = 0;
/*
 * Keep the highest-TSN trackers and the base in step: the base moves
 * forward by slide_from bytes (8 TSNs per byte).
 */
2121 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2122 asoc->highest_tsn_inside_map += (slide_from << 3);
2124 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2125 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2127 asoc->mapping_array_base_tsn += (slide_from << 3);
2128 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2129 sctp_log_map(asoc->mapping_array_base_tsn,
2130 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2131 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide whether a SACK must go out now or whether the delayed-ack
 * (dack) timer can be used instead.  Special case: in SHUTDOWN-SENT
 * state the SACK timer is stopped and a SHUTDOWN plus an immediate
 * SACK are sent.  With CMT DAC enabled, acks owed only to gap reports
 * are deliberately delayed.
 */
2138 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2140 struct sctp_association *asoc;
2141 uint32_t highest_tsn;
/* Renegable and non-renegable maps are tracked separately; gap
 * detection uses whichever highest TSN is larger. */
2144 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2145 highest_tsn = asoc->highest_tsn_inside_nr_map;
2147 highest_tsn = asoc->highest_tsn_inside_map;
2151 * Now we need to see if we need to queue a sack or just start the
2152 * timer (if allowed).
2154 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2156 * Ok special case, in SHUTDOWN-SENT case. here we maker
2157 * sure SACK timer is off and instead send a SHUTDOWN and a
2160 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2161 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2162 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
/* SHUTDOWN goes to the alternate net if one is set, otherwise to the
 * primary destination. */
2164 sctp_send_shutdown(stcb,
2165 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2166 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2170 /* is there a gap now ? */
2171 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2174 * CMT DAC algorithm: increase number of packets received
2177 stcb->asoc.cmt_dac_pkts_rcvd++;
/* Any of these conditions forces SACK consideration instead of just
 * (re)arming the delayed-ack timer. */
2179 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2181 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2183 (stcb->asoc.numduptsns) || /* we have dup's */
2184 (is_a_gap) || /* is still a gap */
2185 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2186 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
/* CMT DAC exception: if only a gap would have triggered the SACK,
 * delay it by starting the timer rather than sending now. */
2189 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2190 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2191 (stcb->asoc.send_sack == 0) &&
2192 (stcb->asoc.numduptsns == 0) &&
2193 (stcb->asoc.delayed_ack) &&
2194 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2197 * CMT DAC algorithm: With CMT, delay acks
2198 * even in the face of
2200 * reordering. Therefore, if acks that do not
2201 * have to be sent because of the above
2202 * reasons, will be delayed. That is, acks
2203 * that would have been sent due to gap
2204 * reports will be delayed with DAC. Start
2205 * the delayed ack timer.
2207 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2208 stcb->sctp_ep, stcb, NULL);
2211 * Ok we must build a SACK since the timer
2212 * is pending, we got our first packet OR
2213 * there are gaps or duplicates.
2215 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2216 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Nothing forced a SACK: arm the delayed-ack timer if not running. */
2219 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2220 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2221 stcb->sctp_ep, stcb, NULL);
/*
 * Drive delivery from the reassembly queue.  If a partial-delivery
 * (PD-API) is already in progress, continue it; otherwise start a new
 * delivery when the first fragment of the next deliverable message is
 * present and either the whole message has arrived or it already
 * exceeds the partial-delivery point.
 */
2228 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2230 struct sctp_tmit_chunk *chk;
2231 uint32_t tsize, pd_point;
2234 if (asoc->fragmented_delivery_inprogress) {
2235 sctp_service_reassembly(stcb, asoc);
2237 /* Can we proceed further, i.e. the PD-API is complete */
2238 if (asoc->fragmented_delivery_inprogress) {
2243 * Now is there some other chunk I can deliver from the reassembly
2247 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: reset the reassembly accounting to a known-clean state. */
2249 asoc->size_on_reasm_queue = 0;
2250 asoc->cnt_on_reasm_queue = 0;
2253 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
/* Deliverable only if this is a FIRST fragment and it is either the
 * next in-order SSN for its stream or unordered. */
2254 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2255 ((nxt_todel == chk->rec.data.stream_seq) ||
2256 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2258 * Yep the first one is here. We setup to start reception,
2259 * by backing down the TSN just in case we can't deliver.
2263 * Before we start though either all of the message should
2264 * be here or the socket buffer max or nothing on the
2265 * delivery queue and something can be delivered.
2267 if (stcb->sctp_socket) {
/* pd_point: the smaller of a fraction of the receive buffer and the
 * endpoint's configured partial-delivery point. */
2268 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2269 stcb->sctp_ep->partial_delivery_point);
2271 pd_point = stcb->sctp_ep->partial_delivery_point;
2273 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
/* Record the PD-API context (stream, SSN, PPID, flags) before
 * starting reassembly service. */
2274 asoc->fragmented_delivery_inprogress = 1;
2275 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2276 asoc->str_of_pdapi = chk->rec.data.stream_number;
2277 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2278 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2279 asoc->fragment_flags = chk->rec.data.rcv_flags;
2280 sctp_service_reassembly(stcb, asoc);
2281 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * Walk every chunk in a received packet.  SCTP_DATA chunks are handed
 * to sctp_process_a_data_chunk(); malformed DATA (shorter than the
 * chunk header, or header-only with no user data) aborts the
 * association with a protocol-violation / no-user-data cause.  Known
 * non-DATA chunk types found in the data region are ignored (or abort
 * when sctp_strict_data_order is set); unknown chunk types follow the
 * standard upper-two-bit rules (0x40: queue an unrecognized-chunk
 * error report, 0x80 clear: discard the rest of the packet).  On exit
 * the peer's liveness timestamp is refreshed, the reassembly queue is
 * serviced and SACK generation is triggered via sctp_sack_check().
 */
2289 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2290 struct sockaddr *src, struct sockaddr *dst,
2291 struct sctphdr *sh, struct sctp_inpcb *inp,
2292 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2293 uint8_t use_mflowid, uint32_t mflowid,
2294 uint32_t vrf_id, uint16_t port)
2296 struct sctp_data_chunk *ch, chunk_buf;
2297 struct sctp_association *asoc;
2298 int num_chunks = 0; /* number of control chunks processed */
2300 int chk_length, break_flag, last_chunk;
2301 int abort_flag = 0, was_a_gap;
2303 uint32_t highest_tsn;
2306 sctp_set_rwnd(stcb, &stcb->asoc);
2309 SCTP_TCB_LOCK_ASSERT(stcb);
/* Snapshot whether a gap existed BEFORE this packet; sctp_sack_check()
 * needs the transition (was_a_gap vs. is_a_gap) at the end. */
2311 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2312 highest_tsn = asoc->highest_tsn_inside_nr_map;
2314 highest_tsn = asoc->highest_tsn_inside_map;
2316 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2318 * setup where we got the last DATA packet from for any SACK that
2319 * may need to go out. Don't bump the net. This is done ONLY when a
2320 * chunk is assigned.
2322 asoc->last_data_chunk_from = net;
2325 * Now before we proceed we must figure out if this is a wasted
2326 * cluster... i.e. it is a small packet sent in and yet the driver
2327 * underneath allocated a full cluster for it. If so we must copy it
2328 * to a smaller mbuf and free up the cluster mbuf. This will help
2329 * with cluster starvation. Note for __Panda__ we don't do this
2330 * since it has clusters all the way down to 64 bytes.
2332 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2333 /* we only handle mbufs that are singletons.. not chains */
2334 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2336 /* ok lets see if we can copy the data up */
2339 /* get the pointers and copy */
2340 to = mtod(m, caddr_t *);
2341 from = mtod((*mm), caddr_t *);
2342 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2343 /* copy the length and free up the old */
2344 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2346 /* sucess, back copy */
2349 /* We are in trouble in the mbuf world .. yikes */
2353 /* get pointer to the first chunk header */
2354 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2355 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2360 * process all DATA chunks...
2362 *high_tsn = asoc->cumulative_tsn;
2364 asoc->data_pkts_seen++;
2365 while (stop_proc == 0) {
2366 /* validate chunk length */
2367 chk_length = ntohs(ch->ch.chunk_length);
/* Truncated chunk: the declared length runs past the packet. */
2368 if (length - *offset < chk_length) {
2369 /* all done, mutulated chunk */
2373 if (ch->ch.chunk_type == SCTP_DATA) {
2374 if ((size_t)chk_length < sizeof(struct sctp_data_chunk)) {
2376 * Need to send an abort since we had a
2377 * invalid data chunk.
2379 struct mbuf *op_err;
2380 char msg[SCTP_DIAG_INFO_LEN];
2382 snprintf(msg, sizeof(msg), "DATA chunk of length %d",
2384 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
2385 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2386 sctp_abort_association(inp, stcb, m, iphlen,
2387 src, dst, sh, op_err,
2388 use_mflowid, mflowid,
/* A DATA chunk that is exactly header-sized carries no user data,
 * which is also a protocol violation. */
2392 if ((size_t)chk_length == sizeof(struct sctp_data_chunk)) {
2394 * Need to send an abort since we had an
2397 struct mbuf *op_err;
2399 op_err = sctp_generate_no_user_data_cause(ch->dp.tsn);
2400 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2401 sctp_abort_association(inp, stcb, m, iphlen,
2402 src, dst, sh, op_err,
2403 use_mflowid, mflowid,
2407 #ifdef SCTP_AUDITING_ENABLED
2408 sctp_audit_log(0xB1, 0);
2410 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2415 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2416 chk_length, net, high_tsn, &abort_flag, &break_flag,
2425 * Set because of out of rwnd space and no
2426 * drop rep space left.
2432 /* not a data chunk in the data region */
2433 switch (ch->ch.chunk_type) {
2434 case SCTP_INITIATION:
2435 case SCTP_INITIATION_ACK:
2436 case SCTP_SELECTIVE_ACK:
2437 case SCTP_NR_SELECTIVE_ACK:
2438 case SCTP_HEARTBEAT_REQUEST:
2439 case SCTP_HEARTBEAT_ACK:
2440 case SCTP_ABORT_ASSOCIATION:
2442 case SCTP_SHUTDOWN_ACK:
2443 case SCTP_OPERATION_ERROR:
2444 case SCTP_COOKIE_ECHO:
2445 case SCTP_COOKIE_ACK:
2448 case SCTP_SHUTDOWN_COMPLETE:
2449 case SCTP_AUTHENTICATION:
2450 case SCTP_ASCONF_ACK:
2451 case SCTP_PACKET_DROPPED:
2452 case SCTP_STREAM_RESET:
2453 case SCTP_FORWARD_CUM_TSN:
2456 * Now, what do we do with KNOWN chunks that
2457 * are NOT in the right place?
2459 * For now, I do nothing but ignore them. We
2460 * may later want to add sysctl stuff to
2461 * switch out and do either an ABORT() or
2462 * possibly process them.
2464 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2465 struct mbuf *op_err;
2467 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, "");
2468 sctp_abort_association(inp, stcb,
2472 use_mflowid, mflowid,
2478 /* unknown chunk type, use bit rules */
2479 if (ch->ch.chunk_type & 0x40) {
2480 /* Add a error report to the queue */
2482 struct sctp_paramhdr *phd;
2484 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2486 phd = mtod(merr, struct sctp_paramhdr *);
2488 * We cheat and use param
2489 * type since we did not
2490 * bother to define a error
2491 * cause struct. They are
2492 * the same basic format
2493 * with different names.
2496 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2498 htons(chk_length + sizeof(*phd));
2499 SCTP_BUF_LEN(merr) = sizeof(*phd);
/* Copy the offending chunk into the error report, padded out to a
 * 32-bit boundary as the wire format requires. */
2500 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT);
2501 if (SCTP_BUF_NEXT(merr)) {
2502 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2505 sctp_queue_op_err(stcb, merr);
2512 if ((ch->ch.chunk_type & 0x80) == 0) {
2513 /* discard the rest of this packet */
2515 } /* else skip this bad chunk and
2518 } /* switch of chunk type */
/* Advance to the next chunk on a 32-bit boundary. */
2520 *offset += SCTP_SIZE32(chk_length);
2521 if ((*offset >= length) || stop_proc) {
2522 /* no more data left in the mbuf chain */
2526 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2527 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2536 * we need to report rwnd overrun drops.
2538 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2542 * Did we get data, if so update the time for auto-close and
2543 * give peer credit for being alive.
2545 SCTP_STAT_INCR(sctps_recvpktwithdata);
2546 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2547 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2548 stcb->asoc.overall_error_count,
2550 SCTP_FROM_SCTP_INDATA,
/* Receiving data from the peer proves liveness: clear error count and
 * refresh the last-received timestamp. */
2553 stcb->asoc.overall_error_count = 0;
2554 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2556 /* now service all of the reassm queue if needed */
2557 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2558 sctp_service_queues(stcb, asoc);
2560 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2561 /* Assure that we ack right away */
2562 stcb->asoc.send_sack = 1;
2564 /* Start a sack timer or QUEUE a SACK for sending */
2565 sctp_sack_check(stcb, was_a_gap);
/*
 * Mark every TSN covered by one gap-ack fragment
 * [last_tsn + frag_strt .. last_tsn + frag_end] on the sent queue.
 * Chunks newly acked here leave the flight-size accounting, feed RTO
 * measurement, and update the CMT (rtx-)pseudo-cumack and SFR/HTNA/DAC
 * trackers on their destination net.  Non-skipped chunks end up
 * SCTP_DATAGRAM_MARKED, or SCTP_DATAGRAM_NR_ACKED (with their data
 * released) on the nr-sack path.  *p_tp1 carries the queue position
 * across calls.  Returns the wake flag (used for nr-sack only).
 */
2570 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2571 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2573 uint32_t * biggest_newly_acked_tsn,
2574 uint32_t * this_sack_lowest_newack,
2577 struct sctp_tmit_chunk *tp1;
2578 unsigned int theTSN;
2579 int j, wake_him = 0, circled = 0;
2581 /* Recover the tp1 we last saw */
2584 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2586 for (j = frag_strt; j <= frag_end; j++) {
/* Fragment offsets are relative to the SACK's cumulative TSN. */
2587 theTSN = j + last_tsn;
2589 if (tp1->rec.data.doing_fast_retransmit)
2593 * CMT: CUCv2 algorithm. For each TSN being
2594 * processed from the sent queue, track the
2595 * next expected pseudo-cumack, or
2596 * rtx_pseudo_cumack, if required. Separate
2597 * cumack trackers for first transmissions,
2598 * and retransmissions.
2600 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2601 (tp1->snd_count == 1)) {
2602 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2603 tp1->whoTo->find_pseudo_cumack = 0;
2605 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2606 (tp1->snd_count > 1)) {
2607 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2608 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2610 if (tp1->rec.data.TSN_seq == theTSN) {
2611 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2613 * must be held until
2616 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2618 * If it is less than RESEND, it is
2619 * now no-longer in flight.
2620 * Higher values may already be set
2621 * via previous Gap Ack Blocks...
2622 * i.e. ACKED or RESEND.
2624 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2625 *biggest_newly_acked_tsn)) {
2626 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2629 * CMT: SFR algo (and HTNA) - set
2630 * saw_newack to 1 for dest being
2631 * newly acked. update
2632 * this_sack_highest_newack if
2635 if (tp1->rec.data.chunk_was_revoked == 0)
2636 tp1->whoTo->saw_newack = 1;
2638 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2639 tp1->whoTo->this_sack_highest_newack)) {
2640 tp1->whoTo->this_sack_highest_newack =
2641 tp1->rec.data.TSN_seq;
2644 * CMT DAC algo: also update
2645 * this_sack_lowest_newack
2647 if (*this_sack_lowest_newack == 0) {
2648 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2649 sctp_log_sack(*this_sack_lowest_newack,
2651 tp1->rec.data.TSN_seq,
2654 SCTP_LOG_TSN_ACKED);
2656 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2659 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2660 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2661 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2662 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2663 * Separate pseudo_cumack trackers for first transmissions and
2666 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2667 if (tp1->rec.data.chunk_was_revoked == 0) {
2668 tp1->whoTo->new_pseudo_cumack = 1;
2670 tp1->whoTo->find_pseudo_cumack = 1;
2672 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2673 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2675 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2676 if (tp1->rec.data.chunk_was_revoked == 0) {
2677 tp1->whoTo->new_pseudo_cumack = 1;
2679 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2681 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2682 sctp_log_sack(*biggest_newly_acked_tsn,
2684 tp1->rec.data.TSN_seq,
2687 SCTP_LOG_TSN_ACKED);
2689 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2690 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2691 tp1->whoTo->flight_size,
2693 (uintptr_t) tp1->whoTo,
2694 tp1->rec.data.TSN_seq);
/* Chunk is no longer in flight: shrink per-net and total flight
 * accounting and credit net_ack for cwnd growth. */
2696 sctp_flight_size_decrease(tp1);
2697 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2698 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2701 sctp_total_flight_decrease(stcb, tp1);
2703 tp1->whoTo->net_ack += tp1->send_size;
2704 if (tp1->snd_count < 2) {
2706 * True non-retransmited chunk
2708 tp1->whoTo->net_ack2 += tp1->send_size;
/* First-transmission chunks are usable for RTT measurement. */
2716 sctp_calculate_rto(stcb,
2719 &tp1->sent_rcv_time,
2720 sctp_align_safe_nocopy,
2721 SCTP_RTT_FROM_DATA);
2724 if (tp1->whoTo->rto_needed == 0) {
2725 tp1->whoTo->rto_needed = 1;
2731 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2732 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2733 stcb->asoc.this_sack_highest_gap)) {
2734 stcb->asoc.this_sack_highest_gap =
2735 tp1->rec.data.TSN_seq;
2737 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2738 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2739 #ifdef SCTP_AUDITING_ENABLED
2740 sctp_audit_log(0xB2,
2741 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2746 * All chunks NOT UNSENT fall through here and are marked
2747 * (leave PR-SCTP ones that are to skip alone though)
2749 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2750 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2751 tp1->sent = SCTP_DATAGRAM_MARKED;
2753 if (tp1->rec.data.chunk_was_revoked) {
2754 /* deflate the cwnd */
2755 tp1->whoTo->cwnd -= tp1->book_size;
2756 tp1->rec.data.chunk_was_revoked = 0;
2758 /* NR Sack code here */
2760 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
/* nr-sack: the chunk will never be renegged, so release its data
 * and per-stream queue accounting now. */
2761 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2762 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2765 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2768 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2774 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2775 sctp_m_freem(tp1->data);
2782 } /* if (tp1->TSN_seq == theTSN) */
2783 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
/* Walk forward; when we fall off the tail once, wrap to the head
 * (circled) to tolerate out-of-order fragment lists. */
2786 tp1 = TAILQ_NEXT(tp1, sctp_next);
2787 if ((tp1 == NULL) && (circled == 0)) {
2789 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2791 } /* end while (tp1) */
2794 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2796 /* In case the fragments were not in order we must reset */
2797 } /* end for (j = fragStart */
2799 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate the num_seg + num_nr_seg gap-ack blocks carried in a SACK
 * chunk at *offset in mbuf m.  Malformed blocks (start > end) are
 * skipped; out-of-order blocks restart the sent-queue scan from the
 * head.  Per-fragment marking is delegated to
 * sctp_process_segment_range().  Returns nonzero if any chunk's data
 * was freed (nr-sack).
 */
2804 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
2805 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
2806 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
2807 int num_seg, int num_nr_seg, int *rto_ok)
2809 struct sctp_gap_ack_block *frag, block;
2810 struct sctp_tmit_chunk *tp1;
2815 uint16_t frag_strt, frag_end, prev_frag_end;
2817 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* The first num_seg blocks are renegable gap acks; the remainder are
 * non-renegable (nr) blocks. */
2821 for (i = 0; i < (num_seg + num_nr_seg); i++) {
2824 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2826 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
2827 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
2828 *offset += sizeof(block);
/* Ran off the end of the mbuf chain: stop processing. */
2830 return (chunk_freed);
2832 frag_strt = ntohs(frag->start);
2833 frag_end = ntohs(frag->end);
2835 if (frag_strt > frag_end) {
2836 /* This gap report is malformed, skip it. */
2839 if (frag_strt <= prev_frag_end) {
2840 /* This gap report is not in order, so restart. */
2841 tp1 = TAILQ_FIRST(&asoc->sent_queue);
2843 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
2844 *biggest_tsn_acked = last_tsn + frag_end;
2851 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
2852 non_revocable, &num_frs, biggest_newly_acked_tsn,
2853 this_sack_lowest_newack, rto_ok)) {
2856 prev_frag_end = frag_end;
2858 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2860 sctp_log_fr(*biggest_tsn_acked,
2861 *biggest_newly_acked_tsn,
2862 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
2864 return (chunk_freed);
/*
 * Scan the sent queue above the cumulative ack for revoked chunks:
 * a chunk previously ACKED by a gap report but NOT covered by this
 * SACK has been revoked by the peer and must go back in flight
 * (flight size and cwnd are re-inflated); a chunk MARKED by this SACK
 * is promoted to ACKED.
 */
2868 sctp_check_for_revoked(struct sctp_tcb *stcb,
2869 struct sctp_association *asoc, uint32_t cumack,
2870 uint32_t biggest_tsn_acked)
2872 struct sctp_tmit_chunk *tp1;
2874 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2875 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
2877 * ok this guy is either ACK or MARKED. If it is
2878 * ACKED it has been previously acked but not this
2879 * time i.e. revoked. If it is MARKED it was ACK'ed
/* Beyond the highest TSN this SACK reports on: nothing after this
 * point can be judged, stop considering revocation. */
2882 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
2885 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
2886 /* it has been revoked */
2887 tp1->sent = SCTP_DATAGRAM_SENT;
2888 tp1->rec.data.chunk_was_revoked = 1;
2890 * We must add this stuff back in to assure
2891 * timers and such get started.
2893 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2894 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
2895 tp1->whoTo->flight_size,
2897 (uintptr_t) tp1->whoTo,
2898 tp1->rec.data.TSN_seq);
2900 sctp_flight_size_increase(tp1);
2901 sctp_total_flight_increase(stcb, tp1);
2903 * We inflate the cwnd to compensate for our
2904 * artificial inflation of the flight_size.
2906 tp1->whoTo->cwnd += tp1->book_size;
2907 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2908 sctp_log_sack(asoc->last_acked_seq,
2910 tp1->rec.data.TSN_seq,
2913 SCTP_LOG_TSN_REVOKED);
2915 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
2916 /* it has been re-acked in this SACK */
2917 tp1->sent = SCTP_DATAGRAM_ACKED;
/* UNSENT chunks mark the end of transmitted data on the queue. */
2920 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
/*
 * Apply the fast-retransmit "strike" rules to every chunk on the sent
 * queue that this SACK did not cover.  The CMT SFR/HTNA rules and the
 * DAC rule decide whether a miss counts as a strike; PR-SCTP lifetime
 * and retransmit-limit policies may drop a chunk instead of striking
 * it.  Chunks that reach SCTP_DATAGRAM_RESEND are taken out of flight,
 * credited back to the peer's rwnd, marked for fast retransmit and —
 * depending on the CMT / SCTP_FR_TO_ALTERNATE policy — moved to an
 * alternate destination net.
 */
2927 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
2928 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
2930 struct sctp_tmit_chunk *tp1;
2931 int strike_flag = 0;
2933 int tot_retrans = 0;
2934 uint32_t sending_seq;
2935 struct sctp_nets *net;
2936 int num_dests_sacked = 0;
2939 * select the sending_seq, this is either the next thing ready to be
2940 * sent but not transmitted, OR, the next seq we assign.
2942 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
2944 sending_seq = asoc->sending_seq;
2946 sending_seq = tp1->rec.data.TSN_seq;
2949 /* CMT DAC algo: finding out if SACK is a mixed SACK */
2950 if ((asoc->sctp_cmt_on_off > 0) &&
2951 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
2952 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2953 if (net->saw_newack)
/* Only needed for PR-SCTP lifetime (TTL) expiry checks below. */
2957 if (stcb->asoc.peer_supports_prsctp) {
2958 (void)SCTP_GETTIME_TIMEVAL(&now);
2960 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
2962 if (tp1->no_fr_allowed) {
2963 /* this one had a timeout or something */
2966 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
2967 if (tp1->sent < SCTP_DATAGRAM_RESEND)
2968 sctp_log_fr(biggest_tsn_newly_acked,
2969 tp1->rec.data.TSN_seq,
2971 SCTP_FR_LOG_CHECK_STRIKE);
/* Nothing above the biggest acked TSN (or unsent) can be struck. */
2973 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
2974 tp1->sent == SCTP_DATAGRAM_UNSENT) {
2978 if (stcb->asoc.peer_supports_prsctp) {
2979 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
2980 /* Is it expired? */
2981 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
2982 /* Yes so drop it */
2983 if (tp1->data != NULL) {
2984 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
2985 SCTP_SO_NOT_LOCKED);
2991 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
2992 /* we are beyond the tsn in the sack */
2995 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
2996 /* either a RESEND, ACKED, or MARKED */
2998 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
2999 /* Continue strikin FWD-TSN chunks */
3000 tp1->rec.data.fwd_tsn_cnt++;
3005 * CMT : SFR algo (covers part of DAC and HTNA as well)
3007 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3009 * No new acks were receieved for data sent to this
3010 * dest. Therefore, according to the SFR algo for
3011 * CMT, no data sent to this dest can be marked for
3012 * FR using this SACK.
3015 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3016 tp1->whoTo->this_sack_highest_newack)) {
3018 * CMT: New acks were receieved for data sent to
3019 * this dest. But no new acks were seen for data
3020 * sent after tp1. Therefore, according to the SFR
3021 * algo for CMT, tp1 cannot be marked for FR using
3022 * this SACK. This step covers part of the DAC algo
3023 * and the HTNA algo as well.
3028 * Here we check to see if we were have already done a FR
3029 * and if so we see if the biggest TSN we saw in the sack is
3030 * smaller than the recovery point. If so we don't strike
3031 * the tsn... otherwise we CAN strike the TSN.
3034 * @@@ JRI: Check for CMT if (accum_moved &&
3035 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3038 if (accum_moved && asoc->fast_retran_loss_recovery) {
3040 * Strike the TSN if in fast-recovery and cum-ack
3043 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3044 sctp_log_fr(biggest_tsn_newly_acked,
3045 tp1->rec.data.TSN_seq,
3047 SCTP_FR_LOG_STRIKE_CHUNK);
3049 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3052 if ((asoc->sctp_cmt_on_off > 0) &&
3053 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3055 * CMT DAC algorithm: If SACK flag is set to
3056 * 0, then lowest_newack test will not pass
3057 * because it would have been set to the
3058 * cumack earlier. If not already to be
3059 * rtx'd, If not a mixed sack and if tp1 is
3060 * not between two sacked TSNs, then mark by
3061 * one more. NOTE that we are marking by one
3062 * additional time since the SACK DAC flag
3063 * indicates that two packets have been
3064 * received after this missing TSN.
3066 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3067 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3068 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3069 sctp_log_fr(16 + num_dests_sacked,
3070 tp1->rec.data.TSN_seq,
3072 SCTP_FR_LOG_STRIKE_CHUNK);
3077 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3078 (asoc->sctp_cmt_on_off == 0)) {
3080 * For those that have done a FR we must take
3081 * special consideration if we strike. I.e the
3082 * biggest_newly_acked must be higher than the
3083 * sending_seq at the time we did the FR.
3086 #ifdef SCTP_FR_TO_ALTERNATE
3088 * If FR's go to new networks, then we must only do
3089 * this for singly homed asoc's. However if the FR's
3090 * go to the same network (Armando's work) then its
3091 * ok to FR multiple times.
3099 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3100 tp1->rec.data.fast_retran_tsn)) {
3102 * Strike the TSN, since this ack is
3103 * beyond where things were when we
3106 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3107 sctp_log_fr(biggest_tsn_newly_acked,
3108 tp1->rec.data.TSN_seq,
3110 SCTP_FR_LOG_STRIKE_CHUNK);
3112 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3116 if ((asoc->sctp_cmt_on_off > 0) &&
3117 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3119 * CMT DAC algorithm: If
3120 * SACK flag is set to 0,
3121 * then lowest_newack test
3122 * will not pass because it
3123 * would have been set to
3124 * the cumack earlier. If
3125 * not already to be rtx'd,
3126 * If not a mixed sack and
3127 * if tp1 is not between two
3128 * sacked TSNs, then mark by
3129 * one more. NOTE that we
3130 * are marking by one
3131 * additional time since the
3132 * SACK DAC flag indicates
3133 * that two packets have
3134 * been received after this
3137 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3138 (num_dests_sacked == 1) &&
3139 SCTP_TSN_GT(this_sack_lowest_newack,
3140 tp1->rec.data.TSN_seq)) {
3141 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3142 sctp_log_fr(32 + num_dests_sacked,
3143 tp1->rec.data.TSN_seq,
3145 SCTP_FR_LOG_STRIKE_CHUNK);
3147 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3155 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3158 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3159 biggest_tsn_newly_acked)) {
3161 * We don't strike these: This is the HTNA
3162 * algorithm i.e. we don't strike If our TSN is
3163 * larger than the Highest TSN Newly Acked.
3167 /* Strike the TSN */
3168 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3169 sctp_log_fr(biggest_tsn_newly_acked,
3170 tp1->rec.data.TSN_seq,
3172 SCTP_FR_LOG_STRIKE_CHUNK);
3174 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3177 if ((asoc->sctp_cmt_on_off > 0) &&
3178 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3180 * CMT DAC algorithm: If SACK flag is set to
3181 * 0, then lowest_newack test will not pass
3182 * because it would have been set to the
3183 * cumack earlier. If not already to be
3184 * rtx'd, If not a mixed sack and if tp1 is
3185 * not between two sacked TSNs, then mark by
3186 * one more. NOTE that we are marking by one
3187 * additional time since the SACK DAC flag
3188 * indicates that two packets have been
3189 * received after this missing TSN.
3191 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3192 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3193 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3194 sctp_log_fr(48 + num_dests_sacked,
3195 tp1->rec.data.TSN_seq,
3197 SCTP_FR_LOG_STRIKE_CHUNK);
/* Chunk has just reached the strike threshold: undo its flight and
 * rwnd accounting and prepare the fast retransmit. */
3203 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3204 struct sctp_nets *alt;
3206 /* fix counts and things */
3207 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3208 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3209 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3211 (uintptr_t) tp1->whoTo,
3212 tp1->rec.data.TSN_seq);
3215 tp1->whoTo->net_ack++;
3216 sctp_flight_size_decrease(tp1);
3217 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3218 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3222 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3223 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3224 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3226 /* add back to the rwnd */
3227 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3229 /* remove from the total flight */
3230 sctp_total_flight_decrease(stcb, tp1);
3232 if ((stcb->asoc.peer_supports_prsctp) &&
3233 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3235 * Has it been retransmitted tv_sec times? -
3236 * we store the retran count there.
3238 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3239 /* Yes, so drop it */
3240 if (tp1->data != NULL) {
3241 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3242 SCTP_SO_NOT_LOCKED);
3244 /* Make sure to flag we had a FR */
3245 tp1->whoTo->net_ack++;
3250 * SCTP_PRINTF("OK, we are now ready to FR this
3253 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3254 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3258 /* This is a subsequent FR */
3259 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3261 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3262 if (asoc->sctp_cmt_on_off > 0) {
3264 * CMT: Using RTX_SSTHRESH policy for CMT.
3265 * If CMT is being used, then pick dest with
3266 * largest ssthresh for any retransmission.
3268 tp1->no_fr_allowed = 1;
3270 /* sa_ignore NO_NULL_CHK */
3271 if (asoc->sctp_cmt_pf > 0) {
3273 * JRS 5/18/07 - If CMT PF is on,
3274 * use the PF version of
3277 alt = sctp_find_alternate_net(stcb, alt, 2);
3280 * JRS 5/18/07 - If only CMT is on,
3281 * use the CMT version of
3284 /* sa_ignore NO_NULL_CHK */
3285 alt = sctp_find_alternate_net(stcb, alt, 1);
3291 * CUCv2: If a different dest is picked for
3292 * the retransmission, then new
3293 * (rtx-)pseudo_cumack needs to be tracked
3294 * for orig dest. Let CUCv2 track new (rtx-)
3295 * pseudo-cumack always.
3298 tp1->whoTo->find_pseudo_cumack = 1;
3299 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3301 } else {/* CMT is OFF */
3303 #ifdef SCTP_FR_TO_ALTERNATE
3304 /* Can we find an alternate? */
3305 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3308 * default behavior is to NOT retransmit
3309 * FR's to an alternate. Armando Caro's
3310 * paper details why.
3316 tp1->rec.data.doing_fast_retransmit = 1;
3318 /* mark the sending seq for possible subsequent FR's */
3320 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3321 * (uint32_t)tpi->rec.data.TSN_seq);
3323 if (TAILQ_EMPTY(&asoc->send_queue)) {
3325 * If the queue of send is empty then its
3326 * the next sequence number that will be
3327 * assigned so we subtract one from this to
3328 * get the one we last sent.
3330 tp1->rec.data.fast_retran_tsn = sending_seq;
3333 * If there are chunks on the send queue
3334 * (unsent data that has made it from the
3335 * stream queues but not out the door, we
3336 * take the first one (which will have the
3337 * lowest TSN) and subtract one to get the
3340 struct sctp_tmit_chunk *ttt;
3342 ttt = TAILQ_FIRST(&asoc->send_queue);
3343 tp1->rec.data.fast_retran_tsn =
3344 ttt->rec.data.TSN_seq;
3349 * this guy had a RTO calculation pending on
3352 if ((tp1->whoTo != NULL) &&
3353 (tp1->whoTo->rto_needed == 0)) {
3354 tp1->whoTo->rto_needed = 1;
/* Retransmission moves to the alternate net: swap the net reference
 * counts accordingly. */
3358 if (alt != tp1->whoTo) {
3359 /* yes, there is an alternate. */
3360 sctp_free_remote_addr(tp1->whoTo);
3361 /* sa_ignore FREED_MEMORY */
3363 atomic_add_int(&alt->ref_count, 1);
/*
 * Try to move the PR-SCTP "advanced peer ack point" forward over chunks
 * on the sent queue that may be abandoned: chunks already marked
 * SCTP_FORWARD_TSN_SKIP or SCTP_DATAGRAM_NR_ACKED, and RESEND chunks
 * whose PR-SCTP TTL has expired.  Returns the chunk at the new ack
 * point (tracked in a_adv) so the caller can build a FWD-TSN, or stops
 * at the first chunk that cannot be skipped.
 *
 * NOTE(review): this listing is elided in places (missing braces and
 * statements); confirm exact control flow against the full file.
 */
3369 struct sctp_tmit_chunk *
3370 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3371 struct sctp_association *asoc)
3373 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to advance unless the peer negotiated PR-SCTP. */
3377 if (asoc->peer_supports_prsctp == 0) {
/* Safe traversal: chunks may be released (removed) while walking. */
3380 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
/* Only SKIP, RESEND or NR_ACKED chunks are candidates to pass over. */
3381 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3382 tp1->sent != SCTP_DATAGRAM_RESEND &&
3383 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3384 /* no chance to advance, out of here */
3387 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3388 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3389 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3390 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3391 asoc->advanced_peer_ack_point,
3392 tp1->rec.data.TSN_seq, 0, 0);
3395 if (!PR_SCTP_ENABLED(tp1->flags)) {
3397 * We can't fwd-tsn past any that are reliable aka
3398 * retransmitted until the asoc fails.
/* Wall-clock "now", compared against the chunk's timetodrop below. */
3403 (void)SCTP_GETTIME_TIMEVAL(&now);
3407 * now we got a chunk which is marked for another
3408 * retransmission to a PR-stream but has run out its chances
3409 * already maybe OR has been marked to skip now. Can we skip
3410 * it if its a resend?
3412 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3413 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3415 * Now is this one marked for resend and its time is
/* TTL policy: lifetime expired, so the chunk may be abandoned. */
3418 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3419 /* Yes so drop it */
3421 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3422 1, SCTP_SO_NOT_LOCKED);
3426 * No, we are done when hit one for resend
3427 * whos time as not expired.
3433 * Ok now if this chunk is marked to drop it we can clean up
3434 * the chunk, advance our peer ack point and we can check
3437 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3438 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3439 /* advance PeerAckPoint goes forward */
3440 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3441 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3443 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3444 /* No update but we do save the chk */
3449 * If it is still in RESEND we can advance no
/*
 * Audit the flight-size bookkeeping: re-classify every chunk on the
 * sent queue by its ->sent state and compare against the association's
 * total_flight / total_flight_count captured on entry.  Used by the
 * express SACK path to detect and repair accounting drift.
 *
 * NOTE(review): the return-type line, closing braces and the
 * counter-increment statements are elided from this listing; the
 * result value (ret) is computed in lines not shown.
 */
3459 sctp_fs_audit(struct sctp_association *asoc)
3461 struct sctp_tmit_chunk *chk;
3462 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3463 int entry_flight, entry_cnt, ret;
/* Snapshot the counters being audited before we re-count. */
3465 entry_flight = asoc->total_flight;
3466 entry_cnt = asoc->total_flight_count;
/* If everything queued is PR-SCTP, the audit is not meaningful. */
3469 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Re-count: bucket each chunk by where it sits relative to RESEND/ACKED. */
3472 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3473 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3474 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3475 chk->rec.data.TSN_seq,
3479 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3481 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3483 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Inconsistency detected: panic (presumably under INVARIANTS — the
 * surrounding #ifdef is elided here) or log the divergent counters. */
3490 if ((inflight > 0) || (inbetween > 0)) {
3492 panic("Flight size-express incorrect? \n");
3494 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3495 entry_flight, entry_cnt);
3497 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3498 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe: clear its
 * window_probe flag, remove it from the flight-size accounting and
 * mark it SCTP_DATAGRAM_RESEND so it is retransmitted as normal data
 * now that the peer's receive window has reopened.  Chunks already
 * acked/skipped (or with no data) are left alone.
 */
3507 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3508 struct sctp_association *asoc,
3509 struct sctp_tmit_chunk *tp1)
3511 tp1->window_probe = 0;
/* Already acked/abandoned, or data freed: do not move it back. */
3512 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3513 /* TSN's skipped we do NOT move back. */
3514 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3515 tp1->whoTo->flight_size,
3517 (uintptr_t) tp1->whoTo,
3518 tp1->rec.data.TSN_seq);
3521 /* First setup this by shrinking flight */
/* Optional per-CC-module hook for a TSN leaving the network. */
3522 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3523 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3526 sctp_flight_size_decrease(tp1);
3527 sctp_total_flight_decrease(stcb, tp1);
3528 /* Now mark for resend */
3529 tp1->sent = SCTP_DATAGRAM_RESEND;
3530 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3532 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3533 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3534 tp1->whoTo->flight_size,
3536 (uintptr_t) tp1->whoTo,
3537 tp1->rec.data.TSN_seq);
/*
 * Fast-path ("express") SACK processing for a SACK carrying no gap-ack
 * blocks: only a cumulative ack and an advertised rwnd.  In order it
 * (1) validates the cum-ack, (2) frees every sent-queue chunk up to
 * cumack and updates flight-size / RTO / CC state, (3) wakes the
 * sending socket, (4) recomputes peers_rwnd and handles window-probe
 * recovery and T3-rxt timers, (5) audits flight-size consistency,
 * (6) drives shutdown progression when the queues drain, and
 * (7) performs PR-SCTP FWD-TSN advancement.  Sets *abort_now when a
 * protocol violation forces an abort.  Called with the TCB locked.
 *
 * NOTE(review): this listing is elided in many places (missing braces,
 * #ifdef/#endif pairs and argument lines); confirm control flow
 * against the full file before modifying.
 */
3542 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3543 uint32_t rwnd, int *abort_now, int ecne_seen)
3545 struct sctp_nets *net;
3546 struct sctp_association *asoc;
3547 struct sctp_tmit_chunk *tp1, *tp2;
3549 int win_probe_recovery = 0;
3550 int win_probe_recovered = 0;
3551 int j, done_once = 0;
3554 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3555 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3556 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3558 SCTP_TCB_LOCK_ASSERT(stcb);
3559 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Debug ring buffer of received cum-acks (compile-time option). */
3560 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3561 stcb->asoc.cumack_log_at++;
3562 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3563 stcb->asoc.cumack_log_at = 0;
3567 old_rwnd = asoc->peers_rwnd;
/* Old SACK (acking behind our last ack) vs. pure window update. */
3568 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3571 } else if (asoc->last_acked_seq == cumack) {
3572 /* Window update sack */
3573 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3574 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3575 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3576 /* SWS sender side engages */
3577 asoc->peers_rwnd = 0;
3579 if (asoc->peers_rwnd > old_rwnd) {
3584 /* First setup for CC stuff */
3585 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3586 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3587 /* Drag along the window_tsn for cwr's */
3588 net->cwr_window_tsn = cumack;
3590 net->prev_cwnd = net->cwnd;
3595 * CMT: Reset CUC and Fast recovery algo variables before
3598 net->new_pseudo_cumack = 0;
3599 net->will_exit_fast_recovery = 0;
3600 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3601 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Strict-SACK sanity: the cum-ack must not cover TSNs we never sent. */
3604 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3607 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3608 tp1 = TAILQ_LAST(&asoc->sent_queue,
3609 sctpchunk_listhead);
3610 send_s = tp1->rec.data.TSN_seq + 1;
3612 send_s = asoc->sending_seq;
3614 if (SCTP_TSN_GE(cumack, send_s)) {
3616 struct mbuf *op_err;
3617 char msg[SCTP_DIAG_INFO_LEN];
3621 panic("Impossible sack 1");
3626 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal then TSN %8.8x",
3628 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
3629 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3630 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3635 asoc->this_sack_highest_gap = cumack;
3636 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3637 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3638 stcb->asoc.overall_error_count,
3640 SCTP_FROM_SCTP_INDATA,
/* Any valid SACK arrival clears the association error counter. */
3643 stcb->asoc.overall_error_count = 0;
3644 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3645 /* process the new consecutive TSN first */
3646 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3647 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3648 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3649 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3651 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3653 * If it is less than ACKED, it is
3654 * now no-longer in flight. Higher
3655 * values may occur during marking
3657 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3658 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3659 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3660 tp1->whoTo->flight_size,
3662 (uintptr_t) tp1->whoTo,
3663 tp1->rec.data.TSN_seq);
3665 sctp_flight_size_decrease(tp1);
3666 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3667 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3670 /* sa_ignore NO_NULL_CHK */
3671 sctp_total_flight_decrease(stcb, tp1);
3673 tp1->whoTo->net_ack += tp1->send_size;
/* snd_count < 2: never retransmitted, so usable for RTT (Karn). */
3674 if (tp1->snd_count < 2) {
3676 * True non-retransmited
3679 tp1->whoTo->net_ack2 +=
3682 /* update RTO too? */
3691 sctp_calculate_rto(stcb,
3693 &tp1->sent_rcv_time,
3694 sctp_align_safe_nocopy,
3695 SCTP_RTT_FROM_DATA);
3698 if (tp1->whoTo->rto_needed == 0) {
3699 tp1->whoTo->rto_needed = 1;
3705 * CMT: CUCv2 algorithm. From the
3706 * cumack'd TSNs, for each TSN being
3707 * acked for the first time, set the
3708 * following variables for the
3709 * corresp destination.
3710 * new_pseudo_cumack will trigger a
3712 * find_(rtx_)pseudo_cumack will
3713 * trigger search for the next
3714 * expected (rtx-)pseudo-cumack.
3716 tp1->whoTo->new_pseudo_cumack = 1;
3717 tp1->whoTo->find_pseudo_cumack = 1;
3718 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3720 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3721 /* sa_ignore NO_NULL_CHK */
3722 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3725 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3726 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3728 if (tp1->rec.data.chunk_was_revoked) {
3729 /* deflate the cwnd */
3730 tp1->whoTo->cwnd -= tp1->book_size;
3731 tp1->rec.data.chunk_was_revoked = 0;
/* Keep the per-stream outstanding-chunk counter consistent. */
3733 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3734 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3735 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3738 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
/* Chunk fully acked: unlink it and release buffer space + mbufs. */
3742 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3744 /* sa_ignore NO_NULL_CHK */
3745 sctp_free_bufspace(stcb, asoc, tp1, 1);
3746 sctp_m_freem(tp1->data);
3749 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3750 sctp_log_sack(asoc->last_acked_seq,
3752 tp1->rec.data.TSN_seq,
3755 SCTP_LOG_FREE_SENT);
3757 asoc->sent_queue_cnt--;
3758 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/* Send-buffer space was freed above: wake any blocked sender. */
3765 /* sa_ignore NO_NULL_CHK */
3766 if (stcb->sctp_socket) {
3767 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3771 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
3772 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3773 /* sa_ignore NO_NULL_CHK */
3774 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
3776 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Lock-order dance: drop the TCB lock to take the socket lock,
 * holding a refcount so the assoc cannot vanish meanwhile. */
3777 so = SCTP_INP_SO(stcb->sctp_ep);
3778 atomic_add_int(&stcb->asoc.refcnt, 1);
3779 SCTP_TCB_UNLOCK(stcb);
3780 SCTP_SOCKET_LOCK(so, 1);
3781 SCTP_TCB_LOCK(stcb);
3782 atomic_subtract_int(&stcb->asoc.refcnt, 1);
3783 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
3784 /* assoc was freed while we were unlocked */
3785 SCTP_SOCKET_UNLOCK(so, 1);
3789 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
3790 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
3791 SCTP_SOCKET_UNLOCK(so, 1);
3794 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
3795 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
3799 /* JRS - Use the congestion control given in the CC module */
3800 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
3801 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3802 if (net->net_ack2 > 0) {
3804 * Karn's rule applies to clearing error
3805 * count, this is optional.
3807 net->error_count = 0;
3808 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
3809 /* addr came good */
3810 net->dest_state |= SCTP_ADDR_REACHABLE;
3811 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
3812 0, (void *)net, SCTP_SO_NOT_LOCKED);
3814 if (net == stcb->asoc.primary_destination) {
3815 if (stcb->asoc.alternate) {
3817 * release the alternate,
3820 sctp_free_remote_addr(stcb->asoc.alternate);
3821 stcb->asoc.alternate = NULL;
/* Path left "potentially failed" state: restart HB and notify CC. */
3824 if (net->dest_state & SCTP_ADDR_PF) {
3825 net->dest_state &= ~SCTP_ADDR_PF;
3826 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
3827 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
3828 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
3829 /* Done with this net */
3832 /* restore any doubled timers */
3833 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
3834 if (net->RTO < stcb->asoc.minrto) {
3835 net->RTO = stcb->asoc.minrto;
3837 if (net->RTO > stcb->asoc.maxrto) {
3838 net->RTO = stcb->asoc.maxrto;
3842 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
3844 asoc->last_acked_seq = cumack;
3846 if (TAILQ_EMPTY(&asoc->sent_queue)) {
3847 /* nothing left in-flight */
3848 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3849 net->flight_size = 0;
3850 net->partial_bytes_acked = 0;
3852 asoc->total_flight = 0;
3853 asoc->total_flight_count = 0;
/* Recompute the peer's usable rwnd from the advertised value. */
3856 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3857 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3858 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3859 /* SWS sender side engages */
3860 asoc->peers_rwnd = 0;
3862 if (asoc->peers_rwnd > old_rwnd) {
3863 win_probe_recovery = 1;
3865 /* Now assure a timer where data is queued at */
/* Per-destination: window-probe recovery and T3-rxt timer management. */
3868 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3871 if (win_probe_recovery && (net->window_probe)) {
3872 win_probe_recovered = 1;
3874 * Find first chunk that was used with window probe
3875 * and clear the sent
3877 /* sa_ignore FREED_MEMORY */
3878 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3879 if (tp1->window_probe) {
3880 /* move back to data send queue */
3881 sctp_window_probe_recovery(stcb, asoc, tp1);
3886 if (net->RTO == 0) {
3887 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
3889 to_ticks = MSEC_TO_TICKS(net->RTO);
3891 if (net->flight_size) {
3893 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3894 sctp_timeout_handler, &net->rxt_timer);
3895 if (net->window_probe) {
3896 net->window_probe = 0;
3899 if (net->window_probe) {
3901 * In window probes we must assure a timer
3902 * is still running there
3904 net->window_probe = 0;
3905 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3906 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
3907 sctp_timeout_handler, &net->rxt_timer);
3909 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
3910 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
3912 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
/* Flight-size audit: if express accounting drifted, rebuild it from
 * the sent queue.  (The if-head of this condition is elided here.) */
3917 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
3918 (asoc->sent_queue_retran_cnt == 0) &&
3919 (win_probe_recovered == 0) &&
3922 * huh, this should not happen unless all packets are
3923 * PR-SCTP and marked to skip of course.
3925 if (sctp_fs_audit(asoc)) {
3926 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3927 net->flight_size = 0;
3929 asoc->total_flight = 0;
3930 asoc->total_flight_count = 0;
3931 asoc->sent_queue_retran_cnt = 0;
3932 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3933 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3934 sctp_flight_size_increase(tp1);
3935 sctp_total_flight_increase(stcb, tp1);
3936 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3937 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3944 /**********************************/
3945 /* Now what about shutdown issues */
3946 /**********************************/
3947 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
3948 /* nothing left on sendqueue.. consider done */
/* Stuck on an unfinished message while shutting down: user never
 * signalled EOR, so flag a partial message left behind. */
3950 if ((asoc->stream_queue_cnt == 1) &&
3951 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
3952 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
3953 (asoc->locked_on_sending)
3955 struct sctp_stream_queue_pending *sp;
3958 * I may be in a state where we got all across.. but
3959 * cannot write more due to a shutdown... we abort
3960 * since the user did not indicate EOR in this case.
3961 * The sp will be cleaned during free of the asoc.
3963 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
3965 if ((sp) && (sp->length == 0)) {
3966 /* Let cleanup code purge it */
3967 if (sp->msg_is_complete) {
3968 asoc->stream_queue_cnt--;
3970 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
3971 asoc->locked_on_sending = NULL;
3972 asoc->stream_queue_cnt--;
/* All data flushed: progress shutdown (SHUTDOWN or SHUTDOWN-ACK). */
3976 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
3977 (asoc->stream_queue_cnt == 0)) {
3978 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
3979 /* Need to abort here */
3980 struct mbuf *op_err;
3985 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
3986 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
3987 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
3989 struct sctp_nets *netp;
3991 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
3992 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
3993 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
3995 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
3996 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
3997 sctp_stop_timers_for_shutdown(stcb);
3998 if (asoc->alternate) {
3999 netp = asoc->alternate;
4001 netp = asoc->primary_destination;
4003 sctp_send_shutdown(stcb, netp);
4004 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4005 stcb->sctp_ep, stcb, netp);
4006 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4007 stcb->sctp_ep, stcb, netp);
4009 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4010 (asoc->stream_queue_cnt == 0)) {
4011 struct sctp_nets *netp;
4013 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4016 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4017 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4018 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4019 sctp_stop_timers_for_shutdown(stcb);
4020 if (asoc->alternate) {
4021 netp = asoc->alternate;
4023 netp = asoc->primary_destination;
4025 sctp_send_shutdown_ack(stcb, netp);
4026 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4027 stcb->sctp_ep, stcb, netp);
4030 /*********************************************/
4031 /* Here we perform PR-SCTP procedures */
4033 /*********************************************/
4034 /* C1. update advancedPeerAckPoint */
4035 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4036 asoc->advanced_peer_ack_point = cumack;
4038 /* PR-Sctp issues need to be addressed too */
4039 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4040 struct sctp_tmit_chunk *lchk;
4041 uint32_t old_adv_peer_ack_point;
4043 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4044 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4045 /* C3. See if we need to send a Fwd-TSN */
4046 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4048 * ISSUE with ECN, see FWD-TSN processing.
4050 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4051 send_forward_tsn(stcb, asoc);
4053 /* try to FR fwd-tsn's that get lost too */
4054 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4055 send_forward_tsn(stcb, asoc);
4060 /* Assure a timer is up */
4061 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4062 stcb->sctp_ep, stcb, lchk->whoTo);
4065 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4066 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4068 stcb->asoc.peers_rwnd,
4069 stcb->asoc.total_flight,
4070 stcb->asoc.total_output_queue_size);
4075 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4076 struct sctp_tcb *stcb,
4077 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4078 int *abort_now, uint8_t flags,
4079 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4081 struct sctp_association *asoc;
4082 struct sctp_tmit_chunk *tp1, *tp2;
4083 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4084 uint16_t wake_him = 0;
4085 uint32_t send_s = 0;
4087 int accum_moved = 0;
4088 int will_exit_fast_recovery = 0;
4089 uint32_t a_rwnd, old_rwnd;
4090 int win_probe_recovery = 0;
4091 int win_probe_recovered = 0;
4092 struct sctp_nets *net = NULL;
4095 uint8_t reneged_all = 0;
4096 uint8_t cmt_dac_flag;
4099 * we take any chance we can to service our queues since we cannot
4100 * get awoken when the socket is read from :<
4103 * Now perform the actual SACK handling: 1) Verify that it is not an
4104 * old sack, if so discard. 2) If there is nothing left in the send
4105 * queue (cum-ack is equal to last acked) then you have a duplicate
4106 * too, update any rwnd change and verify no timers are running.
4107 * then return. 3) Process any new consequtive data i.e. cum-ack
4108 * moved process these first and note that it moved. 4) Process any
4109 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4110 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4111 * sync up flightsizes and things, stop all timers and also check
4112 * for shutdown_pending state. If so then go ahead and send off the
4113 * shutdown. If in shutdown recv, send off the shutdown-ack and
4114 * start that timer, Ret. 9) Strike any non-acked things and do FR
4115 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4116 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4117 * if in shutdown_recv state.
4119 SCTP_TCB_LOCK_ASSERT(stcb);
4121 this_sack_lowest_newack = 0;
4122 SCTP_STAT_INCR(sctps_slowpath_sack);
4124 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4125 #ifdef SCTP_ASOCLOG_OF_TSNS
4126 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4127 stcb->asoc.cumack_log_at++;
4128 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4129 stcb->asoc.cumack_log_at = 0;
4134 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4135 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4136 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4138 old_rwnd = stcb->asoc.peers_rwnd;
4139 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4140 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4141 stcb->asoc.overall_error_count,
4143 SCTP_FROM_SCTP_INDATA,
4146 stcb->asoc.overall_error_count = 0;
4148 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4149 sctp_log_sack(asoc->last_acked_seq,
4156 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4158 uint32_t *dupdata, dblock;
4160 for (i = 0; i < num_dup; i++) {
4161 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4162 sizeof(uint32_t), (uint8_t *) & dblock);
4163 if (dupdata == NULL) {
4166 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4169 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4171 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4172 tp1 = TAILQ_LAST(&asoc->sent_queue,
4173 sctpchunk_listhead);
4174 send_s = tp1->rec.data.TSN_seq + 1;
4177 send_s = asoc->sending_seq;
4179 if (SCTP_TSN_GE(cum_ack, send_s)) {
4180 struct mbuf *op_err;
4181 char msg[SCTP_DIAG_INFO_LEN];
4184 * no way, we have not even sent this TSN out yet.
4185 * Peer is hopelessly messed up with us.
4187 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4190 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4191 tp1->rec.data.TSN_seq, (void *)tp1);
4196 snprintf(msg, sizeof(msg), "Cum ack %8.8x greater or equal then TSN %8.8x",
4198 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
4199 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4200 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4204 /**********************/
4205 /* 1) check the range */
4206 /**********************/
4207 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4208 /* acking something behind */
4211 /* update the Rwnd of the peer */
4212 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4213 TAILQ_EMPTY(&asoc->send_queue) &&
4214 (asoc->stream_queue_cnt == 0)) {
4215 /* nothing left on send/sent and strmq */
4216 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4217 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4218 asoc->peers_rwnd, 0, 0, a_rwnd);
4220 asoc->peers_rwnd = a_rwnd;
4221 if (asoc->sent_queue_retran_cnt) {
4222 asoc->sent_queue_retran_cnt = 0;
4224 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4225 /* SWS sender side engages */
4226 asoc->peers_rwnd = 0;
4228 /* stop any timers */
4229 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4230 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4231 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4232 net->partial_bytes_acked = 0;
4233 net->flight_size = 0;
4235 asoc->total_flight = 0;
4236 asoc->total_flight_count = 0;
4240 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4241 * things. The total byte count acked is tracked in netAckSz AND
4242 * netAck2 is used to track the total bytes acked that are un-
4243 * amibguious and were never retransmitted. We track these on a per
4244 * destination address basis.
4246 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4247 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4248 /* Drag along the window_tsn for cwr's */
4249 net->cwr_window_tsn = cum_ack;
4251 net->prev_cwnd = net->cwnd;
4256 * CMT: Reset CUC and Fast recovery algo variables before
4259 net->new_pseudo_cumack = 0;
4260 net->will_exit_fast_recovery = 0;
4261 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4262 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4265 /* process the new consecutive TSN first */
4266 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4267 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4268 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4270 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4272 * If it is less than ACKED, it is
4273 * now no-longer in flight. Higher
4274 * values may occur during marking
4276 if ((tp1->whoTo->dest_state &
4277 SCTP_ADDR_UNCONFIRMED) &&
4278 (tp1->snd_count < 2)) {
4280 * If there was no retran
4281 * and the address is
4282 * un-confirmed and we sent
4284 * sacked.. its confirmed,
4287 tp1->whoTo->dest_state &=
4288 ~SCTP_ADDR_UNCONFIRMED;
4290 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4291 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4292 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4293 tp1->whoTo->flight_size,
4295 (uintptr_t) tp1->whoTo,
4296 tp1->rec.data.TSN_seq);
4298 sctp_flight_size_decrease(tp1);
4299 sctp_total_flight_decrease(stcb, tp1);
4300 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4301 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4305 tp1->whoTo->net_ack += tp1->send_size;
4307 /* CMT SFR and DAC algos */
4308 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4309 tp1->whoTo->saw_newack = 1;
4311 if (tp1->snd_count < 2) {
4313 * True non-retransmited
4316 tp1->whoTo->net_ack2 +=
4319 /* update RTO too? */
4323 sctp_calculate_rto(stcb,
4325 &tp1->sent_rcv_time,
4326 sctp_align_safe_nocopy,
4327 SCTP_RTT_FROM_DATA);
4330 if (tp1->whoTo->rto_needed == 0) {
4331 tp1->whoTo->rto_needed = 1;
4337 * CMT: CUCv2 algorithm. From the
4338 * cumack'd TSNs, for each TSN being
4339 * acked for the first time, set the
4340 * following variables for the
4341 * corresp destination.
4342 * new_pseudo_cumack will trigger a
4344 * find_(rtx_)pseudo_cumack will
4345 * trigger search for the next
4346 * expected (rtx-)pseudo-cumack.
4348 tp1->whoTo->new_pseudo_cumack = 1;
4349 tp1->whoTo->find_pseudo_cumack = 1;
4350 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4353 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4354 sctp_log_sack(asoc->last_acked_seq,
4356 tp1->rec.data.TSN_seq,
4359 SCTP_LOG_TSN_ACKED);
4361 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4362 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4365 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4366 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4367 #ifdef SCTP_AUDITING_ENABLED
4368 sctp_audit_log(0xB3,
4369 (asoc->sent_queue_retran_cnt & 0x000000ff));
4372 if (tp1->rec.data.chunk_was_revoked) {
4373 /* deflate the cwnd */
4374 tp1->whoTo->cwnd -= tp1->book_size;
4375 tp1->rec.data.chunk_was_revoked = 0;
4377 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4378 tp1->sent = SCTP_DATAGRAM_ACKED;
4385 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4386 /* always set this up to cum-ack */
4387 asoc->this_sack_highest_gap = last_tsn;
4389 if ((num_seg > 0) || (num_nr_seg > 0)) {
4392 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4393 * to be greater than the cumack. Also reset saw_newack to 0
4396 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4397 net->saw_newack = 0;
4398 net->this_sack_highest_newack = last_tsn;
4402 * thisSackHighestGap will increase while handling NEW
4403 * segments this_sack_highest_newack will increase while
4404 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4405 * used for CMT DAC algo. saw_newack will also change.
4407 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4408 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4409 num_seg, num_nr_seg, &rto_ok)) {
4412 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4414 * validate the biggest_tsn_acked in the gap acks if
4415 * strict adherence is wanted.
4417 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4419 * peer is either confused or we are under
4420 * attack. We must abort.
4422 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4423 biggest_tsn_acked, send_s);
4428 /*******************************************/
4429 /* cancel ALL T3-send timer if accum moved */
4430 /*******************************************/
4431 if (asoc->sctp_cmt_on_off > 0) {
4432 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4433 if (net->new_pseudo_cumack)
4434 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4436 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4441 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4442 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4443 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4447 /********************************************/
4448 /* drop the acked chunks from the sentqueue */
4449 /********************************************/
4450 asoc->last_acked_seq = cum_ack;
4452 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4453 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4456 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4457 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4458 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4461 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4465 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4466 if (PR_SCTP_ENABLED(tp1->flags)) {
4467 if (asoc->pr_sctp_cnt != 0)
4468 asoc->pr_sctp_cnt--;
4470 asoc->sent_queue_cnt--;
4472 /* sa_ignore NO_NULL_CHK */
4473 sctp_free_bufspace(stcb, asoc, tp1, 1);
4474 sctp_m_freem(tp1->data);
4476 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4477 asoc->sent_queue_cnt_removeable--;
4480 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4481 sctp_log_sack(asoc->last_acked_seq,
4483 tp1->rec.data.TSN_seq,
4486 SCTP_LOG_FREE_SENT);
4488 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4491 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4493 panic("Warning flight size is postive and should be 0");
4495 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4496 asoc->total_flight);
4498 asoc->total_flight = 0;
4500 /* sa_ignore NO_NULL_CHK */
4501 if ((wake_him) && (stcb->sctp_socket)) {
4502 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4506 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4507 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4508 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4510 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4511 so = SCTP_INP_SO(stcb->sctp_ep);
4512 atomic_add_int(&stcb->asoc.refcnt, 1);
4513 SCTP_TCB_UNLOCK(stcb);
4514 SCTP_SOCKET_LOCK(so, 1);
4515 SCTP_TCB_LOCK(stcb);
4516 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4517 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4518 /* assoc was freed while we were unlocked */
4519 SCTP_SOCKET_UNLOCK(so, 1);
4523 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4524 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4525 SCTP_SOCKET_UNLOCK(so, 1);
4528 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4529 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4533 if (asoc->fast_retran_loss_recovery && accum_moved) {
4534 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4535 /* Setup so we will exit RFC2582 fast recovery */
4536 will_exit_fast_recovery = 1;
4540 * Check for revoked fragments:
4542 * if Previous sack - Had no frags then we can't have any revoked if
4543 * Previous sack - Had frag's then - If we now have frags aka
4544 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4545 * some of them. else - The peer revoked all ACKED fragments, since
4546 * we had some before and now we have NONE.
4550 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4551 asoc->saw_sack_with_frags = 1;
4552 } else if (asoc->saw_sack_with_frags) {
4553 int cnt_revoked = 0;
4555 /* Peer revoked all dg's marked or acked */
4556 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4557 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4558 tp1->sent = SCTP_DATAGRAM_SENT;
4559 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4560 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4561 tp1->whoTo->flight_size,
4563 (uintptr_t) tp1->whoTo,
4564 tp1->rec.data.TSN_seq);
4566 sctp_flight_size_increase(tp1);
4567 sctp_total_flight_increase(stcb, tp1);
4568 tp1->rec.data.chunk_was_revoked = 1;
4570 * To ensure that this increase in
4571 * flightsize, which is artificial, does not
4572 * throttle the sender, we also increase the
4573 * cwnd artificially.
4575 tp1->whoTo->cwnd += tp1->book_size;
4582 asoc->saw_sack_with_frags = 0;
4585 asoc->saw_sack_with_nr_frags = 1;
4587 asoc->saw_sack_with_nr_frags = 0;
4589 /* JRS - Use the congestion control given in the CC module */
4590 if (ecne_seen == 0) {
4591 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4592 if (net->net_ack2 > 0) {
4594 * Karn's rule applies to clearing error
4595 * count, this is optional.
4597 net->error_count = 0;
4598 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4599 /* addr came good */
4600 net->dest_state |= SCTP_ADDR_REACHABLE;
4601 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4602 0, (void *)net, SCTP_SO_NOT_LOCKED);
4604 if (net == stcb->asoc.primary_destination) {
4605 if (stcb->asoc.alternate) {
4607 * release the alternate,
4610 sctp_free_remote_addr(stcb->asoc.alternate);
4611 stcb->asoc.alternate = NULL;
4614 if (net->dest_state & SCTP_ADDR_PF) {
4615 net->dest_state &= ~SCTP_ADDR_PF;
4616 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4617 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4618 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4619 /* Done with this net */
4622 /* restore any doubled timers */
4623 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4624 if (net->RTO < stcb->asoc.minrto) {
4625 net->RTO = stcb->asoc.minrto;
4627 if (net->RTO > stcb->asoc.maxrto) {
4628 net->RTO = stcb->asoc.maxrto;
4632 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4634 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4635 /* nothing left in-flight */
4636 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4637 /* stop all timers */
4638 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4639 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4640 net->flight_size = 0;
4641 net->partial_bytes_acked = 0;
4643 asoc->total_flight = 0;
4644 asoc->total_flight_count = 0;
4646 /**********************************/
4647 /* Now what about shutdown issues */
4648 /**********************************/
4649 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4650 /* nothing left on sendqueue.. consider done */
4651 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4652 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4653 asoc->peers_rwnd, 0, 0, a_rwnd);
4655 asoc->peers_rwnd = a_rwnd;
4656 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4657 /* SWS sender side engages */
4658 asoc->peers_rwnd = 0;
4661 if ((asoc->stream_queue_cnt == 1) &&
4662 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4663 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4664 (asoc->locked_on_sending)
4666 struct sctp_stream_queue_pending *sp;
4669 * I may be in a state where we got all across.. but
4670 * cannot write more due to a shutdown... we abort
4671 * since the user did not indicate EOR in this case.
4673 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4675 if ((sp) && (sp->length == 0)) {
4676 asoc->locked_on_sending = NULL;
4677 if (sp->msg_is_complete) {
4678 asoc->stream_queue_cnt--;
4680 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4681 asoc->stream_queue_cnt--;
4685 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4686 (asoc->stream_queue_cnt == 0)) {
4687 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4688 /* Need to abort here */
4689 struct mbuf *op_err;
4694 op_err = sctp_generate_cause(SCTP_CAUSE_USER_INITIATED_ABT, "");
4695 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4696 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
4699 struct sctp_nets *netp;
4701 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4702 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4703 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4705 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4706 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4707 sctp_stop_timers_for_shutdown(stcb);
4708 if (asoc->alternate) {
4709 netp = asoc->alternate;
4711 netp = asoc->primary_destination;
4713 sctp_send_shutdown(stcb, netp);
4714 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4715 stcb->sctp_ep, stcb, netp);
4716 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4717 stcb->sctp_ep, stcb, netp);
4720 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4721 (asoc->stream_queue_cnt == 0)) {
4722 struct sctp_nets *netp;
4724 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4727 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4728 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4729 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4730 sctp_stop_timers_for_shutdown(stcb);
4731 if (asoc->alternate) {
4732 netp = asoc->alternate;
4734 netp = asoc->primary_destination;
4736 sctp_send_shutdown_ack(stcb, netp);
4737 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4738 stcb->sctp_ep, stcb, netp);
4743 * Now here we are going to recycle net_ack for a different use...
4746 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4751 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
4752 * to be done. Setting this_sack_lowest_newack to the cum_ack will
4753 * automatically ensure that.
4755 if ((asoc->sctp_cmt_on_off > 0) &&
4756 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
4757 (cmt_dac_flag == 0)) {
4758 this_sack_lowest_newack = cum_ack;
4760 if ((num_seg > 0) || (num_nr_seg > 0)) {
4761 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
4762 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
4764 /* JRS - Use the congestion control given in the CC module */
4765 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
4767 /* Now are we exiting loss recovery ? */
4768 if (will_exit_fast_recovery) {
4769 /* Ok, we must exit fast recovery */
4770 asoc->fast_retran_loss_recovery = 0;
4772 if ((asoc->sat_t3_loss_recovery) &&
4773 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
4774 /* end satellite t3 loss recovery */
4775 asoc->sat_t3_loss_recovery = 0;
4780 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4781 if (net->will_exit_fast_recovery) {
4782 /* Ok, we must exit fast recovery */
4783 net->fast_retran_loss_recovery = 0;
4787 /* Adjust and set the new rwnd value */
4788 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4789 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4790 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
4792 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
4793 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4794 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4795 /* SWS sender side engages */
4796 asoc->peers_rwnd = 0;
4798 if (asoc->peers_rwnd > old_rwnd) {
4799 win_probe_recovery = 1;
4802 * Now we must setup so we have a timer up for anyone with
4808 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4809 if (win_probe_recovery && (net->window_probe)) {
4810 win_probe_recovered = 1;
4812 * Find first chunk that was used with
4813 * window probe and clear the event. Put
4814 * it back into the send queue as if has
4817 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4818 if (tp1->window_probe) {
4819 sctp_window_probe_recovery(stcb, asoc, tp1);
4824 if (net->flight_size) {
4826 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4827 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4828 stcb->sctp_ep, stcb, net);
4830 if (net->window_probe) {
4831 net->window_probe = 0;
4834 if (net->window_probe) {
4836 * In window probes we must assure a timer
4837 * is still running there
4839 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4840 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4841 stcb->sctp_ep, stcb, net);
4844 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4845 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4847 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4852 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4853 (asoc->sent_queue_retran_cnt == 0) &&
4854 (win_probe_recovered == 0) &&
4857 * huh, this should not happen unless all packets are
4858 * PR-SCTP and marked to skip of course.
4860 if (sctp_fs_audit(asoc)) {
4861 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4862 net->flight_size = 0;
4864 asoc->total_flight = 0;
4865 asoc->total_flight_count = 0;
4866 asoc->sent_queue_retran_cnt = 0;
4867 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4868 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4869 sctp_flight_size_increase(tp1);
4870 sctp_total_flight_increase(stcb, tp1);
4871 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4872 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4879 /*********************************************/
4880 /* Here we perform PR-SCTP procedures */
4882 /*********************************************/
4883 /* C1. update advancedPeerAckPoint */
4884 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
4885 asoc->advanced_peer_ack_point = cum_ack;
4887 /* C2. try to further move advancedPeerAckPoint ahead */
4888 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4889 struct sctp_tmit_chunk *lchk;
4890 uint32_t old_adv_peer_ack_point;
4892 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4893 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4894 /* C3. See if we need to send a Fwd-TSN */
4895 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
4897 * ISSUE with ECN, see FWD-TSN processing.
4899 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
4900 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
4901 0xee, cum_ack, asoc->advanced_peer_ack_point,
4902 old_adv_peer_ack_point);
4904 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4905 send_forward_tsn(stcb, asoc);
4907 /* try to FR fwd-tsn's that get lost too */
4908 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4909 send_forward_tsn(stcb, asoc);
4914 /* Assure a timer is up */
4915 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4916 stcb->sctp_ep, stcb, lchk->whoTo);
4919 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4920 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4922 stcb->asoc.peers_rwnd,
4923 stcb->asoc.total_flight,
4924 stcb->asoc.total_output_queue_size);
4929 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
4932 uint32_t cum_ack, a_rwnd;
4934 cum_ack = ntohl(cp->cumulative_tsn_ack);
4935 /* Arrange so a_rwnd does NOT change */
4936 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
4938 /* Now call the express sack handling */
4939 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
4943 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
4944 struct sctp_stream_in *strmin)
4946 struct sctp_queued_to_read *ctl, *nctl;
4947 struct sctp_association *asoc;
4951 tt = strmin->last_sequence_delivered;
4953 * First deliver anything prior to and including the stream no that
4956 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4957 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
4958 /* this is deliverable now */
4959 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4960 /* subtract pending on streams */
4961 asoc->size_on_all_streams -= ctl->length;
4962 sctp_ucount_decr(asoc->cnt_on_all_streams);
4963 /* deliver it to at least the delivery-q */
4964 if (stcb->sctp_socket) {
4965 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4966 sctp_add_to_readq(stcb->sctp_ep, stcb,
4968 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4971 /* no more delivery now. */
4976 * now we must deliver things in queue the normal way if any are
4979 tt = strmin->last_sequence_delivered + 1;
4980 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
4981 if (tt == ctl->sinfo_ssn) {
4982 /* this is deliverable now */
4983 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
4984 /* subtract pending on streams */
4985 asoc->size_on_all_streams -= ctl->length;
4986 sctp_ucount_decr(asoc->cnt_on_all_streams);
4987 /* deliver it to at least the delivery-q */
4988 strmin->last_sequence_delivered = ctl->sinfo_ssn;
4989 if (stcb->sctp_socket) {
4990 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
4991 sctp_add_to_readq(stcb->sctp_ep, stcb,
4993 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
4996 tt = strmin->last_sequence_delivered + 1;
5004 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5005 struct sctp_association *asoc,
5006 uint16_t stream, uint16_t seq)
5008 struct sctp_tmit_chunk *chk, *nchk;
5010 /* For each one on here see if we need to toss it */
5012 * For now large messages held on the reasmqueue that are complete
5013 * will be tossed too. We could in theory do more work to spin
5014 * through and stop after dumping one msg aka seeing the start of a
5015 * new msg at the head, and call the delivery function... to see if
5016 * it can be delivered... But for now we just dump everything on the
5019 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5021 * Do not toss it if on a different stream or marked for
5022 * unordered delivery in which case the stream sequence
5023 * number has no meaning.
5025 if ((chk->rec.data.stream_number != stream) ||
5026 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5029 if (chk->rec.data.stream_seq == seq) {
5030 /* It needs to be tossed */
5031 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5032 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5033 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5034 asoc->str_of_pdapi = chk->rec.data.stream_number;
5035 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5036 asoc->fragment_flags = chk->rec.data.rcv_flags;
5038 asoc->size_on_reasm_queue -= chk->send_size;
5039 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5041 /* Clear up any stream problem */
5042 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5043 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5045 * We must dump forward this streams
5046 * sequence number if the chunk is not
5047 * unordered that is being skipped. There is
5048 * a chance that if the peer does not
5049 * include the last fragment in its FWD-TSN
5050 * we WILL have a problem here since you
5051 * would have a partial chunk in queue that
5052 * may not be deliverable. Also if a Partial
5053 * delivery API as started the user may get
5054 * a partial chunk. The next read returning
5055 * a new chunk... really ugly but I see no
5056 * way around it! Maybe a notify??
5058 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5061 sctp_m_freem(chk->data);
5064 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5065 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5067 * If the stream_seq is > than the purging one, we
5077 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5078 struct sctp_forward_tsn_chunk *fwd,
5079 int *abort_flag, struct mbuf *m, int offset)
5081 /* The pr-sctp fwd tsn */
5083 * here we will perform all the data receiver side steps for
5084 * processing FwdTSN, as required in by pr-sctp draft:
5086 * Assume we get FwdTSN(x):
5088 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5089 * others we have 3) examine and update re-ordering queue on
5090 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5091 * report where we are.
5093 struct sctp_association *asoc;
5094 uint32_t new_cum_tsn, gap;
5095 unsigned int i, fwd_sz, m_size;
5097 struct sctp_stream_in *strm;
5098 struct sctp_tmit_chunk *chk, *nchk;
5099 struct sctp_queued_to_read *ctl, *sv;
5102 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5103 SCTPDBG(SCTP_DEBUG_INDATA1,
5104 "Bad size too small/big fwd-tsn\n");
5107 m_size = (stcb->asoc.mapping_array_size << 3);
5108 /*************************************************************/
5109 /* 1. Here we update local cumTSN and shift the bitmap array */
5110 /*************************************************************/
5111 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5113 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5114 /* Already got there ... */
5118 * now we know the new TSN is more advanced, let's find the actual
5121 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5122 asoc->cumulative_tsn = new_cum_tsn;
5123 if (gap >= m_size) {
5124 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5125 struct mbuf *op_err;
5126 char msg[SCTP_DIAG_INFO_LEN];
5129 * out of range (of single byte chunks in the rwnd I
5130 * give out). This must be an attacker.
5133 snprintf(msg, sizeof(msg),
5134 "New cum ack %8.8x too high, highest TSN %8.8x",
5135 new_cum_tsn, asoc->highest_tsn_inside_map);
5136 op_err = sctp_generate_cause(SCTP_CAUSE_PROTOCOL_VIOLATION, msg);
5137 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5138 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
5141 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5143 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5144 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5145 asoc->highest_tsn_inside_map = new_cum_tsn;
5147 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5148 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5150 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5151 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5154 SCTP_TCB_LOCK_ASSERT(stcb);
5155 for (i = 0; i <= gap; i++) {
5156 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5157 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5158 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5159 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5160 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5165 /*************************************************************/
5166 /* 2. Clear up re-assembly queue */
5167 /*************************************************************/
5169 * First service it if pd-api is up, just in case we can progress it
5172 if (asoc->fragmented_delivery_inprogress) {
5173 sctp_service_reassembly(stcb, asoc);
5175 /* For each one on here see if we need to toss it */
5177 * For now large messages held on the reasmqueue that are complete
5178 * will be tossed too. We could in theory do more work to spin
5179 * through and stop after dumping one msg aka seeing the start of a
5180 * new msg at the head, and call the delivery function... to see if
5181 * it can be delivered... But for now we just dump everything on the
5184 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5185 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5186 /* It needs to be tossed */
5187 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5188 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5189 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5190 asoc->str_of_pdapi = chk->rec.data.stream_number;
5191 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5192 asoc->fragment_flags = chk->rec.data.rcv_flags;
5194 asoc->size_on_reasm_queue -= chk->send_size;
5195 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5197 /* Clear up any stream problem */
5198 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5199 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5201 * We must dump forward this streams
5202 * sequence number if the chunk is not
5203 * unordered that is being skipped. There is
5204 * a chance that if the peer does not
5205 * include the last fragment in its FWD-TSN
5206 * we WILL have a problem here since you
5207 * would have a partial chunk in queue that
5208 * may not be deliverable. Also if a Partial
5209 * delivery API as started the user may get
5210 * a partial chunk. The next read returning
5211 * a new chunk... really ugly but I see no
5212 * way around it! Maybe a notify??
5214 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5217 sctp_m_freem(chk->data);
5220 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5223 * Ok we have gone beyond the end of the fwd-tsn's
5229 /*******************************************************/
5230 /* 3. Update the PR-stream re-ordering queues and fix */
5231 /* delivery issues as needed. */
5232 /*******************************************************/
5233 fwd_sz -= sizeof(*fwd);
5236 unsigned int num_str;
5237 struct sctp_strseq *stseq, strseqbuf;
5239 offset += sizeof(*fwd);
5241 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5242 num_str = fwd_sz / sizeof(struct sctp_strseq);
5243 for (i = 0; i < num_str; i++) {
5246 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5247 sizeof(struct sctp_strseq),
5248 (uint8_t *) & strseqbuf);
5249 offset += sizeof(struct sctp_strseq);
5250 if (stseq == NULL) {
5254 st = ntohs(stseq->stream);
5256 st = ntohs(stseq->sequence);
5257 stseq->sequence = st;
5262 * Ok we now look for the stream/seq on the read
5263 * queue where its not all delivered. If we find it
5264 * we transmute the read entry into a PDI_ABORTED.
5266 if (stseq->stream >= asoc->streamincnt) {
5267 /* screwed up streams, stop! */
5270 if ((asoc->str_of_pdapi == stseq->stream) &&
5271 (asoc->ssn_of_pdapi == stseq->sequence)) {
5273 * If this is the one we were partially
5274 * delivering now then we no longer are.
5275 * Note this will change with the reassembly
5278 asoc->fragmented_delivery_inprogress = 0;
5280 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5281 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5282 if ((ctl->sinfo_stream == stseq->stream) &&
5283 (ctl->sinfo_ssn == stseq->sequence)) {
5284 str_seq = (stseq->stream << 16) | stseq->sequence;
5286 ctl->pdapi_aborted = 1;
5287 sv = stcb->asoc.control_pdapi;
5288 stcb->asoc.control_pdapi = ctl;
5289 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5291 SCTP_PARTIAL_DELIVERY_ABORTED,
5293 SCTP_SO_NOT_LOCKED);
5294 stcb->asoc.control_pdapi = sv;
5296 } else if ((ctl->sinfo_stream == stseq->stream) &&
5297 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5298 /* We are past our victim SSN */
5302 strm = &asoc->strmin[stseq->stream];
5303 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5304 /* Update the sequence number */
5305 strm->last_sequence_delivered = stseq->sequence;
5307 /* now kick the stream the new way */
5308 /* sa_ignore NO_NULL_CHK */
5309 sctp_kick_prsctp_reorder_queue(stcb, strm);
5311 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5314 * Now slide thing forward.
5316 sctp_slide_mapping_arrays(stcb);
5318 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5319 /* now lets kick out and check for more fragmented delivery */
5320 /* sa_ignore NO_NULL_CHK */
5321 sctp_deliver_reasm_check(stcb, &stcb->asoc);