2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send that is and will be sending it .. for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Recompute and store this association's advertised receive window
 * (a_rwnd) from current socket-buffer/queue occupancy via
 * sctp_calc_rwnd().  NOTE(review): this excerpt is missing lines
 * (braces, return type) from the original function.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
/*
 * Compute the receive window we may advertise to the peer: the
 * socket's receive-buffer space, minus data still held on the
 * reassembly and per-stream queues (plus per-chunk mbuf overhead)
 * and minus rwnd control overhead.  NOTE(review): several lines
 * (return statements, else branches, closing braces) are missing
 * from this excerpt of the function.
 */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone as put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
/* No socket: cannot derive a window (early-out path; body not visible here). */
77 if (stcb->sctp_socket == NULL)
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
/* Nothing buffered anywhere: grant the full limit, never below the minimal rwnd. */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
/* Subtract queued-but-undelivered data; cnt * MSIZE accounts for mbuf header overhead. */
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even it is 0. SWS engaged
/* Silly-window avoidance: clamp tiny windows (handling lines not visible in this excerpt). */
109 if (calc < stcb->asoc.my_rwnd_control_len) {
118 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and initialize a sctp_queued_to_read entry from explicit
 * per-chunk values (tsn/ppid/context/stream/seq/flags) so it can be
 * placed on the socket read queue.  Takes a reference on 'net' for
 * the whoFrom field.  Returns the new entry, or NULL on allocation
 * failure (the early-return line is missing from this excerpt).
 */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
128 struct sctp_queued_to_read *read_queue_e = NULL;
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
/* Chunk flags live in the upper byte of sinfo_flags (sockets API layout). */
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
/* Hold a reference on the source net while this readq entry points at it. */
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
157 return (read_queue_e);
162 * Build out our readq entry based on the incoming packet.
/*
 * Like sctp_build_readq_entry(), but initializes the readq entry
 * directly from a reassembly chunk (sctp_tmit_chunk), copying its
 * stream/seq/TSN/ppid/flags.  Takes a reference on chk->whoTo.
 * Returns NULL on allocation failure (early-return line missing
 * from this excerpt).
 */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
168 struct sctp_queued_to_read *read_queue_e = NULL;
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
/* Chunk flags live in the upper byte of sinfo_flags (sockets API layout). */
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
/* Hold a reference on the chunk's net while the readq entry points at it. */
186 atomic_add_int(&chk->whoTo->ref_count, 1);
/* Ownership of the chunk's mbuf chain moves to the readq entry. */
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
197 return (read_queue_e);
/*
 * Build an mbuf carrying the cmsg ancillary data the application has
 * enabled for a received message: SCTP_RCVINFO, SCTP_NXTINFO (if a
 * next-message preview is available in the extended sinfo), and the
 * legacy SCTP_SNDRCV / SCTP_EXTRCV.  Sizes the buffer first, then
 * packs the cmsgs in order.  NOTE(review): several lines (returns,
 * else branches, some closing braces) are missing from this excerpt;
 * the return value appears to be the mbuf 'ret' or NULL when no
 * ancillary data is wanted — confirm against the full source.
 */
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
204 struct sctp_extrcvinfo *seinfo;
205 struct sctp_sndrcvinfo *outinfo;
206 struct sctp_rcvinfo *rcvinfo;
207 struct sctp_nxtinfo *nxtinfo;
/* Nothing enabled: the caller gets no ancillary data at all. */
214 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 /* user does not want any ancillary data */
/* First pass: accumulate the total CMSG_SPACE needed. */
221 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
224 seinfo = (struct sctp_extrcvinfo *)sinfo;
225 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
228 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
235 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
238 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
244 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
249 SCTP_BUF_LEN(ret) = 0;
251 /* We need a CMSG header followed by the struct */
/* Second pass: pack each enabled cmsg, advancing cmh by CMSG_SPACE each time. */
252 cmh = mtod(ret, struct cmsghdr *);
253 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
254 cmh->cmsg_level = IPPROTO_SCTP;
255 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
256 cmh->cmsg_type = SCTP_RCVINFO;
257 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
258 rcvinfo->rcv_sid = sinfo->sinfo_stream;
259 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
260 rcvinfo->rcv_flags = sinfo->sinfo_flags;
261 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
262 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
263 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
264 rcvinfo->rcv_context = sinfo->sinfo_context;
265 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
266 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
267 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* SCTP_NXTINFO cmsg: preview of the next queued message, from the extended sinfo. */
270 cmh->cmsg_level = IPPROTO_SCTP;
271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
272 cmh->cmsg_type = SCTP_NXTINFO;
273 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
274 nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
275 nxtinfo->nxt_flags = 0;
276 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
277 nxtinfo->nxt_flags |= SCTP_UNORDERED;
279 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
280 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
282 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
283 nxtinfo->nxt_flags |= SCTP_COMPLETE;
285 nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
286 nxtinfo->nxt_length = seinfo->sreinfo_next_length;
287 nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
288 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
289 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
/* Legacy cmsg: full extended struct (SCTP_EXTRCV) or plain SCTP_SNDRCV. */
291 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
292 cmh->cmsg_level = IPPROTO_SCTP;
293 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
295 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
296 cmh->cmsg_type = SCTP_EXTRCV;
297 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
298 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
300 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
301 cmh->cmsg_type = SCTP_SNDRCV;
303 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move 'tsn' from the (revokable) mapping array to the non-revokable
 * NR mapping array — i.e. commit that this TSN has been delivered to
 * the application and may no longer be renege'd.  Also maintains
 * highest_tsn_inside_nr_map and, when 'tsn' was the highest entry in
 * the revokable map, walks backwards to find the new highest.
 * No-op when the drain sysctl is off or tsn is at/behind the cum-ack.
 * NOTE(review): some lines (returns, closing braces, INVARIANTS
 * conditionals) are missing from this excerpt.
 */
311 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
313 uint32_t gap, i, cumackp1;
316 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
319 cumackp1 = asoc->cumulative_tsn + 1;
320 if (SCTP_TSN_GT(cumackp1, tsn)) {
322 * this tsn is behind the cum ack and thus we don't need to
323 * worry about it being moved from one to the other.
327 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
/* A TSN being marked non-revokable must already be present in the map. */
328 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
329 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
330 sctp_print_mapping_array(asoc);
332 panic("Things are really messed up now!!");
/* Atomically (w.r.t. the maps) transfer the bit: set in NR, clear in revokable. */
335 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
336 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
337 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
338 asoc->highest_tsn_inside_nr_map = tsn;
340 if (tsn == asoc->highest_tsn_inside_map) {
341 /* We must back down to see what the new highest is */
342 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
343 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
344 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
345 asoc->highest_tsn_inside_map = i;
/* No set bit found: map is empty, highest sits just below the base TSN. */
351 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
358 * We are delivering currently from the reassembly queue. We must continue to
359 * deliver until we either: 1) run out of space. 2) run out of sequential
360 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * Drain the reassembly queue to the socket receive buffer: walks
 * consecutive-TSN fragments, starting a new readq entry at each
 * FIRST_FRAG and appending MIDDLE/LAST fragments to the in-progress
 * partial-delivery entry (asoc->control_pdapi).  If the socket is
 * gone, frees the entire reassembly queue instead.  After a LAST
 * fragment completes a message, delivers any now-in-order queued
 * messages from the same stream.  NOTE(review): numerous lines
 * (returns, else branches, closing braces, INVARIANTS #ifdefs) are
 * missing from this excerpt.
 */
363 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
365 struct sctp_tmit_chunk *chk, *nchk;
370 struct sctp_queued_to_read *control, *ctl, *nctl;
375 cntDel = stream_no = 0;
376 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
377 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
378 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
379 /* socket above is long gone or going.. */
/* Teardown path: discard every queued fragment, no delivery possible. */
381 asoc->fragmented_delivery_inprogress = 0;
382 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
383 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
384 asoc->size_on_reasm_queue -= chk->send_size;
385 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
387 * Lose the data pointer, since its in the socket
391 sctp_m_freem(chk->data);
394 /* Now free the address and data */
395 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
396 /* sa_ignore FREED_MEMORY */
400 SCTP_TCB_LOCK_ASSERT(stcb);
/* Normal path: deliver fragments strictly in TSN order. */
401 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
402 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
403 /* Can't deliver more :< */
406 stream_no = chk->rec.data.stream_number;
407 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
408 if (nxt_todel != chk->rec.data.stream_seq &&
409 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
411 * Not the next sequence to deliver in its stream OR
/* FIRST fragment opens a new partial-delivery readq entry. */
416 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
418 control = sctp_build_readq_entry_chk(stcb, chk);
419 if (control == NULL) {
423 /* save it off for our future deliveries */
424 stcb->asoc.control_pdapi = control;
425 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
429 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
430 sctp_add_to_readq(stcb->sctp_ep,
431 stcb, control, &stcb->sctp_socket->so_rcv, end,
432 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* MIDDLE/LAST fragment appends to the in-progress control_pdapi entry. */
435 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
439 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
440 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
441 stcb->asoc.control_pdapi,
442 chk->data, end, chk->rec.data.TSN_seq,
443 &stcb->sctp_socket->so_rcv)) {
445 * something is very wrong, either
446 * control_pdapi is NULL, or the tail_mbuf
447 * is corrupt, or there is a EOM already on
450 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
/* Debug (INVARIANTS-style) diagnostics vs. production-safe printf fallback below. */
454 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
455 panic("This should not happen control_pdapi NULL?");
457 /* if we did not panic, it was a EOM */
458 panic("Bad chunking ??");
460 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
461 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
463 SCTP_PRINTF("Bad chunking ??\n");
464 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
472 /* pull it we did it */
473 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
474 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/* LAST fragment finishes the message: close PD-API and bump the stream SSN. */
475 asoc->fragmented_delivery_inprogress = 0;
476 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
477 asoc->strmin[stream_no].last_sequence_delivered++;
479 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
480 SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
482 } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
484 * turn the flag back on since we just delivered
487 asoc->fragmented_delivery_inprogress = 1;
/* Record last-delivery bookkeeping used for debugging / SACK-side decisions. */
489 asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
490 asoc->last_flags_delivered = chk->rec.data.rcv_flags;
491 asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
492 asoc->last_strm_no_delivered = chk->rec.data.stream_number;
494 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
495 asoc->size_on_reasm_queue -= chk->send_size;
496 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
497 /* free up the chk */
499 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
501 if (asoc->fragmented_delivery_inprogress == 0) {
503 * Now lets see if we can deliver the next one on
506 struct sctp_stream_in *strm;
508 strm = &asoc->strmin[stream_no];
509 nxt_todel = strm->last_sequence_delivered + 1;
/* Completed message may have unblocked in-order messages on this stream. */
510 TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
511 /* Deliver more if we can. */
512 if (nxt_todel == ctl->sinfo_ssn) {
513 TAILQ_REMOVE(&strm->inqueue, ctl, next);
514 asoc->size_on_all_streams -= ctl->length;
515 sctp_ucount_decr(asoc->cnt_on_all_streams);
516 strm->last_sequence_delivered++;
517 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
518 sctp_add_to_readq(stcb->sctp_ep, stcb,
520 &stcb->sctp_socket->so_rcv, 1,
521 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
525 nxt_todel = strm->last_sequence_delivered + 1;
533 * Queue the chunk either right into the socket buffer if it is the next one
534 * to go OR put it in the correct place in the delivery queue. If we do
535 * append to the so_buf, keep doing so until we are out of order. One big
536 * question still remains, what to do when the socket buffer is FULL??
/*
 * Place an ordered message on its stream: deliver immediately (plus
 * any queued messages that become in-order) when its SSN is the next
 * expected, otherwise insert it SSN-sorted into the stream's inqueue.
 * A message whose SSN is at/behind last_sequence_delivered is a
 * protocol violation and aborts the association (*abort_flag is the
 * out-parameter callers check).  NOTE(review): many lines (returns,
 * else branches, closing braces) are missing from this excerpt.
 */
539 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
540 struct sctp_queued_to_read *control, int *abort_flag)
543 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
544 * all the data in one stream this could happen quite rapidly. One
545 * could use the TSN to keep track of things, but this scheme breaks
546 * down in the other type of stream useage that could occur. Send a
547 * single msg to stream 0, send 4Billion messages to stream 1, now
548 * send a message to stream 0. You have a situation where the TSN
549 * has wrapped but not in the stream. Is this worth worrying about
550 * or should we just change our queue sort at the bottom to be by
553 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
554 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
555 * assignment this could happen... and I don't see how this would be
556 * a violation. So for now I am undecided an will leave the sort by
557 * SSN alone. Maybe a hybred approach is the answer
560 struct sctp_stream_in *strm;
561 struct sctp_queued_to_read *at;
/* Account the message as on-stream up front; immediate-delivery path undoes this. */
567 asoc->size_on_all_streams += control->length;
568 sctp_ucount_incr(asoc->cnt_on_all_streams);
569 strm = &asoc->strmin[control->sinfo_stream];
570 nxt_todel = strm->last_sequence_delivered + 1;
571 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
572 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
574 SCTPDBG(SCTP_DEBUG_INDATA1,
575 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
576 (uint32_t) control->sinfo_stream,
577 (uint32_t) strm->last_sequence_delivered,
578 (uint32_t) nxt_todel);
/* SSN at or behind what we already delivered: duplicate/violating peer — abort. */
579 if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
580 /* The incoming sseq is behind where we last delivered? */
581 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
582 control->sinfo_ssn, strm->last_sequence_delivered);
585 * throw it in the stream so it gets cleaned up in
586 * association destruction
588 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Build a PROTOCOL_VIOLATION error cause carrying loc/tsn/stream-ssn diagnostics. */
589 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
590 0, M_NOWAIT, 1, MT_DATA);
592 struct sctp_paramhdr *ph;
595 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
596 (sizeof(uint32_t) * 3);
597 ph = mtod(oper, struct sctp_paramhdr *);
598 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
599 ph->param_length = htons(SCTP_BUF_LEN(oper));
600 ippp = (uint32_t *) (ph + 1);
601 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
603 *ippp = control->sinfo_tsn;
605 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
607 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
608 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/* In-order message: hand it straight to the socket receive buffer. */
613 if (nxt_todel == control->sinfo_ssn) {
614 /* can be delivered right away? */
615 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
616 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
618 /* EY it wont be queued if it could be delivered directly */
620 asoc->size_on_all_streams -= control->length;
621 sctp_ucount_decr(asoc->cnt_on_all_streams);
622 strm->last_sequence_delivered++;
624 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
625 sctp_add_to_readq(stcb->sctp_ep, stcb,
627 &stcb->sctp_socket->so_rcv, 1,
628 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* This delivery may unblock queued messages that are now in order. */
629 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
631 nxt_todel = strm->last_sequence_delivered + 1;
632 if (nxt_todel == control->sinfo_ssn) {
633 TAILQ_REMOVE(&strm->inqueue, control, next);
634 asoc->size_on_all_streams -= control->length;
635 sctp_ucount_decr(asoc->cnt_on_all_streams);
636 strm->last_sequence_delivered++;
638 * We ignore the return of deliver_data here
639 * since we always can hold the chunk on the
640 * d-queue. And we have a finite number that
641 * can be delivered from the strq.
643 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
644 sctp_log_strm_del(control, NULL,
645 SCTP_STR_LOG_FROM_IMMED_DEL);
647 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
648 sctp_add_to_readq(stcb->sctp_ep, stcb,
650 &stcb->sctp_socket->so_rcv, 1,
651 SCTP_READ_LOCK_NOT_HELD,
660 * Ok, we did not deliver this guy, find the correct place
661 * to put it on the queue.
663 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
/* Out-of-order: insert by SSN — head if queue empty, else scan for position. */
666 if (TAILQ_EMPTY(&strm->inqueue)) {
668 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
669 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
671 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
673 TAILQ_FOREACH(at, &strm->inqueue, next) {
674 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
676 * one in queue is bigger than the
677 * new one, insert before this one
679 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
680 sctp_log_strm_del(control, at,
681 SCTP_STR_LOG_FROM_INSERT_MD);
683 TAILQ_INSERT_BEFORE(at, control, next);
685 } else if (at->sinfo_ssn == control->sinfo_ssn) {
687 * Gak, He sent me a duplicate str
691 * foo bar, I guess I will just free
692 * this new guy, should we abort
693 * too? FIX ME MAYBE? Or it COULD be
694 * that the SSN's have wrapped.
695 * Maybe I should compare to TSN
696 * somehow... sigh for now just blow
/* Duplicate SSN already queued: drop the new copy, releasing its data, net ref, and readq entry. */
701 sctp_m_freem(control->data);
702 control->data = NULL;
703 asoc->size_on_all_streams -= control->length;
704 sctp_ucount_decr(asoc->cnt_on_all_streams);
705 if (control->whoFrom) {
706 sctp_free_remote_addr(control->whoFrom);
707 control->whoFrom = NULL;
709 sctp_free_a_readq(stcb, control);
712 if (TAILQ_NEXT(at, next) == NULL) {
714 * We are at the end, insert
717 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
718 sctp_log_strm_del(control, at,
719 SCTP_STR_LOG_FROM_INSERT_TL);
721 TAILQ_INSERT_AFTER(&strm->inqueue,
732 * Returns two things: You get the total size of the deliverable parts of the
733 * first fragmented message on the reassembly queue. And you get a 1 back if
734 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * Scan the head of the reassembly queue: accumulate into *t_size the
 * bytes of consecutive-TSN fragments of the first message, stopping
 * at a TSN gap or the LAST_FRAG marker.  NOTE(review): the return
 * statements and the tsn increment inside the loop are not visible
 * in this excerpt — the visible loop compares 'tsn' against each
 * chunk's TSN_seq to detect gaps.
 */
737 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
739 struct sctp_tmit_chunk *chk;
743 chk = TAILQ_FIRST(&asoc->reasmqueue);
745 /* nothing on the queue */
748 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
749 /* Not a first on the queue */
752 tsn = chk->rec.data.TSN_seq;
753 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
754 if (tsn != chk->rec.data.TSN_seq) {
757 *t_size += chk->send_size;
758 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/*
 * Decide whether (partial) delivery from the reassembly queue can
 * start or continue.  If no fragmented delivery is in progress and
 * the queue head is a FIRST fragment that is deliverable (in-order
 * or unordered), start PD-API once the whole message — or at least
 * pd_point bytes of it — is present.  If a delivery is already in
 * progress, service it and re-check.  NOTE(review): some lines
 * (returns, else keywords, braces) are missing from this excerpt.
 */
767 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
769 struct sctp_tmit_chunk *chk;
771 uint32_t tsize, pd_point;
774 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: normalize the counters and bail (return not visible here). */
777 asoc->size_on_reasm_queue = 0;
778 asoc->cnt_on_reasm_queue = 0;
781 if (asoc->fragmented_delivery_inprogress == 0) {
783 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
784 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
785 (nxt_todel == chk->rec.data.stream_seq ||
786 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
788 * Yep the first one is here and its ok to deliver
/* PD point: min of a fraction of the rcv buffer and the configured threshold. */
791 if (stcb->sctp_socket) {
792 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
793 stcb->sctp_ep->partial_delivery_point);
795 pd_point = stcb->sctp_ep->partial_delivery_point;
797 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
799 * Yes, we setup to start reception, by
800 * backing down the TSN just in case we
801 * can't deliver. If we
/* Arm PD-API state so sctp_service_reassembly() picks up from TSN_seq. */
803 asoc->fragmented_delivery_inprogress = 1;
804 asoc->tsn_last_delivered =
805 chk->rec.data.TSN_seq - 1;
807 chk->rec.data.stream_number;
808 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
809 asoc->pdapi_ppid = chk->rec.data.payloadtype;
810 asoc->fragment_flags = chk->rec.data.rcv_flags;
811 sctp_service_reassembly(stcb, asoc);
816 * Service re-assembly will deliver stream data queued at
817 * the end of fragmented delivery.. but it wont know to go
818 * back and call itself again... we do that here with the
821 sctp_service_reassembly(stcb, asoc);
822 if (asoc->fragmented_delivery_inprogress == 0) {
824 * finished our Fragmented delivery, could be more
833 * Dump onto the re-assembly queue, in its proper place. After dumping on the
834 * queue, see if anthing can be delivered. If so pull it off (or as much as
835 * we can. If we run out of space then we must dump what we can and set the
836 * appropriate flag to say we queued what we could.
839 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
840 struct sctp_tmit_chunk *chk, int *abort_flag)
843 uint32_t cum_ackp1, prev_tsn, post_tsn;
844 struct sctp_tmit_chunk *at, *prev, *next;
847 cum_ackp1 = asoc->tsn_last_delivered + 1;
848 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
849 /* This is the first one on the queue */
850 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
852 * we do not check for delivery of anything when only one
855 asoc->size_on_reasm_queue = chk->send_size;
856 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
857 if (chk->rec.data.TSN_seq == cum_ackp1) {
858 if (asoc->fragmented_delivery_inprogress == 0 &&
859 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
860 SCTP_DATA_FIRST_FRAG) {
862 * An empty queue, no delivery inprogress,
863 * we hit the next one and it does NOT have
864 * a FIRST fragment mark.
866 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
867 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
868 0, M_NOWAIT, 1, MT_DATA);
871 struct sctp_paramhdr *ph;
875 sizeof(struct sctp_paramhdr) +
876 (sizeof(uint32_t) * 3);
877 ph = mtod(oper, struct sctp_paramhdr *);
879 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
880 ph->param_length = htons(SCTP_BUF_LEN(oper));
881 ippp = (uint32_t *) (ph + 1);
882 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
884 *ippp = chk->rec.data.TSN_seq;
886 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
889 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
890 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
892 } else if (asoc->fragmented_delivery_inprogress &&
893 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
895 * We are doing a partial delivery and the
896 * NEXT chunk MUST be either the LAST or
897 * MIDDLE fragment NOT a FIRST
899 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
900 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
901 0, M_NOWAIT, 1, MT_DATA);
903 struct sctp_paramhdr *ph;
907 sizeof(struct sctp_paramhdr) +
908 (3 * sizeof(uint32_t));
909 ph = mtod(oper, struct sctp_paramhdr *);
911 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
912 ph->param_length = htons(SCTP_BUF_LEN(oper));
913 ippp = (uint32_t *) (ph + 1);
914 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
916 *ippp = chk->rec.data.TSN_seq;
918 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
920 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
921 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
923 } else if (asoc->fragmented_delivery_inprogress) {
925 * Here we are ok with a MIDDLE or LAST
928 if (chk->rec.data.stream_number !=
929 asoc->str_of_pdapi) {
930 /* Got to be the right STR No */
931 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
932 chk->rec.data.stream_number,
934 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
935 0, M_NOWAIT, 1, MT_DATA);
937 struct sctp_paramhdr *ph;
941 sizeof(struct sctp_paramhdr) +
942 (sizeof(uint32_t) * 3);
944 struct sctp_paramhdr *);
946 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
948 htons(SCTP_BUF_LEN(oper));
949 ippp = (uint32_t *) (ph + 1);
950 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
952 *ippp = chk->rec.data.TSN_seq;
954 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
956 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
957 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
959 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
960 SCTP_DATA_UNORDERED &&
961 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
962 /* Got to be the right STR Seq */
963 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
964 chk->rec.data.stream_seq,
966 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
967 0, M_NOWAIT, 1, MT_DATA);
969 struct sctp_paramhdr *ph;
973 sizeof(struct sctp_paramhdr) +
974 (3 * sizeof(uint32_t));
976 struct sctp_paramhdr *);
978 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
980 htons(SCTP_BUF_LEN(oper));
981 ippp = (uint32_t *) (ph + 1);
982 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
984 *ippp = chk->rec.data.TSN_seq;
986 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
989 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
990 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
998 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
999 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
1001 * one in queue is bigger than the new one, insert
1005 asoc->size_on_reasm_queue += chk->send_size;
1006 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1008 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1010 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1011 /* Gak, He sent me a duplicate str seq number */
1013 * foo bar, I guess I will just free this new guy,
1014 * should we abort too? FIX ME MAYBE? Or it COULD be
1015 * that the SSN's have wrapped. Maybe I should
1016 * compare to TSN somehow... sigh for now just blow
1020 sctp_m_freem(chk->data);
1023 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1027 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1029 * We are at the end, insert it after this
1032 /* check it first */
1033 asoc->size_on_reasm_queue += chk->send_size;
1034 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1035 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1040 /* Now the audits */
1042 prev_tsn = chk->rec.data.TSN_seq - 1;
1043 if (prev_tsn == prev->rec.data.TSN_seq) {
1045 * Ok the one I am dropping onto the end is the
1046 * NEXT. A bit of valdiation here.
1048 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1049 SCTP_DATA_FIRST_FRAG ||
1050 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1051 SCTP_DATA_MIDDLE_FRAG) {
1053 * Insert chk MUST be a MIDDLE or LAST
1056 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1057 SCTP_DATA_FIRST_FRAG) {
1058 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1059 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1060 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1061 0, M_NOWAIT, 1, MT_DATA);
1063 struct sctp_paramhdr *ph;
1066 SCTP_BUF_LEN(oper) =
1067 sizeof(struct sctp_paramhdr) +
1068 (3 * sizeof(uint32_t));
1070 struct sctp_paramhdr *);
1072 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1074 htons(SCTP_BUF_LEN(oper));
1075 ippp = (uint32_t *) (ph + 1);
1076 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1078 *ippp = chk->rec.data.TSN_seq;
1080 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1083 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1084 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1088 if (chk->rec.data.stream_number !=
1089 prev->rec.data.stream_number) {
1091 * Huh, need the correct STR here,
1092 * they must be the same.
1094 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1095 chk->rec.data.stream_number,
1096 prev->rec.data.stream_number);
1097 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1098 0, M_NOWAIT, 1, MT_DATA);
1100 struct sctp_paramhdr *ph;
1103 SCTP_BUF_LEN(oper) =
1104 sizeof(struct sctp_paramhdr) +
1105 (3 * sizeof(uint32_t));
1107 struct sctp_paramhdr *);
1109 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1111 htons(SCTP_BUF_LEN(oper));
1112 ippp = (uint32_t *) (ph + 1);
1113 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1115 *ippp = chk->rec.data.TSN_seq;
1117 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1119 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1120 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1124 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1125 chk->rec.data.stream_seq !=
1126 prev->rec.data.stream_seq) {
1128 * Huh, need the correct STR here,
1129 * they must be the same.
1131 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1132 chk->rec.data.stream_seq,
1133 prev->rec.data.stream_seq);
1134 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1135 0, M_NOWAIT, 1, MT_DATA);
1137 struct sctp_paramhdr *ph;
1140 SCTP_BUF_LEN(oper) =
1141 sizeof(struct sctp_paramhdr) +
1142 (3 * sizeof(uint32_t));
1144 struct sctp_paramhdr *);
1146 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1148 htons(SCTP_BUF_LEN(oper));
1149 ippp = (uint32_t *) (ph + 1);
1150 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1152 *ippp = chk->rec.data.TSN_seq;
1154 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1156 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1157 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1161 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1162 SCTP_DATA_LAST_FRAG) {
1163 /* Insert chk MUST be a FIRST */
1164 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1165 SCTP_DATA_FIRST_FRAG) {
1166 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1167 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1168 0, M_NOWAIT, 1, MT_DATA);
1170 struct sctp_paramhdr *ph;
1173 SCTP_BUF_LEN(oper) =
1174 sizeof(struct sctp_paramhdr) +
1175 (3 * sizeof(uint32_t));
1177 struct sctp_paramhdr *);
1179 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1181 htons(SCTP_BUF_LEN(oper));
1182 ippp = (uint32_t *) (ph + 1);
1183 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1185 *ippp = chk->rec.data.TSN_seq;
1187 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1190 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1191 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1199 post_tsn = chk->rec.data.TSN_seq + 1;
1200 if (post_tsn == next->rec.data.TSN_seq) {
1202 * Ok the one I am inserting ahead of is my NEXT
1203 * one. A bit of valdiation here.
1205 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1206 /* Insert chk MUST be a last fragment */
1207 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1208 != SCTP_DATA_LAST_FRAG) {
1209 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1210 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1211 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1212 0, M_NOWAIT, 1, MT_DATA);
1214 struct sctp_paramhdr *ph;
1217 SCTP_BUF_LEN(oper) =
1218 sizeof(struct sctp_paramhdr) +
1219 (3 * sizeof(uint32_t));
1221 struct sctp_paramhdr *);
1223 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1225 htons(SCTP_BUF_LEN(oper));
1226 ippp = (uint32_t *) (ph + 1);
1227 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1229 *ippp = chk->rec.data.TSN_seq;
1231 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1233 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1234 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1238 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1239 SCTP_DATA_MIDDLE_FRAG ||
1240 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1241 SCTP_DATA_LAST_FRAG) {
1243 * Insert chk CAN be MIDDLE or FIRST NOT
1246 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1247 SCTP_DATA_LAST_FRAG) {
1248 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1249 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1250 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1251 0, M_NOWAIT, 1, MT_DATA);
1253 struct sctp_paramhdr *ph;
1256 SCTP_BUF_LEN(oper) =
1257 sizeof(struct sctp_paramhdr) +
1258 (3 * sizeof(uint32_t));
1260 struct sctp_paramhdr *);
1262 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1264 htons(SCTP_BUF_LEN(oper));
1265 ippp = (uint32_t *) (ph + 1);
1266 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1268 *ippp = chk->rec.data.TSN_seq;
1270 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1273 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1274 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1278 if (chk->rec.data.stream_number !=
1279 next->rec.data.stream_number) {
1281 * Huh, need the correct STR here,
1282 * they must be the same.
1284 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1285 chk->rec.data.stream_number,
1286 next->rec.data.stream_number);
1287 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1288 0, M_NOWAIT, 1, MT_DATA);
1290 struct sctp_paramhdr *ph;
1293 SCTP_BUF_LEN(oper) =
1294 sizeof(struct sctp_paramhdr) +
1295 (3 * sizeof(uint32_t));
1297 struct sctp_paramhdr *);
1299 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1301 htons(SCTP_BUF_LEN(oper));
1302 ippp = (uint32_t *) (ph + 1);
1303 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1305 *ippp = chk->rec.data.TSN_seq;
1307 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1310 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1311 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1315 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1316 chk->rec.data.stream_seq !=
1317 next->rec.data.stream_seq) {
1319 * Huh, need the correct STR here,
1320 * they must be the same.
1322 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1323 chk->rec.data.stream_seq,
1324 next->rec.data.stream_seq);
1325 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1326 0, M_NOWAIT, 1, MT_DATA);
1328 struct sctp_paramhdr *ph;
1331 SCTP_BUF_LEN(oper) =
1332 sizeof(struct sctp_paramhdr) +
1333 (3 * sizeof(uint32_t));
1335 struct sctp_paramhdr *);
1337 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1339 htons(SCTP_BUF_LEN(oper));
1340 ippp = (uint32_t *) (ph + 1);
1341 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1343 *ippp = chk->rec.data.TSN_seq;
1345 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1347 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1348 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1355 /* Do we need to do some delivery? check */
1356 sctp_deliver_reasm_check(stcb, asoc);
/*
 * NOTE(review): this chunk is a line-numbered, *sampled* listing of
 * sctp_indata.c — the leading integer on each line is the original file
 * line number, and many intermediate lines (the "static int" return-type
 * line, several branch bodies, and the function's closing/return lines)
 * are elided.  Only comments are added below; every surviving code line
 * is byte-identical.
 */
1360 * This is an unfortunate routine. It checks to make sure a evil guy is not
1361 * stuffing us full of bad packet fragments. A broken peer could also do this
1362 * but this is doubtful. It is to bad I must worry about evil crackers sigh
/*
 * Scan the association's reassembly queue and decide whether a chunk
 * carrying TSN_seq must be part of a fragmented message already queued.
 * For each queued chunk 'at':
 *   - if TSN_seq == at->TSN + 1, the predecessor had better be a LAST
 *     fragment; otherwise the new chunk belongs to 'at's message;
 *   - TSN_seq == at->TSN is a duplicate (software error at this point,
 *     since duplicates were filtered earlier);
 *   - if TSN_seq + 1 == at->TSN, 'at' had better be a FIRST fragment.
 * The return statements are among the elided lines; presumably it
 * returns non-zero when the TSN belongs to the reassembly queue —
 * TODO(review): confirm against the full source.
 */
1366 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1369 struct sctp_tmit_chunk *at;
1372 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1373 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
/* New TSN lies above this entry: check adjacency from below. */
1374 /* is it one bigger? */
1375 tsn_est = at->rec.data.TSN_seq + 1;
1376 if (tsn_est == TSN_seq) {
1377 /* yep. It better be a last then */
1378 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1379 SCTP_DATA_LAST_FRAG) {
1381 * Ok this guy belongs next to a guy
1382 * that is NOT last, it should be a
1383 * middle/last, not a complete
1389 * This guy is ok since its a LAST
1390 * and the new chunk is a fully
1391 * self- contained one.
1396 } else if (TSN_seq == at->rec.data.TSN_seq) {
1397 /* Software error since I have a dup? */
/* Remaining case: 'at' is above the new TSN — check adjacency from above. */
1401 * Ok, 'at' is larger than new chunk but does it
1402 * need to be right before it.
1404 tsn_est = TSN_seq + 1;
1405 if (tsn_est == at->rec.data.TSN_seq) {
1406 /* Yep, It better be a first */
1407 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1408 SCTP_DATA_FIRST_FRAG) {
/*
 * Process one received DATA chunk for an association: duplicate and
 * mapping-array checks, receive-window policing, stream-id/SSN
 * validation, two express-delivery fast paths, and finally queueing to
 * either the stream in-queue or the reassembly queue.  Sets *abort_flag
 * when the association is torn down, *high_tsn when this TSN is the
 * highest seen in the packet.
 * NOTE(review): sampled listing — each line keeps its original file
 * line number and many intermediate lines (the "static int" line,
 * closing braces, goto labels' surroundings, and the function tail past
 * original line 2151) are elided.  Comments only are added; surviving
 * code is byte-identical.
 */
1420 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1421 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1422 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1423 int *break_flag, int last_chunk)
1425 /* Process a data chunk */
1426 /* struct sctp_tmit_chunk *chk; */
1427 struct sctp_tmit_chunk *chk;
1431 int need_reasm_check = 0;
1432 uint16_t strmno, strmseq;
1434 struct sctp_queued_to_read *control;
1436 uint32_t protocol_id;
1437 uint8_t chunk_flags;
1438 struct sctp_stream_reset_list *liste;
/* Pull TSN and flags from the chunk header; the I-bit requests an
 * immediate SACK. */
1441 tsn = ntohl(ch->dp.tsn);
1442 chunk_flags = ch->ch.chunk_flags;
1443 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1444 asoc->send_sack = 1;
1446 protocol_id = ch->dp.protocol_id;
1447 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1448 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1449 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1454 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
/* A TSN at or below the cumulative ack is a duplicate: record it for
 * the next outbound SACK and request that a SACK be sent. */
1455 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1456 /* It is a duplicate */
1457 SCTP_STAT_INCR(sctps_recvdupdata);
1458 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1459 /* Record a dup for the next outbound sack */
1460 asoc->dup_tsns[asoc->numduptsns] = tsn;
1463 asoc->send_sack = 1;
1466 /* Calculate the number of TSN's between the base and this TSN */
1467 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1468 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1469 /* Can't hold the bit in the mapping at max array, toss it */
1472 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1473 SCTP_TCB_LOCK_ASSERT(stcb);
1474 if (sctp_expand_mapping_array(asoc, gap)) {
1475 /* Can't expand, drop it */
1479 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1482 /* See if we have received this one already */
1483 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1484 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1485 SCTP_STAT_INCR(sctps_recvdupdata);
1486 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1487 /* Record a dup for the next outbound sack */
1488 asoc->dup_tsns[asoc->numduptsns] = tsn;
1491 asoc->send_sack = 1;
1495 * Check to see about the GONE flag, duplicates would cause a sack
1496 * to be sent up above
1498 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1499 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1500 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1503 * wait a minute, this guy is gone, there is no longer a
1504 * receiver. Send peer an ABORT!
1506 struct mbuf *op_err;
1508 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1509 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1514 * Now before going further we see if there is room. If NOT then we
1515 * MAY let one through only IF this TSN is the one we are waiting
1516 * for on a partial delivery API.
1519 /* now do the tests */
1520 if (((asoc->cnt_on_all_streams +
1521 asoc->cnt_on_reasm_queue +
1522 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1523 (((int)asoc->my_rwnd) <= 0)) {
1525 * When we have NO room in the rwnd we check to make sure
1526 * the reader is doing its job...
1528 if (stcb->sctp_socket->so_rcv.sb_cc) {
1529 /* some to read, wake-up */
1530 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple/lock-testing builds must hold the socket lock for the wakeup;
 * the refcount keeps the tcb alive across the unlock/relock window,
 * and CLOSED_SOCKET is rechecked in case the assoc died meanwhile. */
1533 so = SCTP_INP_SO(stcb->sctp_ep);
1534 atomic_add_int(&stcb->asoc.refcnt, 1);
1535 SCTP_TCB_UNLOCK(stcb);
1536 SCTP_SOCKET_LOCK(so, 1);
1537 SCTP_TCB_LOCK(stcb);
1538 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1539 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1540 /* assoc was freed while we were unlocked */
1541 SCTP_SOCKET_UNLOCK(so, 1);
1545 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1546 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1547 SCTP_SOCKET_UNLOCK(so, 1);
1550 /* now is it in the mapping array of what we have accepted? */
1551 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1552 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1553 /* Nope not in the valid range dump it */
1554 sctp_set_rwnd(stcb, asoc);
1555 if ((asoc->cnt_on_all_streams +
1556 asoc->cnt_on_reasm_queue +
1557 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1558 SCTP_STAT_INCR(sctps_datadropchklmt);
1560 SCTP_STAT_INCR(sctps_datadroprwnd);
/* Validate the stream id.  An unknown stream queues an INVALID_STREAM
 * operational error back to the peer, but the TSN is still marked
 * received (nr-map) so the peer stops retransmitting it. */
1566 strmno = ntohs(ch->dp.stream_id);
1567 if (strmno >= asoc->streamincnt) {
1568 struct sctp_paramhdr *phdr;
1571 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1572 0, M_NOWAIT, 1, MT_DATA);
1574 /* add some space up front so prepend will work well */
1575 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1576 phdr = mtod(mb, struct sctp_paramhdr *);
1578 * Error causes are just param's and this one has
1579 * two back to back phdr, one with the error type
1580 * and size, the other with the streamid and a rsvd
1582 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1583 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1584 phdr->param_length =
1585 htons(sizeof(struct sctp_paramhdr) * 2);
1587 /* We insert the stream in the type field */
1588 phdr->param_type = ch->dp.stream_id;
1589 /* And set the length to 0 for the rsvd field */
1590 phdr->param_length = 0;
1591 sctp_queue_op_err(stcb, mb);
1593 SCTP_STAT_INCR(sctps_badsid);
1594 SCTP_TCB_LOCK_ASSERT(stcb);
1595 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1596 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1597 asoc->highest_tsn_inside_nr_map = tsn;
1599 if (tsn == (asoc->cumulative_tsn + 1)) {
1600 /* Update cum-ack */
1601 asoc->cumulative_tsn = tsn;
1606 * Before we continue lets validate that we are not being fooled by
1607 * an evil attacker. We can only have 4k chunks based on our TSN
1608 * spread allowed by the mapping array 512 * 8 bits, so there is no
1609 * way our stream sequence numbers could have wrapped. We of course
1610 * only validate the FIRST fragment so the bit must be set.
1612 strmseq = ntohs(ch->dp.stream_sequence);
1613 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Optional debug ring buffer of inbound TSNs (compile-time). */
1614 SCTP_TCB_LOCK_ASSERT(stcb);
1615 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1616 asoc->tsn_in_at = 0;
1617 asoc->tsn_in_wrapped = 1;
1619 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1620 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1621 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1622 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1623 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1624 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1625 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1626 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
/* An ordered FIRST fragment whose SSN is at/behind the last delivered
 * SSN (with no stream reset pending) is a protocol violation: abort
 * with cause location LOC_6..LOC_14 style reporting (LOC_14 here). */
1629 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1630 (TAILQ_EMPTY(&asoc->resetHead)) &&
1631 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1632 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1633 /* The incoming sseq is behind where we last delivered? */
1634 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1635 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1636 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1637 0, M_NOWAIT, 1, MT_DATA);
1639 struct sctp_paramhdr *ph;
1642 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1643 (3 * sizeof(uint32_t));
1644 ph = mtod(oper, struct sctp_paramhdr *);
1645 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1646 ph->param_length = htons(SCTP_BUF_LEN(oper));
1647 ippp = (uint32_t *) (ph + 1);
1648 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1652 *ippp = ((strmno << 16) | strmseq);
1655 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1656 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1660 /************************************
1661 * From here down we may find ch-> invalid
1662 * so its a good idea NOT to use it.
1663 *************************************/
/* Detach the payload: copy it out of the packet unless this is the
 * last chunk in the packet, in which case the mbuf chain is stolen
 * and trimmed front (m_adj) and back to exactly the_len bytes. */
1665 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1666 if (last_chunk == 0) {
1667 dmbuf = SCTP_M_COPYM(*m,
1668 (offset + sizeof(struct sctp_data_chunk)),
1670 #ifdef SCTP_MBUF_LOGGING
1671 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1674 for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1675 if (SCTP_BUF_IS_EXTENDED(mat)) {
1676 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1682 /* We can steal the last chunk */
1686 /* lop off the top part */
1687 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1688 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1689 l_len = SCTP_BUF_LEN(dmbuf);
1692 * need to count up the size hopefully does not hit
1698 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1699 l_len += SCTP_BUF_LEN(lat);
1702 if (l_len > the_len) {
1703 /* Trim the end round bytes off too */
1704 m_adj(dmbuf, -(l_len - the_len));
1707 if (dmbuf == NULL) {
1708 SCTP_STAT_INCR(sctps_nomem);
/* Fast path 1 — express delivery: unfragmented, no PD-API active, no
 * pending stream reset, and either unordered or exactly next-in-order
 * with an empty stream queue; push straight into the socket buffer. */
1711 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1712 asoc->fragmented_delivery_inprogress == 0 &&
1713 TAILQ_EMPTY(&asoc->resetHead) &&
1715 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1716 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1717 /* Candidate for express delivery */
1719 * Its not fragmented, No PD-API is up, Nothing in the
1720 * delivery queue, Its un-ordered OR ordered and the next to
1721 * deliver AND nothing else is stuck on the stream queue,
1722 * And there is room for it in the socket buffer. Lets just
1723 * stuff it up the buffer....
1726 /* It would be nice to avoid this copy if we could :< */
1727 sctp_alloc_a_readq(stcb, control);
1728 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1733 if (control == NULL) {
1734 goto failed_express_del;
1736 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1737 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1738 asoc->highest_tsn_inside_nr_map = tsn;
1740 sctp_add_to_readq(stcb->sctp_ep, stcb,
1741 control, &stcb->sctp_socket->so_rcv,
1742 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1744 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1745 /* for ordered, bump what we delivered */
1746 asoc->strmin[strmno].last_sequence_delivered++;
1748 SCTP_STAT_INCR(sctps_recvexpress);
1749 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1750 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1751 SCTP_STR_LOG_FROM_EXPRS_DEL);
1755 goto finish_express_del;
1758 /* If we reach here this is a new chunk */
/* Fast path 2 — PD-API express: append the fragment directly to the
 * in-progress partial-delivery read entry when it is the exact next
 * TSN of the same stream/SSN; a LAST fragment closes out the PD-API. */
1761 /* Express for fragmented delivery? */
1762 if ((asoc->fragmented_delivery_inprogress) &&
1763 (stcb->asoc.control_pdapi) &&
1764 (asoc->str_of_pdapi == strmno) &&
1765 (asoc->ssn_of_pdapi == strmseq)
1767 control = stcb->asoc.control_pdapi;
1768 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1769 /* Can't be another first? */
1770 goto failed_pdapi_express_del;
1772 if (tsn == (control->sinfo_tsn + 1)) {
1773 /* Yep, we can add it on */
1776 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1779 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1781 &stcb->sctp_socket->so_rcv)) {
1782 SCTP_PRINTF("Append fails end:%d\n", end);
1783 goto failed_pdapi_express_del;
1785 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1786 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1787 asoc->highest_tsn_inside_nr_map = tsn;
1789 SCTP_STAT_INCR(sctps_recvexpressm);
1790 asoc->tsn_last_delivered = tsn;
1791 asoc->fragment_flags = chunk_flags;
1792 asoc->tsn_of_pdapi_last_delivered = tsn;
1793 asoc->last_flags_delivered = chunk_flags;
1794 asoc->last_strm_seq_delivered = strmseq;
1795 asoc->last_strm_no_delivered = strmno;
1797 /* clean up the flags and such */
1798 asoc->fragmented_delivery_inprogress = 0;
1799 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1800 asoc->strmin[strmno].last_sequence_delivered++;
1802 stcb->asoc.control_pdapi = NULL;
1803 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1805 * There could be another message
1808 need_reasm_check = 1;
1812 goto finish_express_del;
1815 failed_pdapi_express_del:
/* Slow path: record the TSN in the appropriate mapping array (nr-map
 * when draining is off, regular map otherwise), then build either a
 * tmit_chunk (fragment -> reassembly) or a readq entry (complete). */
1817 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1818 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1819 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1820 asoc->highest_tsn_inside_nr_map = tsn;
1823 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1824 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1825 asoc->highest_tsn_inside_map = tsn;
1828 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1829 sctp_alloc_a_chunk(stcb, chk);
1831 /* No memory so we drop the chunk */
1832 SCTP_STAT_INCR(sctps_nomem);
1833 if (last_chunk == 0) {
1834 /* we copied it, free the copy */
1835 sctp_m_freem(dmbuf);
1839 chk->rec.data.TSN_seq = tsn;
1840 chk->no_fr_allowed = 0;
1841 chk->rec.data.stream_seq = strmseq;
1842 chk->rec.data.stream_number = strmno;
1843 chk->rec.data.payloadtype = protocol_id;
1844 chk->rec.data.context = stcb->asoc.context;
1845 chk->rec.data.doing_fast_retransmit = 0;
1846 chk->rec.data.rcv_flags = chunk_flags;
1848 chk->send_size = the_len;
/* The chunk pins its destination net until freed. */
1850 atomic_add_int(&net->ref_count, 1);
1853 sctp_alloc_a_readq(stcb, control);
1854 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1859 if (control == NULL) {
1860 /* No memory so we drop the chunk */
1861 SCTP_STAT_INCR(sctps_nomem);
1862 if (last_chunk == 0) {
1863 /* we copied it, free the copy */
1864 sctp_m_freem(dmbuf);
1868 control->length = the_len;
1871 /* Mark it as received */
1872 /* Now queue it where it belongs */
1873 if (control != NULL) {
/* Complete message: sanity-check against any fragmented delivery in
 * progress and against the reassembly queue before queueing; each
 * failure frees the readq entry and aborts (LOC_15..LOC_17). */
1874 /* First a sanity check */
1875 if (asoc->fragmented_delivery_inprogress) {
1877 * Ok, we have a fragmented delivery in progress if
1878 * this chunk is next to deliver OR belongs in our
1879 * view to the reassembly, the peer is evil or
1882 uint32_t estimate_tsn;
1884 estimate_tsn = asoc->tsn_last_delivered + 1;
1885 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1886 (estimate_tsn == control->sinfo_tsn)) {
1887 /* Evil/Broke peer */
1888 sctp_m_freem(control->data);
1889 control->data = NULL;
1890 if (control->whoFrom) {
1891 sctp_free_remote_addr(control->whoFrom);
1892 control->whoFrom = NULL;
1894 sctp_free_a_readq(stcb, control);
1895 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1896 0, M_NOWAIT, 1, MT_DATA);
1898 struct sctp_paramhdr *ph;
1901 SCTP_BUF_LEN(oper) =
1902 sizeof(struct sctp_paramhdr) +
1903 (3 * sizeof(uint32_t));
1904 ph = mtod(oper, struct sctp_paramhdr *);
1906 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1907 ph->param_length = htons(SCTP_BUF_LEN(oper));
1908 ippp = (uint32_t *) (ph + 1);
1909 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1913 *ippp = ((strmno << 16) | strmseq);
1915 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1916 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1920 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1921 sctp_m_freem(control->data);
1922 control->data = NULL;
1923 if (control->whoFrom) {
1924 sctp_free_remote_addr(control->whoFrom);
1925 control->whoFrom = NULL;
1927 sctp_free_a_readq(stcb, control);
1929 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1930 0, M_NOWAIT, 1, MT_DATA);
1932 struct sctp_paramhdr *ph;
1935 SCTP_BUF_LEN(oper) =
1936 sizeof(struct sctp_paramhdr) +
1937 (3 * sizeof(uint32_t));
1939 struct sctp_paramhdr *);
1941 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1943 htons(SCTP_BUF_LEN(oper));
1944 ippp = (uint32_t *) (ph + 1);
1945 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1949 *ippp = ((strmno << 16) | strmseq);
1951 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1952 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1958 /* No PDAPI running */
1959 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1961 * Reassembly queue is NOT empty validate
1962 * that this tsn does not need to be in
1963 * reasembly queue. If it does then our peer
1964 * is broken or evil.
1966 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1967 sctp_m_freem(control->data);
1968 control->data = NULL;
1969 if (control->whoFrom) {
1970 sctp_free_remote_addr(control->whoFrom);
1971 control->whoFrom = NULL;
1973 sctp_free_a_readq(stcb, control);
1974 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1975 0, M_NOWAIT, 1, MT_DATA);
1977 struct sctp_paramhdr *ph;
1980 SCTP_BUF_LEN(oper) =
1981 sizeof(struct sctp_paramhdr) +
1982 (3 * sizeof(uint32_t));
1984 struct sctp_paramhdr *);
1986 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1988 htons(SCTP_BUF_LEN(oper));
1989 ippp = (uint32_t *) (ph + 1);
1990 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1994 *ippp = ((strmno << 16) | strmseq);
1996 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1997 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
2003 /* ok, if we reach here we have passed the sanity checks */
2004 if (chunk_flags & SCTP_DATA_UNORDERED) {
2005 /* queue directly into socket buffer */
2006 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2007 sctp_add_to_readq(stcb->sctp_ep, stcb,
2009 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2012 * Special check for when streams are resetting. We
2013 * could be more smart about this and check the
2014 * actual stream to see if it is not being reset..
2015 * that way we would not create a HOLB when amongst
2016 * streams being reset and those not being reset.
2018 * We take complete messages that have a stream reset
2019 * intervening (aka the TSN is after where our
2020 * cum-ack needs to be) off and put them on a
2021 * pending_reply_queue. The reassembly ones we do
2022 * not have to worry about since they are all sorted
2023 * and proceessed by TSN order. It is only the
2024 * singletons I must worry about.
2026 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2027 SCTP_TSN_GT(tsn, liste->tsn)) {
2029 * yep its past where we need to reset... go
2030 * ahead and queue it.
2032 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2034 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
/* Keep pending_reply_queue sorted by TSN: insert before the first
 * entry with a larger TSN, else append at the tail. */
2036 struct sctp_queued_to_read *ctlOn,
2038 unsigned char inserted = 0;
2040 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2041 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2045 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2050 if (inserted == 0) {
2052 * must be put at end, use
2053 * prevP (all setup from
2054 * loop) to setup nextP.
2056 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2060 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2067 /* Into the re-assembly queue */
2068 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2071 * the assoc is now gone and chk was put onto the
2072 * reasm queue, which has all been freed.
/* finish_express_del (label elided from this listing): common
 * bookkeeping — cum-ack bump, stats, logging, stream-reset wrap-up
 * and a final reassembly-delivery check. */
2079 if (tsn == (asoc->cumulative_tsn + 1)) {
2080 /* Update cum-ack */
2081 asoc->cumulative_tsn = tsn;
2087 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2089 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2091 SCTP_STAT_INCR(sctps_recvdata);
2092 /* Set it present please */
2093 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2094 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2096 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2097 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2098 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2100 /* check the special flag for stream resets */
2101 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2102 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2104 * we have finished working through the backlogged TSN's now
2105 * time to reset streams. 1: call reset function. 2: free
2106 * pending_reply space 3: distribute any chunks in
2107 * pending_reply_queue.
2109 struct sctp_queued_to_read *ctl, *nctl;
2111 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2112 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2113 SCTP_FREE(liste, SCTP_M_STRESET);
2114 /* sa_ignore FREED_MEMORY */
2115 liste = TAILQ_FIRST(&asoc->resetHead);
2116 if (TAILQ_EMPTY(&asoc->resetHead)) {
2117 /* All can be removed */
2118 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2119 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2120 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2126 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2127 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2131 * if ctl->sinfo_tsn is <= liste->tsn we can
2132 * process it which is the NOT of
2133 * ctl->sinfo_tsn > liste->tsn
2135 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2136 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2143 * Now service re-assembly to pick up anything that has been
2144 * held on reassembly queue?
2146 sctp_deliver_reasm_check(stcb, asoc);
2147 need_reasm_check = 0;
2149 if (need_reasm_check) {
2150 /* Another one waits ? */
2151 sctp_deliver_reasm_check(stcb, asoc);
/*
 * sctp_map_lookup_tab[v] = number of consecutive 1 bits in byte v
 * starting at the least-significant bit (range 0..8; e.g. tab[0x01]=1,
 * tab[0x03]=2, tab[0x07]=3, tab[0xff]=8).  The mapping-array slide
 * logic uses it to advance the cumulative-ack point past the low-order
 * run of received-TSN bits in the first byte that still contains a 0
 * bit (see sctp_slide_mapping_arrays: "at += sctp_map_lookup_tab[val]").
 * NOTE(review): the closing "};" of this initializer (original line
 * 2189) is among the lines elided from this sampled listing.
 */
2156 int8_t sctp_map_lookup_tab[256] = {
2157 0, 1, 0, 2, 0, 1, 0, 3,
2158 0, 1, 0, 2, 0, 1, 0, 4,
2159 0, 1, 0, 2, 0, 1, 0, 3,
2160 0, 1, 0, 2, 0, 1, 0, 5,
2161 0, 1, 0, 2, 0, 1, 0, 3,
2162 0, 1, 0, 2, 0, 1, 0, 4,
2163 0, 1, 0, 2, 0, 1, 0, 3,
2164 0, 1, 0, 2, 0, 1, 0, 6,
2165 0, 1, 0, 2, 0, 1, 0, 3,
2166 0, 1, 0, 2, 0, 1, 0, 4,
2167 0, 1, 0, 2, 0, 1, 0, 3,
2168 0, 1, 0, 2, 0, 1, 0, 5,
2169 0, 1, 0, 2, 0, 1, 0, 3,
2170 0, 1, 0, 2, 0, 1, 0, 4,
2171 0, 1, 0, 2, 0, 1, 0, 3,
2172 0, 1, 0, 2, 0, 1, 0, 7,
2173 0, 1, 0, 2, 0, 1, 0, 3,
2174 0, 1, 0, 2, 0, 1, 0, 4,
2175 0, 1, 0, 2, 0, 1, 0, 3,
2176 0, 1, 0, 2, 0, 1, 0, 5,
2177 0, 1, 0, 2, 0, 1, 0, 3,
2178 0, 1, 0, 2, 0, 1, 0, 4,
2179 0, 1, 0, 2, 0, 1, 0, 3,
2180 0, 1, 0, 2, 0, 1, 0, 6,
2181 0, 1, 0, 2, 0, 1, 0, 3,
2182 0, 1, 0, 2, 0, 1, 0, 4,
2183 0, 1, 0, 2, 0, 1, 0, 3,
2184 0, 1, 0, 2, 0, 1, 0, 5,
2185 0, 1, 0, 2, 0, 1, 0, 3,
2186 0, 1, 0, 2, 0, 1, 0, 4,
2187 0, 1, 0, 2, 0, 1, 0, 3,
2188 0, 1, 0, 2, 0, 1, 0, 8
/*
 * Advance the cumulative TSN using the OR of nr_mapping_array and
 * mapping_array (a fragmented message may sit in mapping_array only),
 * then either wipe both arrays (when everything up to the highest TSN
 * is in) or slide them down so mapping_array_base_tsn moves forward.
 * Map operations are logged when SCTP_MAP_LOGGING_ENABLE is set.
 */
2193 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2196 * Now we also need to check the mapping array in a couple of ways.
2197 * 1) Did we move the cum-ack point?
2199 * When you first glance at this you might think that all entries that
2200 * make up the position of the cum-ack would be in the nr-mapping
2201 * array only.. i.e. things up to the cum-ack are always
2202 * deliverable. That's true with one exception, when its a fragmented
2203 * message we may not deliver the data until some threshold (or all
2204 * of it) is in place. So we must OR the nr_mapping_array and
2205 * mapping_array to get a true picture of the cum-ack.
2207 struct sctp_association *asoc;
2210 int slide_from, slide_end, lgap, distance;
2211 uint32_t old_cumack, old_base, old_highest, highest_tsn;
/* Snapshot the pre-slide state for the map logging calls below. */
2215 old_cumack = asoc->cumulative_tsn;
2216 old_base = asoc->mapping_array_base_tsn;
2217 old_highest = asoc->highest_tsn_inside_map;
2219 * We could probably improve this a small bit by calculating the
2220 * offset of the current cum-ack as the starting point.
/*
 * Scan the OR of both maps for the first clear bit; 'at' becomes the
 * bit offset (relative to mapping_array_base_tsn) of the first
 * missing TSN.
 */
2223 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2224 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2228 /* there is a 0 bit */
2229 at += sctp_map_lookup_tab[val];
2233 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
/* Sanity check: the new cum-ack must not exceed the highest TSN seen. */
2235 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2236 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2238 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2239 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2241 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2242 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2243 sctp_print_mapping_array(asoc);
2244 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2245 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2247 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2248 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2251 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2252 highest_tsn = asoc->highest_tsn_inside_nr_map;
2254 highest_tsn = asoc->highest_tsn_inside_map;
2256 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2257 /* The complete array was completed by a single FR */
2258 /* highest becomes the cum-ack */
2266 /* clear the array */
2267 clr = ((at + 7) >> 3);
2268 if (clr > asoc->mapping_array_size) {
2269 clr = asoc->mapping_array_size;
2271 memset(asoc->mapping_array, 0, clr);
2272 memset(asoc->nr_mapping_array, 0, clr);
2274 for (i = 0; i < asoc->mapping_array_size; i++) {
2275 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2276 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2277 sctp_print_mapping_array(asoc);
2281 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2282 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2283 } else if (at >= 8) {
2284 /* we can slide the mapping array down */
2285 /* slide_from holds where we hit the first NON 0xff byte */
2288 * now calculate the ceiling of the move using our highest
2291 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2292 slide_end = (lgap >> 3);
2293 if (slide_end < slide_from) {
2294 sctp_print_mapping_array(asoc);
2296 panic("impossible slide");
2298 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2299 lgap, slide_end, slide_from, at);
2303 if (slide_end > asoc->mapping_array_size) {
2305 panic("would overrun buffer");
2307 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2308 asoc->mapping_array_size, slide_end);
2309 slide_end = asoc->mapping_array_size;
2312 distance = (slide_end - slide_from) + 1;
2313 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2314 sctp_log_map(old_base, old_cumack, old_highest,
2315 SCTP_MAP_PREPARE_SLIDE);
2316 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2317 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2319 if (distance + slide_from > asoc->mapping_array_size ||
2322 * Here we do NOT slide forward the array so that
2323 * hopefully when more data comes in to fill it up
2324 * we will be able to slide it forward. Really I
2325 * don't think this should happen :-0
2328 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2329 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2330 (uint32_t) asoc->mapping_array_size,
2331 SCTP_MAP_SLIDE_NONE);
/* Shift both maps down by slide_from bytes and zero the tail. */
2336 for (ii = 0; ii < distance; ii++) {
2337 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2338 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2341 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2342 asoc->mapping_array[ii] = 0;
2343 asoc->nr_mapping_array[ii] = 0;
2345 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2346 asoc->highest_tsn_inside_map += (slide_from << 3);
2348 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2349 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2351 asoc->mapping_array_base_tsn += (slide_from << 3);
2352 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2353 sctp_log_map(asoc->mapping_array_base_tsn,
2354 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2355 SCTP_MAP_SLIDE_RESULT);
/*
 * Decide how to acknowledge newly received data.  In SHUTDOWN-SENT
 * state the delayed-ack timer is stopped and a SHUTDOWN plus an
 * immediate SACK are sent.  Otherwise a SACK is sent right away when
 * one is owed (send_sack set, a gap closed or still open, duplicate
 * TSNs seen, delayed ack disabled, or the sack_freq packet limit hit);
 * if the CMT DAC conditions hold the ack is instead delayed by
 * (re)starting the delayed-ack timer.
 */
2362 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2364 struct sctp_association *asoc;
2365 uint32_t highest_tsn;
2368 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2369 highest_tsn = asoc->highest_tsn_inside_nr_map;
2371 highest_tsn = asoc->highest_tsn_inside_map;
2375 * Now we need to see if we need to queue a sack or just start the
2376 * timer (if allowed).
2378 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2380 * Ok special case, in SHUTDOWN-SENT case. here we make
2381 * sure SACK timer is off and instead send a SHUTDOWN and a
2384 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2385 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2386 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2388 sctp_send_shutdown(stcb,
2389 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2390 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2394 /* is there a gap now ? */
2395 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2398 * CMT DAC algorithm: increase number of packets received
2401 stcb->asoc.cmt_dac_pkts_rcvd++;
2403 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2405 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2407 (stcb->asoc.numduptsns) || /* we have dup's */
2408 (is_a_gap) || /* is still a gap */
2409 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2410 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2413 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2414 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2415 (stcb->asoc.send_sack == 0) &&
2416 (stcb->asoc.numduptsns == 0) &&
2417 (stcb->asoc.delayed_ack) &&
2418 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2421 * CMT DAC algorithm: With CMT, delay acks
2422 * even in the face of
2424 * reordering. Therefore, if acks that do not
2425 * have to be sent because of the above
2426 * reasons, will be delayed. That is, acks
2427 * that would have been sent due to gap
2428 * reports will be delayed with DAC. Start
2429 * the delayed ack timer.
2431 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2432 stcb->sctp_ep, stcb, NULL);
2435 * Ok we must build a SACK since the timer
2436 * is pending, we got our first packet OR
2437 * there are gaps or duplicates.
2439 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2440 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2443 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2444 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2445 stcb->sctp_ep, stcb, NULL);
/*
 * Service the reassembly queue.  First continue an in-progress partial
 * delivery (PD-API); if one remains in progress, stop.  Otherwise look
 * at the head of the reassembly queue: when the first fragment of the
 * next deliverable message is there (in stream-sequence order, or
 * unordered), start a new partial delivery -- but only once either the
 * whole message is queued or its size has reached the partial-delivery
 * point (bounded by the receive socket buffer limit).
 */
2452 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2454 struct sctp_tmit_chunk *chk;
2455 uint32_t tsize, pd_point;
2458 if (asoc->fragmented_delivery_inprogress) {
2459 sctp_service_reassembly(stcb, asoc);
2461 /* Can we proceed further, i.e. the PD-API is complete */
2462 if (asoc->fragmented_delivery_inprogress) {
2467 * Now is there some other chunk I can deliver from the reassembly
2471 chk = TAILQ_FIRST(&asoc->reasmqueue);
2473 asoc->size_on_reasm_queue = 0;
2474 asoc->cnt_on_reasm_queue = 0;
2477 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2478 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2479 ((nxt_todel == chk->rec.data.stream_seq) ||
2480 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2482 * Yep the first one is here. We setup to start reception,
2483 * by backing down the TSN just in case we can't deliver.
2487 * Before we start though either all of the message should
2488 * be here or the socket buffer max or nothing on the
2489 * delivery queue and something can be delivered.
2491 if (stcb->sctp_socket) {
2492 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2493 stcb->sctp_ep->partial_delivery_point);
2495 pd_point = stcb->sctp_ep->partial_delivery_point;
2497 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2498 asoc->fragmented_delivery_inprogress = 1;
2499 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2500 asoc->str_of_pdapi = chk->rec.data.stream_number;
2501 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2502 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2503 asoc->fragment_flags = chk->rec.data.rcv_flags;
2504 sctp_service_reassembly(stcb, asoc);
2505 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * Walk every chunk in a received packet.  DATA chunks are handed to
 * sctp_process_a_data_chunk(); a DATA chunk shorter than its minimum
 * size aborts the association with a protocol-violation cause.  Known
 * control chunk types found in the data region are ignored (or abort
 * the association when the sctp_strict_data_order sysctl is set).
 * Unknown chunk types follow the upper-two-bit rules: bit 0x40 queues
 * an unrecognized-chunk error report, and a clear 0x80 bit stops
 * processing of the rest of the packet.  Afterwards the peer-alive
 * timestamp is refreshed, the reassembly queue is serviced, and SACK
 * generation is triggered via sctp_sack_check().
 */
2513 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2514 struct sockaddr *src, struct sockaddr *dst,
2515 struct sctphdr *sh, struct sctp_inpcb *inp,
2516 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2517 uint8_t use_mflowid, uint32_t mflowid,
2518 uint32_t vrf_id, uint16_t port)
2520 struct sctp_data_chunk *ch, chunk_buf;
2521 struct sctp_association *asoc;
2522 int num_chunks = 0; /* number of control chunks processed */
2524 int chk_length, break_flag, last_chunk;
2525 int abort_flag = 0, was_a_gap;
2527 uint32_t highest_tsn;
2530 sctp_set_rwnd(stcb, &stcb->asoc);
2533 SCTP_TCB_LOCK_ASSERT(stcb);
2535 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2536 highest_tsn = asoc->highest_tsn_inside_nr_map;
2538 highest_tsn = asoc->highest_tsn_inside_map;
2540 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2542 * setup where we got the last DATA packet from for any SACK that
2543 * may need to go out. Don't bump the net. This is done ONLY when a
2544 * chunk is assigned.
2546 asoc->last_data_chunk_from = net;
2549 * Now before we proceed we must figure out if this is a wasted
2550 * cluster... i.e. it is a small packet sent in and yet the driver
2551 * underneath allocated a full cluster for it. If so we must copy it
2552 * to a smaller mbuf and free up the cluster mbuf. This will help
2553 * with cluster starvation. Note for __Panda__ we don't do this
2554 * since it has clusters all the way down to 64 bytes.
2556 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2557 /* we only handle mbufs that are singletons.. not chains */
2558 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2560 /* ok lets see if we can copy the data up */
2563 /* get the pointers and copy */
2564 to = mtod(m, caddr_t *);
2565 from = mtod((*mm), caddr_t *);
2566 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2567 /* copy the length and free up the old */
2568 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2570 /* success, back copy */
2573 /* We are in trouble in the mbuf world .. yikes */
2577 /* get pointer to the first chunk header */
2578 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2579 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2584 * process all DATA chunks...
2586 *high_tsn = asoc->cumulative_tsn;
2588 asoc->data_pkts_seen++;
2589 while (stop_proc == 0) {
2590 /* validate chunk length */
2591 chk_length = ntohs(ch->ch.chunk_length);
2592 if (length - *offset < chk_length) {
2593 /* all done, mutilated chunk */
2597 if (ch->ch.chunk_type == SCTP_DATA) {
2598 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2600 * Need to send an abort since we had an
2601 * invalid data chunk.
2603 struct mbuf *op_err;
2605 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2606 0, M_NOWAIT, 1, MT_DATA);
2609 struct sctp_paramhdr *ph;
2612 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2613 (2 * sizeof(uint32_t));
2614 ph = mtod(op_err, struct sctp_paramhdr *);
2616 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2617 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2618 ippp = (uint32_t *) (ph + 1);
2619 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2621 *ippp = asoc->cumulative_tsn;
2624 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2625 sctp_abort_association(inp, stcb, m, iphlen,
2626 src, dst, sh, op_err,
2627 use_mflowid, mflowid,
2631 #ifdef SCTP_AUDITING_ENABLED
2632 sctp_audit_log(0xB1, 0);
2634 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2639 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2640 chk_length, net, high_tsn, &abort_flag, &break_flag,
2649 * Set because of out of rwnd space and no
2650 * drop rep space left.
2656 /* not a data chunk in the data region */
2657 switch (ch->ch.chunk_type) {
2658 case SCTP_INITIATION:
2659 case SCTP_INITIATION_ACK:
2660 case SCTP_SELECTIVE_ACK:
2661 case SCTP_NR_SELECTIVE_ACK:
2662 case SCTP_HEARTBEAT_REQUEST:
2663 case SCTP_HEARTBEAT_ACK:
2664 case SCTP_ABORT_ASSOCIATION:
2666 case SCTP_SHUTDOWN_ACK:
2667 case SCTP_OPERATION_ERROR:
2668 case SCTP_COOKIE_ECHO:
2669 case SCTP_COOKIE_ACK:
2672 case SCTP_SHUTDOWN_COMPLETE:
2673 case SCTP_AUTHENTICATION:
2674 case SCTP_ASCONF_ACK:
2675 case SCTP_PACKET_DROPPED:
2676 case SCTP_STREAM_RESET:
2677 case SCTP_FORWARD_CUM_TSN:
2680 * Now, what do we do with KNOWN chunks that
2681 * are NOT in the right place?
2683 * For now, I do nothing but ignore them. We
2684 * may later want to add sysctl stuff to
2685 * switch out and do either an ABORT() or
2686 * possibly process them.
2688 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2689 struct mbuf *op_err;
2691 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2692 sctp_abort_association(inp, stcb,
2696 use_mflowid, mflowid,
2702 /* unknown chunk type, use bit rules */
2703 if (ch->ch.chunk_type & 0x40) {
2704 /* Add an error report to the queue */
2706 struct sctp_paramhdr *phd;
2708 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2710 phd = mtod(merr, struct sctp_paramhdr *);
2712 * We cheat and use param
2713 * type since we did not
2714 * bother to define a error
2715 * cause struct. They are
2716 * the same basic format
2717 * with different names.
2720 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2722 htons(chk_length + sizeof(*phd));
2723 SCTP_BUF_LEN(merr) = sizeof(*phd);
2724 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2725 if (SCTP_BUF_NEXT(merr)) {
2726 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2729 sctp_queue_op_err(stcb, merr);
2736 if ((ch->ch.chunk_type & 0x80) == 0) {
2737 /* discard the rest of this packet */
2739 } /* else skip this bad chunk and
2742 } /* switch of chunk type */
2744 *offset += SCTP_SIZE32(chk_length);
2745 if ((*offset >= length) || stop_proc) {
2746 /* no more data left in the mbuf chain */
2750 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2751 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2760 * we need to report rwnd overrun drops.
2762 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2766 * Did we get data, if so update the time for auto-close and
2767 * give peer credit for being alive.
2769 SCTP_STAT_INCR(sctps_recvpktwithdata);
2770 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2771 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2772 stcb->asoc.overall_error_count,
2774 SCTP_FROM_SCTP_INDATA,
2777 stcb->asoc.overall_error_count = 0;
2778 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2780 /* now service all of the reassm queue if needed */
2781 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2782 sctp_service_queues(stcb, asoc);
2784 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2785 /* Assure that we ack right away */
2786 stcb->asoc.send_sack = 1;
2788 /* Start a sack timer or QUEUE a SACK for sending */
2789 sctp_sack_check(stcb, was_a_gap);
/*
 * Process one SACK gap-ack block covering TSNs
 * [last_tsn + frag_strt .. last_tsn + frag_end].  For each newly acked
 * chunk: take it out of flight, update biggest_newly_acked_tsn and the
 * CMT (rtx-)pseudo-cumack / SFR / DAC tracking state, sample the RTO
 * when appropriate, and with nr_sacking set, mark the chunk NR_ACKED
 * and free its data.  *p_tp1 carries the sent-queue scan position
 * between calls so successive in-order blocks resume where the last
 * one stopped.  Returns wake_him (only meaningful for nr-sacks).
 */
2794 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2795 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2797 uint32_t * biggest_newly_acked_tsn,
2798 uint32_t * this_sack_lowest_newack,
2801 struct sctp_tmit_chunk *tp1;
2802 unsigned int theTSN;
2803 int j, wake_him = 0, circled = 0;
2805 /* Recover the tp1 we last saw */
2808 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2810 for (j = frag_strt; j <= frag_end; j++) {
2811 theTSN = j + last_tsn;
2813 if (tp1->rec.data.doing_fast_retransmit)
2817 * CMT: CUCv2 algorithm. For each TSN being
2818 * processed from the sent queue, track the
2819 * next expected pseudo-cumack, or
2820 * rtx_pseudo_cumack, if required. Separate
2821 * cumack trackers for first transmissions,
2822 * and retransmissions.
2824 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2825 (tp1->snd_count == 1)) {
2826 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2827 tp1->whoTo->find_pseudo_cumack = 0;
2829 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2830 (tp1->snd_count > 1)) {
2831 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2832 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2834 if (tp1->rec.data.TSN_seq == theTSN) {
2835 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2837 * must be held until
2840 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2842 * If it is less than RESEND, it is
2843 * now no-longer in flight.
2844 * Higher values may already be set
2845 * via previous Gap Ack Blocks...
2846 * i.e. ACKED or RESEND.
2848 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2849 *biggest_newly_acked_tsn)) {
2850 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2853 * CMT: SFR algo (and HTNA) - set
2854 * saw_newack to 1 for dest being
2855 * newly acked. update
2856 * this_sack_highest_newack if
2859 if (tp1->rec.data.chunk_was_revoked == 0)
2860 tp1->whoTo->saw_newack = 1;
2862 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2863 tp1->whoTo->this_sack_highest_newack)) {
2864 tp1->whoTo->this_sack_highest_newack =
2865 tp1->rec.data.TSN_seq;
2868 * CMT DAC algo: also update
2869 * this_sack_lowest_newack
2871 if (*this_sack_lowest_newack == 0) {
2872 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2873 sctp_log_sack(*this_sack_lowest_newack,
2875 tp1->rec.data.TSN_seq,
2878 SCTP_LOG_TSN_ACKED);
2880 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2883 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2884 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2885 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2886 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2887 * Separate pseudo_cumack trackers for first transmissions and
2890 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2891 if (tp1->rec.data.chunk_was_revoked == 0) {
2892 tp1->whoTo->new_pseudo_cumack = 1;
2894 tp1->whoTo->find_pseudo_cumack = 1;
2896 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2897 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2899 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2900 if (tp1->rec.data.chunk_was_revoked == 0) {
2901 tp1->whoTo->new_pseudo_cumack = 1;
2903 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2905 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2906 sctp_log_sack(*biggest_newly_acked_tsn,
2908 tp1->rec.data.TSN_seq,
2911 SCTP_LOG_TSN_ACKED);
2913 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2914 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2915 tp1->whoTo->flight_size,
2917 (uintptr_t) tp1->whoTo,
2918 tp1->rec.data.TSN_seq);
2920 sctp_flight_size_decrease(tp1);
2921 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2922 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2925 sctp_total_flight_decrease(stcb, tp1);
2927 tp1->whoTo->net_ack += tp1->send_size;
2928 if (tp1->snd_count < 2) {
2930 * True non-retransmitted chunk
2932 tp1->whoTo->net_ack2 += tp1->send_size;
2940 sctp_calculate_rto(stcb,
2943 &tp1->sent_rcv_time,
2944 sctp_align_safe_nocopy,
2945 SCTP_RTT_FROM_DATA);
2948 if (tp1->whoTo->rto_needed == 0) {
2949 tp1->whoTo->rto_needed = 1;
2955 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2956 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2957 stcb->asoc.this_sack_highest_gap)) {
2958 stcb->asoc.this_sack_highest_gap =
2959 tp1->rec.data.TSN_seq;
2961 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2962 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2963 #ifdef SCTP_AUDITING_ENABLED
2964 sctp_audit_log(0xB2,
2965 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2970 * All chunks NOT UNSENT fall through here and are marked
2971 * (leave PR-SCTP ones that are to skip alone though)
2973 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2974 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2975 tp1->sent = SCTP_DATAGRAM_MARKED;
2977 if (tp1->rec.data.chunk_was_revoked) {
2978 /* deflate the cwnd */
2979 tp1->whoTo->cwnd -= tp1->book_size;
2980 tp1->rec.data.chunk_was_revoked = 0;
2982 /* NR Sack code here */
2984 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2985 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2986 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2989 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2992 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2998 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
2999 sctp_m_freem(tp1->data);
3006 } /* if (tp1->TSN_seq == theTSN) */
3007 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3010 tp1 = TAILQ_NEXT(tp1, sctp_next);
3011 if ((tp1 == NULL) && (circled == 0)) {
3013 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3015 } /* end while (tp1) */
3018 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3020 /* In case the fragments were not in order we must reset */
3021 } /* end for (j = fragStart */
3023 return (wake_him); /* Return value only used for nr-sack */
/*
 * Iterate over the num_seg gap-ack blocks followed by the num_nr_seg
 * nr gap-ack blocks of a SACK chunk, pulling each block out of the
 * mbuf chain.  Malformed blocks (start > end) are skipped; out-of-order
 * blocks restart the sent-queue scan from the head.  Tracks the
 * biggest TSN acked and applies each block via
 * sctp_process_segment_range().  Returns nonzero when an nr-sack block
 * caused a chunk to be freed.
 */
3028 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3029 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3030 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3031 int num_seg, int num_nr_seg, int *rto_ok)
3033 struct sctp_gap_ack_block *frag, block;
3034 struct sctp_tmit_chunk *tp1;
3039 uint16_t frag_strt, frag_end, prev_frag_end;
3041 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3045 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3048 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3050 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3051 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3052 *offset += sizeof(block);
3054 return (chunk_freed);
3056 frag_strt = ntohs(frag->start);
3057 frag_end = ntohs(frag->end);
3059 if (frag_strt > frag_end) {
3060 /* This gap report is malformed, skip it. */
3063 if (frag_strt <= prev_frag_end) {
3064 /* This gap report is not in order, so restart. */
3065 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3067 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3068 *biggest_tsn_acked = last_tsn + frag_end;
3075 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3076 non_revocable, &num_frs, biggest_newly_acked_tsn,
3077 this_sack_lowest_newack, rto_ok)) {
3080 prev_frag_end = frag_end;
3082 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3084 sctp_log_fr(*biggest_tsn_acked,
3085 *biggest_newly_acked_tsn,
3086 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3088 return (chunk_freed);
/*
 * After processing a SACK, scan the sent queue for revoked chunks:
 * a chunk above the cum-ack that is still ACKED was acked by an
 * earlier SACK but not by this one, i.e. the peer revoked it.  Such a
 * chunk goes back to SENT and back into flight, and cwnd is inflated
 * by its book size to compensate for the artificial flight-size
 * increase (deflated later when it is re-acked).  Chunks MARKED by
 * this SACK are promoted to ACKED.
 */
3092 sctp_check_for_revoked(struct sctp_tcb *stcb,
3093 struct sctp_association *asoc, uint32_t cumack,
3094 uint32_t biggest_tsn_acked)
3096 struct sctp_tmit_chunk *tp1;
3098 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3099 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3101 * ok this guy is either ACK or MARKED. If it is
3102 * ACKED it has been previously acked but not this
3103 * time i.e. revoked. If it is MARKED it was ACK'ed
3106 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3109 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3110 /* it has been revoked */
3111 tp1->sent = SCTP_DATAGRAM_SENT;
3112 tp1->rec.data.chunk_was_revoked = 1;
3114 * We must add this stuff back in to assure
3115 * timers and such get started.
3117 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3118 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3119 tp1->whoTo->flight_size,
3121 (uintptr_t) tp1->whoTo,
3122 tp1->rec.data.TSN_seq);
3124 sctp_flight_size_increase(tp1);
3125 sctp_total_flight_increase(stcb, tp1);
3127 * We inflate the cwnd to compensate for our
3128 * artificial inflation of the flight_size.
3130 tp1->whoTo->cwnd += tp1->book_size;
3131 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3132 sctp_log_sack(asoc->last_acked_seq,
3134 tp1->rec.data.TSN_seq,
3137 SCTP_LOG_TSN_REVOKED);
3139 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3140 /* it has been re-acked in this SACK */
3141 tp1->sent = SCTP_DATAGRAM_ACKED;
3144 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3151 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3152 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3154 struct sctp_tmit_chunk *tp1;
3155 int strike_flag = 0;
3157 int tot_retrans = 0;
3158 uint32_t sending_seq;
3159 struct sctp_nets *net;
3160 int num_dests_sacked = 0;
3163 * select the sending_seq, this is either the next thing ready to be
3164 * sent but not transmitted, OR, the next seq we assign.
3166 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3168 sending_seq = asoc->sending_seq;
3170 sending_seq = tp1->rec.data.TSN_seq;
3173 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3174 if ((asoc->sctp_cmt_on_off > 0) &&
3175 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3176 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3177 if (net->saw_newack)
3181 if (stcb->asoc.peer_supports_prsctp) {
3182 (void)SCTP_GETTIME_TIMEVAL(&now);
3184 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3186 if (tp1->no_fr_allowed) {
3187 /* this one had a timeout or something */
3190 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3191 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3192 sctp_log_fr(biggest_tsn_newly_acked,
3193 tp1->rec.data.TSN_seq,
3195 SCTP_FR_LOG_CHECK_STRIKE);
3197 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3198 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3202 if (stcb->asoc.peer_supports_prsctp) {
3203 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3204 /* Is it expired? */
3205 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3206 /* Yes so drop it */
3207 if (tp1->data != NULL) {
3208 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3209 SCTP_SO_NOT_LOCKED);
3215 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3216 /* we are beyond the tsn in the sack */
3219 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3220 /* either a RESEND, ACKED, or MARKED */
3222 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3223 /* Continue strikin FWD-TSN chunks */
3224 tp1->rec.data.fwd_tsn_cnt++;
3229 * CMT : SFR algo (covers part of DAC and HTNA as well)
3231 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3233 * No new acks were receieved for data sent to this
3234 * dest. Therefore, according to the SFR algo for
3235 * CMT, no data sent to this dest can be marked for
3236 * FR using this SACK.
3239 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3240 tp1->whoTo->this_sack_highest_newack)) {
3242 * CMT: New acks were receieved for data sent to
3243 * this dest. But no new acks were seen for data
3244 * sent after tp1. Therefore, according to the SFR
3245 * algo for CMT, tp1 cannot be marked for FR using
3246 * this SACK. This step covers part of the DAC algo
3247 * and the HTNA algo as well.
3252 * Here we check to see if we were have already done a FR
3253 * and if so we see if the biggest TSN we saw in the sack is
3254 * smaller than the recovery point. If so we don't strike
3255 * the tsn... otherwise we CAN strike the TSN.
3258 * @@@ JRI: Check for CMT if (accum_moved &&
3259 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3262 if (accum_moved && asoc->fast_retran_loss_recovery) {
3264 * Strike the TSN if in fast-recovery and cum-ack
3267 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3268 sctp_log_fr(biggest_tsn_newly_acked,
3269 tp1->rec.data.TSN_seq,
3271 SCTP_FR_LOG_STRIKE_CHUNK);
3273 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3276 if ((asoc->sctp_cmt_on_off > 0) &&
3277 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3279 * CMT DAC algorithm: If SACK flag is set to
3280 * 0, then lowest_newack test will not pass
3281 * because it would have been set to the
3282 * cumack earlier. If not already to be
3283 * rtx'd, If not a mixed sack and if tp1 is
3284 * not between two sacked TSNs, then mark by
3285 * one more. NOTE that we are marking by one
3286 * additional time since the SACK DAC flag
3287 * indicates that two packets have been
3288 * received after this missing TSN.
3290 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3291 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3292 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3293 sctp_log_fr(16 + num_dests_sacked,
3294 tp1->rec.data.TSN_seq,
3296 SCTP_FR_LOG_STRIKE_CHUNK);
3301 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3302 (asoc->sctp_cmt_on_off == 0)) {
3304 * For those that have done a FR we must take
3305 * special consideration if we strike. I.e the
3306 * biggest_newly_acked must be higher than the
3307 * sending_seq at the time we did the FR.
3310 #ifdef SCTP_FR_TO_ALTERNATE
3312 * If FR's go to new networks, then we must only do
3313 * this for singly homed asoc's. However if the FR's
3314 * go to the same network (Armando's work) then its
3315 * ok to FR multiple times.
3323 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3324 tp1->rec.data.fast_retran_tsn)) {
3326 * Strike the TSN, since this ack is
3327 * beyond where things were when we
3330 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3331 sctp_log_fr(biggest_tsn_newly_acked,
3332 tp1->rec.data.TSN_seq,
3334 SCTP_FR_LOG_STRIKE_CHUNK);
3336 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3340 if ((asoc->sctp_cmt_on_off > 0) &&
3341 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3343 * CMT DAC algorithm: If
3344 * SACK flag is set to 0,
3345 * then lowest_newack test
3346 * will not pass because it
3347 * would have been set to
3348 * the cumack earlier. If
3349 * not already to be rtx'd,
3350 * If not a mixed sack and
3351 * if tp1 is not between two
3352 * sacked TSNs, then mark by
3353 * one more. NOTE that we
3354 * are marking by one
3355 * additional time since the
3356 * SACK DAC flag indicates
3357 * that two packets have
3358 * been received after this
3361 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3362 (num_dests_sacked == 1) &&
3363 SCTP_TSN_GT(this_sack_lowest_newack,
3364 tp1->rec.data.TSN_seq)) {
3365 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3366 sctp_log_fr(32 + num_dests_sacked,
3367 tp1->rec.data.TSN_seq,
3369 SCTP_FR_LOG_STRIKE_CHUNK);
3371 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3379 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3382 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3383 biggest_tsn_newly_acked)) {
3385 * We don't strike these: This is the HTNA
3386 * algorithm i.e. we don't strike If our TSN is
3387 * larger than the Highest TSN Newly Acked.
3391 /* Strike the TSN */
3392 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3393 sctp_log_fr(biggest_tsn_newly_acked,
3394 tp1->rec.data.TSN_seq,
3396 SCTP_FR_LOG_STRIKE_CHUNK);
3398 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3401 if ((asoc->sctp_cmt_on_off > 0) &&
3402 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3404 * CMT DAC algorithm: If SACK flag is set to
3405 * 0, then lowest_newack test will not pass
3406 * because it would have been set to the
3407 * cumack earlier. If not already to be
3408 * rtx'd, If not a mixed sack and if tp1 is
3409 * not between two sacked TSNs, then mark by
3410 * one more. NOTE that we are marking by one
3411 * additional time since the SACK DAC flag
3412 * indicates that two packets have been
3413 * received after this missing TSN.
3415 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3416 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3417 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3418 sctp_log_fr(48 + num_dests_sacked,
3419 tp1->rec.data.TSN_seq,
3421 SCTP_FR_LOG_STRIKE_CHUNK);
3427 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3428 struct sctp_nets *alt;
3430 /* fix counts and things */
3431 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3432 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3433 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3435 (uintptr_t) tp1->whoTo,
3436 tp1->rec.data.TSN_seq);
3439 tp1->whoTo->net_ack++;
3440 sctp_flight_size_decrease(tp1);
3441 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3442 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3446 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3447 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3448 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3450 /* add back to the rwnd */
3451 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3453 /* remove from the total flight */
3454 sctp_total_flight_decrease(stcb, tp1);
3456 if ((stcb->asoc.peer_supports_prsctp) &&
3457 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3459 * Has it been retransmitted tv_sec times? -
3460 * we store the retran count there.
3462 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3463 /* Yes, so drop it */
3464 if (tp1->data != NULL) {
3465 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3466 SCTP_SO_NOT_LOCKED);
3468 /* Make sure to flag we had a FR */
3469 tp1->whoTo->net_ack++;
3474 * SCTP_PRINTF("OK, we are now ready to FR this
3477 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3478 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3482 /* This is a subsequent FR */
3483 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3485 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3486 if (asoc->sctp_cmt_on_off > 0) {
3488 * CMT: Using RTX_SSTHRESH policy for CMT.
3489 * If CMT is being used, then pick dest with
3490 * largest ssthresh for any retransmission.
3492 tp1->no_fr_allowed = 1;
3494 /* sa_ignore NO_NULL_CHK */
3495 if (asoc->sctp_cmt_pf > 0) {
3497 * JRS 5/18/07 - If CMT PF is on,
3498 * use the PF version of
3501 alt = sctp_find_alternate_net(stcb, alt, 2);
3504 * JRS 5/18/07 - If only CMT is on,
3505 * use the CMT version of
3508 /* sa_ignore NO_NULL_CHK */
3509 alt = sctp_find_alternate_net(stcb, alt, 1);
3515 * CUCv2: If a different dest is picked for
3516 * the retransmission, then new
3517 * (rtx-)pseudo_cumack needs to be tracked
3518 * for orig dest. Let CUCv2 track new (rtx-)
3519 * pseudo-cumack always.
3522 tp1->whoTo->find_pseudo_cumack = 1;
3523 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3525 } else {/* CMT is OFF */
3527 #ifdef SCTP_FR_TO_ALTERNATE
3528 /* Can we find an alternate? */
3529 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3532 * default behavior is to NOT retransmit
3533 * FR's to an alternate. Armando Caro's
3534 * paper details why.
3540 tp1->rec.data.doing_fast_retransmit = 1;
3542 /* mark the sending seq for possible subsequent FR's */
3544 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3545 * (uint32_t)tpi->rec.data.TSN_seq);
3547 if (TAILQ_EMPTY(&asoc->send_queue)) {
3549 * If the queue of send is empty then its
3550 * the next sequence number that will be
3551 * assigned so we subtract one from this to
3552 * get the one we last sent.
3554 tp1->rec.data.fast_retran_tsn = sending_seq;
3557 * If there are chunks on the send queue
3558 * (unsent data that has made it from the
3559 * stream queues but not out the door, we
3560 * take the first one (which will have the
3561 * lowest TSN) and subtract one to get the
3564 struct sctp_tmit_chunk *ttt;
3566 ttt = TAILQ_FIRST(&asoc->send_queue);
3567 tp1->rec.data.fast_retran_tsn =
3568 ttt->rec.data.TSN_seq;
3573 * this guy had a RTO calculation pending on
3576 if ((tp1->whoTo != NULL) &&
3577 (tp1->whoTo->rto_needed == 0)) {
3578 tp1->whoTo->rto_needed = 1;
3582 if (alt != tp1->whoTo) {
3583 /* yes, there is an alternate. */
3584 sctp_free_remote_addr(tp1->whoTo);
3585 /* sa_ignore FREED_MEMORY */
3587 atomic_add_int(&alt->ref_count, 1);
/*
 * Try to advance the PR-SCTP "Advanced Peer Ack Point" (RFC 3758 C1).
 *
 * Walks the sent_queue front-to-back; for each chunk that is marked
 * SCTP_FORWARD_TSN_SKIP / SCTP_DATAGRAM_NR_ACKED (and, for expired
 * TTL-policy chunks, SCTP_DATAGRAM_RESEND) it moves
 * asoc->advanced_peer_ack_point forward to that chunk's TSN.  The walk
 * stops at the first chunk that cannot be skipped.
 *
 * Returns a chunk pointer (a_adv) — presumably the last chunk whose TSN
 * equals the advanced ack point, used by callers to pick the FWD-TSN
 * destination; the assignment itself is in elided lines, so TODO confirm
 * against the full source.
 *
 * NOTE(review): this excerpt is non-contiguous (the embedded original
 * line numbers jump), so braces, returns and several statements are
 * missing here.  Comments below only describe what the visible lines do.
 */
3593 struct sctp_tmit_chunk *
3594 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3595 struct sctp_association *asoc)
3597 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do unless the peer negotiated PR-SCTP. */
3601 if (asoc->peer_supports_prsctp == 0) {
/* Safe iteration: a skipped chunk may be released inside the loop. */
3604 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3605 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3606 tp1->sent != SCTP_DATAGRAM_RESEND &&
3607 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3608 /* no chance to advance, out of here */
3611 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3612 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3613 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3614 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3615 asoc->advanced_peer_ack_point,
3616 tp1->rec.data.TSN_seq, 0, 0);
3619 if (!PR_SCTP_ENABLED(tp1->flags)) {
3621 * We can't fwd-tsn past any that are reliable aka
3622 * retransmitted until the asoc fails.
/* Snapshot "now" once for the TTL-expiry comparisons below. */
3627 (void)SCTP_GETTIME_TIMEVAL(&now);
3631 * now we got a chunk which is marked for another
3632 * retransmission to a PR-stream but has run out its chances
3633 * already maybe OR has been marked to skip now. Can we skip
3634 * it if its a resend?
3636 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3637 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3639 * Now is this one marked for resend and its time is
/* TTL policy: drop the chunk once its timetodrop deadline passed. */
3642 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3643 /* Yes so drop it */
3645 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3646 1, SCTP_SO_NOT_LOCKED);
3650 * No, we are done when hit one for resend
3651 * whos time as not expired.
3657 * Ok now if this chunk is marked to drop it we can clean up
3658 * the chunk, advance our peer ack point and we can check
3661 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3662 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3663 /* advance PeerAckPoint goes forward */
3664 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3665 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3667 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3668 /* No update but we do save the chk */
3673 * If it is still in RESEND we can advance no
/*
 * Audit the association's flight-size bookkeeping against the actual
 * contents of the sent_queue.
 *
 * Classifies every sent chunk by its ->sent state into inflight / resend /
 * inbetween / above / acked buckets and flags an inconsistency when any
 * chunk is still counted as in flight or "in between" (the increments for
 * the buckets sit on elided lines).  Callers use the result to decide
 * whether to rebuild flight counters.  The early-out compares
 * pr_sctp_cnt against sent_queue_cnt — when every queued chunk is
 * PR-SCTP the audit is skipped (the elided line presumably returns 0;
 * TODO confirm).
 *
 * NOTE(review): excerpt is non-contiguous — the return statements,
 * bucket increments and the INVARIANTS conditional around panic() are
 * missing from this view.
 */
3683 sctp_fs_audit(struct sctp_association *asoc)
3685 struct sctp_tmit_chunk *chk;
3686 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3687 int entry_flight, entry_cnt, ret;
/* Snapshot the counters on entry so the report prints pre-audit values. */
3689 entry_flight = asoc->total_flight;
3690 entry_cnt = asoc->total_flight_count;
3693 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
/* Bucket every chunk on the sent queue by its send state. */
3696 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3697 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3698 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3699 chk->rec.data.TSN_seq,
3703 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3705 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3707 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Anything left in flight or in between means the express path lied. */
3714 if ((inflight > 0) || (inbetween > 0)) {
/* panic here is presumably under #ifdef INVARIANTS — TODO confirm. */
3716 panic("Flight size-express incorrect? \n");
3718 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3719 entry_flight, entry_cnt);
3721 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3722 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe once the peer's
 * receive window has reopened.
 *
 * Clears tp1->window_probe; if the chunk was already ACKed (or has no
 * data) it is left alone (only a flight log entry is emitted).  Otherwise
 * the chunk is taken out of the flight accounting (per-net and total),
 * the CC module is notified via the optional
 * sctp_cwnd_update_tsn_acknowledged hook, and the chunk is re-marked
 * SCTP_DATAGRAM_RESEND with the retransmit count bumped — i.e. it will
 * be re-sent through the normal retransmission path.
 *
 * NOTE(review): excerpt is non-contiguous; the early return after the
 * "skipped" branch and some argument lines are elided from this view.
 */
3731 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3732 struct sctp_association *asoc,
3733 struct sctp_tmit_chunk *tp1)
3735 tp1->window_probe = 0;
3736 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3737 /* TSN's skipped we do NOT move back. */
3738 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3739 tp1->whoTo->flight_size,
3741 (uintptr_t) tp1->whoTo,
3742 tp1->rec.data.TSN_seq);
3745 /* First setup this by shrinking flight */
/* Optional CC-module hook: not all CC algorithms implement it. */
3746 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3747 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3750 sctp_flight_size_decrease(tp1);
3751 sctp_total_flight_decrease(stcb, tp1);
3752 /* Now mark for resend */
3753 tp1->sent = SCTP_DATAGRAM_RESEND;
3754 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3756 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3757 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3758 tp1->whoTo->flight_size,
3760 (uintptr_t) tp1->whoTo,
3761 tp1->rec.data.TSN_seq);
/*
 * Fast-path SACK handler: process a SACK that carries ONLY a cumulative
 * ack (no gap-ack blocks).  Releases everything on the sent_queue up to
 * `cumack`, updates RTT/cwnd/rwnd state, restarts or stops T3 timers,
 * performs window-probe recovery, runs the PR-SCTP advance-ack-point
 * procedure, and handles SHUTDOWN progression when the queues drain.
 *
 * Parameters (from the visible signature):
 *   stcb      - association control block; TCB lock must be held
 *               (asserted via SCTP_TCB_LOCK_ASSERT below).
 *   cumack    - cumulative TSN ack from the SACK.
 *   rwnd      - peer's advertised receive window.
 *   abort_now - out-flag set when the association is aborted here
 *               (assignments are on elided lines — TODO confirm).
 *   ecne_seen - nonzero when an ECNE accompanied this SACK; suppresses
 *               the error-count clearing below.
 *
 * NOTE(review): this excerpt is non-contiguous (embedded original line
 * numbers jump); many braces, returns, #ifdefs and statement tails are
 * missing.  Comments describe only what the visible lines establish.
 */
3766 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3767 uint32_t rwnd, int *abort_now, int ecne_seen)
3769 struct sctp_nets *net;
3770 struct sctp_association *asoc;
3771 struct sctp_tmit_chunk *tp1, *tp2;
3773 int win_probe_recovery = 0;
3774 int win_probe_recovered = 0;
3775 int j, done_once = 0;
3778 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3779 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3780 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3782 SCTP_TCB_LOCK_ASSERT(stcb);
3783 #ifdef SCTP_ASOCLOG_OF_TSNS
/* Debug-only circular log of cum-acks seen for this association. */
3784 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3785 stcb->asoc.cumack_log_at++;
3786 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3787 stcb->asoc.cumack_log_at = 0;
3791 old_rwnd = asoc->peers_rwnd;
/* A cum-ack behind what we already acked is old/stale — visible branch. */
3792 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3795 } else if (asoc->last_acked_seq == cumack) {
3796 /* Window update sack */
/* rwnd minus bytes still in flight (plus per-chunk overhead estimate). */
3797 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3798 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3799 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3800 /* SWS sender side engages */
3801 asoc->peers_rwnd = 0;
3803 if (asoc->peers_rwnd > old_rwnd) {
3808 /* First setup for CC stuff */
3809 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3810 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3811 /* Drag along the window_tsn for cwr's */
3812 net->cwr_window_tsn = cumack;
3814 net->prev_cwnd = net->cwnd;
3819 * CMT: Reset CUC and Fast recovery algo variables before
3822 net->new_pseudo_cumack = 0;
3823 net->will_exit_fast_recovery = 0;
3824 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3825 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/* Sanity: peer must not ack a TSN we have never sent. */
3828 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3831 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3832 tp1 = TAILQ_LAST(&asoc->sent_queue,
3833 sctpchunk_listhead);
3834 send_s = tp1->rec.data.TSN_seq + 1;
3836 send_s = asoc->sending_seq;
3838 if (SCTP_TSN_GE(cumack, send_s)) {
/* presumably under #ifdef INVARIANTS — TODO confirm */
3844 panic("Impossible sack 1");
/* Build a PROTOCOL_VIOLATION error cause and abort the association. */
3849 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3850 0, M_NOWAIT, 1, MT_DATA);
3852 struct sctp_paramhdr *ph;
3855 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3857 ph = mtod(oper, struct sctp_paramhdr *);
3858 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3859 ph->param_length = htons(SCTP_BUF_LEN(oper));
3860 ippp = (uint32_t *) (ph + 1);
3861 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3863 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3864 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
3869 asoc->this_sack_highest_gap = cumack;
3870 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3871 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3872 stcb->asoc.overall_error_count,
3874 SCTP_FROM_SCTP_INDATA,
/* Any valid SACK clears the association-wide error counter. */
3877 stcb->asoc.overall_error_count = 0;
3878 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3879 /* process the new consecutive TSN first */
3880 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3881 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3882 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3883 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3885 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3887 * If it is less than ACKED, it is
3888 * now no-longer in flight. Higher
3889 * values may occur during marking
3891 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3892 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3893 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3894 tp1->whoTo->flight_size,
3896 (uintptr_t) tp1->whoTo,
3897 tp1->rec.data.TSN_seq);
3899 sctp_flight_size_decrease(tp1);
3900 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3901 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3904 /* sa_ignore NO_NULL_CHK */
3905 sctp_total_flight_decrease(stcb, tp1);
/* net_ack accumulates newly-acked bytes for the CC module. */
3907 tp1->whoTo->net_ack += tp1->send_size;
3908 if (tp1->snd_count < 2) {
3910 * True non-retransmited
3913 tp1->whoTo->net_ack2 +=
3916 /* update RTO too? */
/* RTT sample only valid for never-retransmitted chunks (Karn). */
3925 sctp_calculate_rto(stcb,
3927 &tp1->sent_rcv_time,
3928 sctp_align_safe_nocopy,
3929 SCTP_RTT_FROM_DATA);
3932 if (tp1->whoTo->rto_needed == 0) {
3933 tp1->whoTo->rto_needed = 1;
3939 * CMT: CUCv2 algorithm. From the
3940 * cumack'd TSNs, for each TSN being
3941 * acked for the first time, set the
3942 * following variables for the
3943 * corresp destination.
3944 * new_pseudo_cumack will trigger a
3946 * find_(rtx_)pseudo_cumack will
3947 * trigger search for the next
3948 * expected (rtx-)pseudo-cumack.
3950 tp1->whoTo->new_pseudo_cumack = 1;
3951 tp1->whoTo->find_pseudo_cumack = 1;
3952 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3954 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3955 /* sa_ignore NO_NULL_CHK */
3956 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3959 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3960 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3962 if (tp1->rec.data.chunk_was_revoked) {
3963 /* deflate the cwnd */
3964 tp1->whoTo->cwnd -= tp1->book_size;
3965 tp1->rec.data.chunk_was_revoked = 0;
3967 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3968 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3969 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
/* presumably under #ifdef INVARIANTS — TODO confirm */
3972 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
/* Fully acked: unlink, free data mbufs and return the chunk. */
3976 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3978 /* sa_ignore NO_NULL_CHK */
3979 sctp_free_bufspace(stcb, asoc, tp1, 1);
3980 sctp_m_freem(tp1->data);
3983 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3984 sctp_log_sack(asoc->last_acked_seq,
3986 tp1->rec.data.TSN_seq,
3989 SCTP_LOG_FREE_SENT);
3991 asoc->sent_queue_cnt--;
3992 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
3999 /* sa_ignore NO_NULL_CHK */
/* Wake a writer blocked on so_snd now that send space was freed. */
4000 if (stcb->sctp_socket) {
4001 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4005 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4006 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4007 /* sa_ignore NO_NULL_CHK */
4008 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4010 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
/* Apple lock-order dance: drop TCB lock to take the socket lock. */
4011 so = SCTP_INP_SO(stcb->sctp_ep);
4012 atomic_add_int(&stcb->asoc.refcnt, 1);
4013 SCTP_TCB_UNLOCK(stcb);
4014 SCTP_SOCKET_LOCK(so, 1);
4015 SCTP_TCB_LOCK(stcb);
4016 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4017 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4018 /* assoc was freed while we were unlocked */
4019 SCTP_SOCKET_UNLOCK(so, 1);
4023 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4024 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4025 SCTP_SOCKET_UNLOCK(so, 1);
4028 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4029 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4033 /* JRS - Use the congestion control given in the CC module */
4034 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4035 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4036 if (net->net_ack2 > 0) {
4038 * Karn's rule applies to clearing error
4039 * count, this is optional.
4041 net->error_count = 0;
4042 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4043 /* addr came good */
4044 net->dest_state |= SCTP_ADDR_REACHABLE;
4045 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4046 0, (void *)net, SCTP_SO_NOT_LOCKED);
4048 if (net == stcb->asoc.primary_destination) {
4049 if (stcb->asoc.alternate) {
4051 * release the alternate,
4054 sctp_free_remote_addr(stcb->asoc.alternate);
4055 stcb->asoc.alternate = NULL;
/* Leaving potentially-failed (PF) state: restart HB, tell CC. */
4058 if (net->dest_state & SCTP_ADDR_PF) {
4059 net->dest_state &= ~SCTP_ADDR_PF;
4060 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4061 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4062 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4063 /* Done with this net */
4066 /* restore any doubled timers */
4067 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4068 if (net->RTO < stcb->asoc.minrto) {
4069 net->RTO = stcb->asoc.minrto;
4071 if (net->RTO > stcb->asoc.maxrto) {
4072 net->RTO = stcb->asoc.maxrto;
4076 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4078 asoc->last_acked_seq = cumack;
4080 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4081 /* nothing left in-flight */
4082 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4083 net->flight_size = 0;
4084 net->partial_bytes_acked = 0;
4086 asoc->total_flight = 0;
4087 asoc->total_flight_count = 0;
/* Recompute peer rwnd with the post-ack flight numbers. */
4090 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4091 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4092 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4093 /* SWS sender side engages */
4094 asoc->peers_rwnd = 0;
4096 if (asoc->peers_rwnd > old_rwnd) {
4097 win_probe_recovery = 1;
4099 /* Now assure a timer where data is queued at */
4102 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4105 if (win_probe_recovery && (net->window_probe)) {
4106 win_probe_recovered = 1;
4108 * Find first chunk that was used with window probe
4109 * and clear the sent
4111 /* sa_ignore FREED_MEMORY */
4112 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4113 if (tp1->window_probe) {
4114 /* move back to data send queue */
4115 sctp_window_probe_recovery(stcb, asoc, tp1);
4120 if (net->RTO == 0) {
4121 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4123 to_ticks = MSEC_TO_TICKS(net->RTO);
4125 if (net->flight_size) {
/* Data still outstanding on this net: (re)arm the T3-rtx timer. */
4127 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4128 sctp_timeout_handler, &net->rxt_timer);
4129 if (net->window_probe) {
4130 net->window_probe = 0;
4133 if (net->window_probe) {
4135 * In window probes we must assure a timer
4136 * is still running there
4138 net->window_probe = 0;
4139 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4140 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4141 sctp_timeout_handler, &net->rxt_timer);
4143 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
/* Nothing in flight and no probe: stop any stale T3 timer. */
4144 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4146 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
4151 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4152 (asoc->sent_queue_retran_cnt == 0) &&
4153 (win_probe_recovered == 0) &&
4156 * huh, this should not happen unless all packets are
4157 * PR-SCTP and marked to skip of course.
/* Audit failed: rebuild flight counters from the queue contents. */
4159 if (sctp_fs_audit(asoc)) {
4160 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4161 net->flight_size = 0;
4163 asoc->total_flight = 0;
4164 asoc->total_flight_count = 0;
4165 asoc->sent_queue_retran_cnt = 0;
4166 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4167 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4168 sctp_flight_size_increase(tp1);
4169 sctp_total_flight_increase(stcb, tp1);
4170 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4171 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4178 /**********************************/
4179 /* Now what about shutdown issues */
4180 /**********************************/
4181 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4182 /* nothing left on sendqueue.. consider done */
4184 if ((asoc->stream_queue_cnt == 1) &&
4185 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4186 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4187 (asoc->locked_on_sending)
4189 struct sctp_stream_queue_pending *sp;
4192 * I may be in a state where we got all across.. but
4193 * cannot write more due to a shutdown... we abort
4194 * since the user did not indicate EOR in this case.
4195 * The sp will be cleaned during free of the asoc.
4197 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4199 if ((sp) && (sp->length == 0)) {
4200 /* Let cleanup code purge it */
4201 if (sp->msg_is_complete) {
4202 asoc->stream_queue_cnt--;
4204 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4205 asoc->locked_on_sending = NULL;
4206 asoc->stream_queue_cnt--;
4210 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4211 (asoc->stream_queue_cnt == 0)) {
4212 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4213 /* Need to abort here */
/* Abort with USER_INITIATED_ABT: shutdown with a partial message. */
4219 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4220 0, M_NOWAIT, 1, MT_DATA);
4222 struct sctp_paramhdr *ph;
4224 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4225 ph = mtod(oper, struct sctp_paramhdr *);
4226 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4227 ph->param_length = htons(SCTP_BUF_LEN(oper));
4229 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4230 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4232 struct sctp_nets *netp;
4234 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4235 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4236 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
/* All data acked: move to SHUTDOWN-SENT and send SHUTDOWN. */
4238 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4239 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4240 sctp_stop_timers_for_shutdown(stcb);
4241 if (asoc->alternate) {
4242 netp = asoc->alternate;
4244 netp = asoc->primary_destination;
4246 sctp_send_shutdown(stcb, netp);
4247 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4248 stcb->sctp_ep, stcb, netp);
4249 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4250 stcb->sctp_ep, stcb, netp);
4252 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4253 (asoc->stream_queue_cnt == 0)) {
4254 struct sctp_nets *netp;
4256 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4259 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
/* Peer shut down first: answer with SHUTDOWN-ACK. */
4260 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4261 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4262 sctp_stop_timers_for_shutdown(stcb);
4263 if (asoc->alternate) {
4264 netp = asoc->alternate;
4266 netp = asoc->primary_destination;
4268 sctp_send_shutdown_ack(stcb, netp);
4269 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4270 stcb->sctp_ep, stcb, netp);
4273 /*********************************************/
4274 /* Here we perform PR-SCTP procedures        */
4276 /*********************************************/
4277 /* C1. update advancedPeerAckPoint */
4278 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4279 asoc->advanced_peer_ack_point = cumack;
4281 /* PR-Sctp issues need to be addressed too */
4282 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4283 struct sctp_tmit_chunk *lchk;
4284 uint32_t old_adv_peer_ack_point;
4286 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4287 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4288 /* C3. See if we need to send a Fwd-TSN */
4289 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4291 * ISSUE with ECN, see FWD-TSN processing.
4293 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4294 send_forward_tsn(stcb, asoc);
4296 /* try to FR fwd-tsn's that get lost too */
4297 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4298 send_forward_tsn(stcb, asoc);
4303 /* Assure a timer is up */
4304 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4305 stcb->sctp_ep, stcb, lchk->whoTo);
4308 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4309 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4311 stcb->asoc.peers_rwnd,
4312 stcb->asoc.total_flight,
4313 stcb->asoc.total_output_queue_size);
4318 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4319 struct sctp_tcb *stcb,
4320 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4321 int *abort_now, uint8_t flags,
4322 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4324 struct sctp_association *asoc;
4325 struct sctp_tmit_chunk *tp1, *tp2;
4326 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4327 uint16_t wake_him = 0;
4328 uint32_t send_s = 0;
4330 int accum_moved = 0;
4331 int will_exit_fast_recovery = 0;
4332 uint32_t a_rwnd, old_rwnd;
4333 int win_probe_recovery = 0;
4334 int win_probe_recovered = 0;
4335 struct sctp_nets *net = NULL;
4338 uint8_t reneged_all = 0;
4339 uint8_t cmt_dac_flag;
4342 * we take any chance we can to service our queues since we cannot
4343 * get awoken when the socket is read from :<
4346 * Now perform the actual SACK handling: 1) Verify that it is not an
4347 * old sack, if so discard. 2) If there is nothing left in the send
4348 * queue (cum-ack is equal to last acked) then you have a duplicate
4349 * too, update any rwnd change and verify no timers are running.
4350 * then return. 3) Process any new consequtive data i.e. cum-ack
4351 * moved process these first and note that it moved. 4) Process any
4352 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4353 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4354 * sync up flightsizes and things, stop all timers and also check
4355 * for shutdown_pending state. If so then go ahead and send off the
4356 * shutdown. If in shutdown recv, send off the shutdown-ack and
4357 * start that timer, Ret. 9) Strike any non-acked things and do FR
4358 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4359 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4360 * if in shutdown_recv state.
4362 SCTP_TCB_LOCK_ASSERT(stcb);
4364 this_sack_lowest_newack = 0;
4365 SCTP_STAT_INCR(sctps_slowpath_sack);
4367 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4368 #ifdef SCTP_ASOCLOG_OF_TSNS
4369 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4370 stcb->asoc.cumack_log_at++;
4371 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4372 stcb->asoc.cumack_log_at = 0;
4377 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4378 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4379 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4381 old_rwnd = stcb->asoc.peers_rwnd;
4382 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4383 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4384 stcb->asoc.overall_error_count,
4386 SCTP_FROM_SCTP_INDATA,
4389 stcb->asoc.overall_error_count = 0;
4391 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4392 sctp_log_sack(asoc->last_acked_seq,
4399 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4401 uint32_t *dupdata, dblock;
4403 for (i = 0; i < num_dup; i++) {
4404 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4405 sizeof(uint32_t), (uint8_t *) & dblock);
4406 if (dupdata == NULL) {
4409 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4412 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4414 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4415 tp1 = TAILQ_LAST(&asoc->sent_queue,
4416 sctpchunk_listhead);
4417 send_s = tp1->rec.data.TSN_seq + 1;
4420 send_s = asoc->sending_seq;
4422 if (SCTP_TSN_GE(cum_ack, send_s)) {
4426 * no way, we have not even sent this TSN out yet.
4427 * Peer is hopelessly messed up with us.
4429 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4432 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4433 tp1->rec.data.TSN_seq, (void *)tp1);
4438 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4439 0, M_NOWAIT, 1, MT_DATA);
4441 struct sctp_paramhdr *ph;
4444 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4446 ph = mtod(oper, struct sctp_paramhdr *);
4447 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4448 ph->param_length = htons(SCTP_BUF_LEN(oper));
4449 ippp = (uint32_t *) (ph + 1);
4450 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4452 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4453 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4457 /**********************/
4458 /* 1) check the range */
4459 /**********************/
4460 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4461 /* acking something behind */
4464 /* update the Rwnd of the peer */
4465 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4466 TAILQ_EMPTY(&asoc->send_queue) &&
4467 (asoc->stream_queue_cnt == 0)) {
4468 /* nothing left on send/sent and strmq */
4469 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4470 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4471 asoc->peers_rwnd, 0, 0, a_rwnd);
4473 asoc->peers_rwnd = a_rwnd;
4474 if (asoc->sent_queue_retran_cnt) {
4475 asoc->sent_queue_retran_cnt = 0;
4477 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4478 /* SWS sender side engages */
4479 asoc->peers_rwnd = 0;
4481 /* stop any timers */
4482 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4483 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4484 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4485 net->partial_bytes_acked = 0;
4486 net->flight_size = 0;
4488 asoc->total_flight = 0;
4489 asoc->total_flight_count = 0;
4493 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4494 * things. The total byte count acked is tracked in netAckSz AND
4495 * netAck2 is used to track the total bytes acked that are un-
4496 * amibguious and were never retransmitted. We track these on a per
4497 * destination address basis.
4499 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4500 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4501 /* Drag along the window_tsn for cwr's */
4502 net->cwr_window_tsn = cum_ack;
4504 net->prev_cwnd = net->cwnd;
4509 * CMT: Reset CUC and Fast recovery algo variables before
4512 net->new_pseudo_cumack = 0;
4513 net->will_exit_fast_recovery = 0;
4514 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4515 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4518 /* process the new consecutive TSN first */
4519 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4520 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4521 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4523 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4525 * If it is less than ACKED, it is
4526 * now no-longer in flight. Higher
4527 * values may occur during marking
4529 if ((tp1->whoTo->dest_state &
4530 SCTP_ADDR_UNCONFIRMED) &&
4531 (tp1->snd_count < 2)) {
4533 * If there was no retran
4534 * and the address is
4535 * un-confirmed and we sent
4537 * sacked.. its confirmed,
4540 tp1->whoTo->dest_state &=
4541 ~SCTP_ADDR_UNCONFIRMED;
4543 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4544 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4545 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4546 tp1->whoTo->flight_size,
4548 (uintptr_t) tp1->whoTo,
4549 tp1->rec.data.TSN_seq);
4551 sctp_flight_size_decrease(tp1);
4552 sctp_total_flight_decrease(stcb, tp1);
4553 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4554 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4558 tp1->whoTo->net_ack += tp1->send_size;
4560 /* CMT SFR and DAC algos */
4561 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4562 tp1->whoTo->saw_newack = 1;
4564 if (tp1->snd_count < 2) {
4566 * True non-retransmited
4569 tp1->whoTo->net_ack2 +=
4572 /* update RTO too? */
4576 sctp_calculate_rto(stcb,
4578 &tp1->sent_rcv_time,
4579 sctp_align_safe_nocopy,
4580 SCTP_RTT_FROM_DATA);
4583 if (tp1->whoTo->rto_needed == 0) {
4584 tp1->whoTo->rto_needed = 1;
4590 * CMT: CUCv2 algorithm. From the
4591 * cumack'd TSNs, for each TSN being
4592 * acked for the first time, set the
4593 * following variables for the
4594 * corresp destination.
4595 * new_pseudo_cumack will trigger a
4597 * find_(rtx_)pseudo_cumack will
4598 * trigger search for the next
4599 * expected (rtx-)pseudo-cumack.
4601 tp1->whoTo->new_pseudo_cumack = 1;
4602 tp1->whoTo->find_pseudo_cumack = 1;
4603 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4606 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4607 sctp_log_sack(asoc->last_acked_seq,
4609 tp1->rec.data.TSN_seq,
4612 SCTP_LOG_TSN_ACKED);
4614 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4615 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4618 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4619 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4620 #ifdef SCTP_AUDITING_ENABLED
4621 sctp_audit_log(0xB3,
4622 (asoc->sent_queue_retran_cnt & 0x000000ff));
4625 if (tp1->rec.data.chunk_was_revoked) {
4626 /* deflate the cwnd */
4627 tp1->whoTo->cwnd -= tp1->book_size;
4628 tp1->rec.data.chunk_was_revoked = 0;
4630 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4631 tp1->sent = SCTP_DATAGRAM_ACKED;
4638 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4639 /* always set this up to cum-ack */
4640 asoc->this_sack_highest_gap = last_tsn;
4642 if ((num_seg > 0) || (num_nr_seg > 0)) {
4645 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4646 * to be greater than the cumack. Also reset saw_newack to 0
4649 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4650 net->saw_newack = 0;
4651 net->this_sack_highest_newack = last_tsn;
4655 * thisSackHighestGap will increase while handling NEW
4656 * segments this_sack_highest_newack will increase while
4657 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4658 * used for CMT DAC algo. saw_newack will also change.
4660 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4661 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4662 num_seg, num_nr_seg, &rto_ok)) {
4665 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4667 * validate the biggest_tsn_acked in the gap acks if
4668 * strict adherence is wanted.
4670 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4672 * peer is either confused or we are under
4673 * attack. We must abort.
4675 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4676 biggest_tsn_acked, send_s);
4681 /*******************************************/
4682 /* cancel ALL T3-send timer if accum moved */
4683 /*******************************************/
4684 if (asoc->sctp_cmt_on_off > 0) {
4685 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4686 if (net->new_pseudo_cumack)
4687 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4689 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4694 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4695 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4696 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4700 /********************************************/
4701 /* drop the acked chunks from the sentqueue */
4702 /********************************************/
4703 asoc->last_acked_seq = cum_ack;
4705 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4706 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4709 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4710 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4711 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4714 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4718 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4719 if (PR_SCTP_ENABLED(tp1->flags)) {
4720 if (asoc->pr_sctp_cnt != 0)
4721 asoc->pr_sctp_cnt--;
4723 asoc->sent_queue_cnt--;
4725 /* sa_ignore NO_NULL_CHK */
4726 sctp_free_bufspace(stcb, asoc, tp1, 1);
4727 sctp_m_freem(tp1->data);
4729 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4730 asoc->sent_queue_cnt_removeable--;
4733 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4734 sctp_log_sack(asoc->last_acked_seq,
4736 tp1->rec.data.TSN_seq,
4739 SCTP_LOG_FREE_SENT);
4741 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4744 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4746 panic("Warning flight size is postive and should be 0");
4748 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4749 asoc->total_flight);
4751 asoc->total_flight = 0;
4753 /* sa_ignore NO_NULL_CHK */
4754 if ((wake_him) && (stcb->sctp_socket)) {
4755 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4759 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4760 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4761 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4763 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4764 so = SCTP_INP_SO(stcb->sctp_ep);
4765 atomic_add_int(&stcb->asoc.refcnt, 1);
4766 SCTP_TCB_UNLOCK(stcb);
4767 SCTP_SOCKET_LOCK(so, 1);
4768 SCTP_TCB_LOCK(stcb);
4769 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4770 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4771 /* assoc was freed while we were unlocked */
4772 SCTP_SOCKET_UNLOCK(so, 1);
4776 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4777 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4778 SCTP_SOCKET_UNLOCK(so, 1);
4781 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4782 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4786 if (asoc->fast_retran_loss_recovery && accum_moved) {
4787 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4788 /* Setup so we will exit RFC2582 fast recovery */
4789 will_exit_fast_recovery = 1;
4793 * Check for revoked fragments:
4795 * if Previous sack - Had no frags then we can't have any revoked if
4796 * Previous sack - Had frag's then - If we now have frags aka
4797 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4798 * some of them. else - The peer revoked all ACKED fragments, since
4799 * we had some before and now we have NONE.
4803 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4804 asoc->saw_sack_with_frags = 1;
4805 } else if (asoc->saw_sack_with_frags) {
4806 int cnt_revoked = 0;
4808 /* Peer revoked all dg's marked or acked */
4809 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4810 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4811 tp1->sent = SCTP_DATAGRAM_SENT;
4812 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4813 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4814 tp1->whoTo->flight_size,
4816 (uintptr_t) tp1->whoTo,
4817 tp1->rec.data.TSN_seq);
4819 sctp_flight_size_increase(tp1);
4820 sctp_total_flight_increase(stcb, tp1);
4821 tp1->rec.data.chunk_was_revoked = 1;
4823 * To ensure that this increase in
4824 * flightsize, which is artificial, does not
4825 * throttle the sender, we also increase the
4826 * cwnd artificially.
4828 tp1->whoTo->cwnd += tp1->book_size;
4835 asoc->saw_sack_with_frags = 0;
4838 asoc->saw_sack_with_nr_frags = 1;
4840 asoc->saw_sack_with_nr_frags = 0;
4842 /* JRS - Use the congestion control given in the CC module */
4843 if (ecne_seen == 0) {
4844 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4845 if (net->net_ack2 > 0) {
4847 * Karn's rule applies to clearing error
4848 * count, this is optional.
4850 net->error_count = 0;
4851 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4852 /* addr came good */
4853 net->dest_state |= SCTP_ADDR_REACHABLE;
4854 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4855 0, (void *)net, SCTP_SO_NOT_LOCKED);
4857 if (net == stcb->asoc.primary_destination) {
4858 if (stcb->asoc.alternate) {
4860 * release the alternate,
4863 sctp_free_remote_addr(stcb->asoc.alternate);
4864 stcb->asoc.alternate = NULL;
4867 if (net->dest_state & SCTP_ADDR_PF) {
4868 net->dest_state &= ~SCTP_ADDR_PF;
4869 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4870 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4871 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4872 /* Done with this net */
4875 /* restore any doubled timers */
4876 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4877 if (net->RTO < stcb->asoc.minrto) {
4878 net->RTO = stcb->asoc.minrto;
4880 if (net->RTO > stcb->asoc.maxrto) {
4881 net->RTO = stcb->asoc.maxrto;
4885 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4887 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4888 /* nothing left in-flight */
4889 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4890 /* stop all timers */
4891 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4892 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4893 net->flight_size = 0;
4894 net->partial_bytes_acked = 0;
4896 asoc->total_flight = 0;
4897 asoc->total_flight_count = 0;
4899 /**********************************/
4900 /* Now what about shutdown issues */
4901 /**********************************/
4902 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4903 /* nothing left on sendqueue.. consider done */
4904 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4905 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4906 asoc->peers_rwnd, 0, 0, a_rwnd);
4908 asoc->peers_rwnd = a_rwnd;
4909 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4910 /* SWS sender side engages */
4911 asoc->peers_rwnd = 0;
4914 if ((asoc->stream_queue_cnt == 1) &&
4915 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4916 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4917 (asoc->locked_on_sending)
4919 struct sctp_stream_queue_pending *sp;
4922 * I may be in a state where we got all across.. but
4923 * cannot write more due to a shutdown... we abort
4924 * since the user did not indicate EOR in this case.
4926 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4928 if ((sp) && (sp->length == 0)) {
4929 asoc->locked_on_sending = NULL;
4930 if (sp->msg_is_complete) {
4931 asoc->stream_queue_cnt--;
4933 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4934 asoc->stream_queue_cnt--;
4938 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4939 (asoc->stream_queue_cnt == 0)) {
4940 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4941 /* Need to abort here */
4947 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4948 0, M_NOWAIT, 1, MT_DATA);
4950 struct sctp_paramhdr *ph;
4952 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4953 ph = mtod(oper, struct sctp_paramhdr *);
4954 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4955 ph->param_length = htons(SCTP_BUF_LEN(oper));
4957 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4958 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4961 struct sctp_nets *netp;
4963 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4964 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4965 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4967 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4968 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4969 sctp_stop_timers_for_shutdown(stcb);
4970 if (asoc->alternate) {
4971 netp = asoc->alternate;
4973 netp = asoc->primary_destination;
4975 sctp_send_shutdown(stcb, netp);
4976 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4977 stcb->sctp_ep, stcb, netp);
4978 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4979 stcb->sctp_ep, stcb, netp);
4982 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4983 (asoc->stream_queue_cnt == 0)) {
4984 struct sctp_nets *netp;
4986 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4989 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4990 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4991 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4992 sctp_stop_timers_for_shutdown(stcb);
4993 if (asoc->alternate) {
4994 netp = asoc->alternate;
4996 netp = asoc->primary_destination;
4998 sctp_send_shutdown_ack(stcb, netp);
4999 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5000 stcb->sctp_ep, stcb, netp);
5005 * Now here we are going to recycle net_ack for a different use...
5008 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5013 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5014 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5015 * automatically ensure that.
5017 if ((asoc->sctp_cmt_on_off > 0) &&
5018 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5019 (cmt_dac_flag == 0)) {
5020 this_sack_lowest_newack = cum_ack;
5022 if ((num_seg > 0) || (num_nr_seg > 0)) {
5023 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5024 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5026 /* JRS - Use the congestion control given in the CC module */
5027 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5029 /* Now are we exiting loss recovery ? */
5030 if (will_exit_fast_recovery) {
5031 /* Ok, we must exit fast recovery */
5032 asoc->fast_retran_loss_recovery = 0;
5034 if ((asoc->sat_t3_loss_recovery) &&
5035 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5036 /* end satellite t3 loss recovery */
5037 asoc->sat_t3_loss_recovery = 0;
5042 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5043 if (net->will_exit_fast_recovery) {
5044 /* Ok, we must exit fast recovery */
5045 net->fast_retran_loss_recovery = 0;
5049 /* Adjust and set the new rwnd value */
5050 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5051 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5052 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5054 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5055 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5056 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5057 /* SWS sender side engages */
5058 asoc->peers_rwnd = 0;
5060 if (asoc->peers_rwnd > old_rwnd) {
5061 win_probe_recovery = 1;
5064 * Now we must setup so we have a timer up for anyone with
5070 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5071 if (win_probe_recovery && (net->window_probe)) {
5072 win_probe_recovered = 1;
5074 * Find first chunk that was used with
5075 * window probe and clear the event. Put
5076 * it back into the send queue as if has
5079 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5080 if (tp1->window_probe) {
5081 sctp_window_probe_recovery(stcb, asoc, tp1);
5086 if (net->flight_size) {
5088 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5089 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5090 stcb->sctp_ep, stcb, net);
5092 if (net->window_probe) {
5093 net->window_probe = 0;
5096 if (net->window_probe) {
5098 * In window probes we must assure a timer
5099 * is still running there
5101 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5102 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5103 stcb->sctp_ep, stcb, net);
5106 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5107 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5109 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5114 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5115 (asoc->sent_queue_retran_cnt == 0) &&
5116 (win_probe_recovered == 0) &&
5119 * huh, this should not happen unless all packets are
5120 * PR-SCTP and marked to skip of course.
5122 if (sctp_fs_audit(asoc)) {
5123 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5124 net->flight_size = 0;
5126 asoc->total_flight = 0;
5127 asoc->total_flight_count = 0;
5128 asoc->sent_queue_retran_cnt = 0;
5129 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5130 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5131 sctp_flight_size_increase(tp1);
5132 sctp_total_flight_increase(stcb, tp1);
5133 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5134 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5141 /*********************************************/
5142 /* Here we perform PR-SCTP procedures */
5144 /*********************************************/
5145 /* C1. update advancedPeerAckPoint */
5146 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5147 asoc->advanced_peer_ack_point = cum_ack;
5149 /* C2. try to further move advancedPeerAckPoint ahead */
5150 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5151 struct sctp_tmit_chunk *lchk;
5152 uint32_t old_adv_peer_ack_point;
5154 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5155 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5156 /* C3. See if we need to send a Fwd-TSN */
5157 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5159 * ISSUE with ECN, see FWD-TSN processing.
5161 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5162 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5163 0xee, cum_ack, asoc->advanced_peer_ack_point,
5164 old_adv_peer_ack_point);
5166 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5167 send_forward_tsn(stcb, asoc);
5169 /* try to FR fwd-tsn's that get lost too */
5170 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5171 send_forward_tsn(stcb, asoc);
5176 /* Assure a timer is up */
5177 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5178 stcb->sctp_ep, stcb, lchk->whoTo);
5181 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5182 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5184 stcb->asoc.peers_rwnd,
5185 stcb->asoc.total_flight,
5186 stcb->asoc.total_output_queue_size);
5191 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5194 uint32_t cum_ack, a_rwnd;
5196 cum_ack = ntohl(cp->cumulative_tsn_ack);
5197 /* Arrange so a_rwnd does NOT change */
5198 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5200 /* Now call the express sack handling */
5201 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
5205 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5206 struct sctp_stream_in *strmin)
5208 struct sctp_queued_to_read *ctl, *nctl;
5209 struct sctp_association *asoc;
5213 tt = strmin->last_sequence_delivered;
5215 * First deliver anything prior to and including the stream no that
5218 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5219 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5220 /* this is deliverable now */
5221 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5222 /* subtract pending on streams */
5223 asoc->size_on_all_streams -= ctl->length;
5224 sctp_ucount_decr(asoc->cnt_on_all_streams);
5225 /* deliver it to at least the delivery-q */
5226 if (stcb->sctp_socket) {
5227 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5228 sctp_add_to_readq(stcb->sctp_ep, stcb,
5230 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5233 /* no more delivery now. */
5238 * now we must deliver things in queue the normal way if any are
5241 tt = strmin->last_sequence_delivered + 1;
5242 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5243 if (tt == ctl->sinfo_ssn) {
5244 /* this is deliverable now */
5245 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5246 /* subtract pending on streams */
5247 asoc->size_on_all_streams -= ctl->length;
5248 sctp_ucount_decr(asoc->cnt_on_all_streams);
5249 /* deliver it to at least the delivery-q */
5250 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5251 if (stcb->sctp_socket) {
5252 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5253 sctp_add_to_readq(stcb->sctp_ep, stcb,
5255 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
5258 tt = strmin->last_sequence_delivered + 1;
5266 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5267 struct sctp_association *asoc,
5268 uint16_t stream, uint16_t seq)
5270 struct sctp_tmit_chunk *chk, *nchk;
5272 /* For each one on here see if we need to toss it */
5274 * For now large messages held on the reasmqueue that are complete
5275 * will be tossed too. We could in theory do more work to spin
5276 * through and stop after dumping one msg aka seeing the start of a
5277 * new msg at the head, and call the delivery function... to see if
5278 * it can be delivered... But for now we just dump everything on the
5281 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5283 * Do not toss it if on a different stream or marked for
5284 * unordered delivery in which case the stream sequence
5285 * number has no meaning.
5287 if ((chk->rec.data.stream_number != stream) ||
5288 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5291 if (chk->rec.data.stream_seq == seq) {
5292 /* It needs to be tossed */
5293 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5294 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5295 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5296 asoc->str_of_pdapi = chk->rec.data.stream_number;
5297 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5298 asoc->fragment_flags = chk->rec.data.rcv_flags;
5300 asoc->size_on_reasm_queue -= chk->send_size;
5301 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5303 /* Clear up any stream problem */
5304 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5305 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5307 * We must dump forward this streams
5308 * sequence number if the chunk is not
5309 * unordered that is being skipped. There is
5310 * a chance that if the peer does not
5311 * include the last fragment in its FWD-TSN
5312 * we WILL have a problem here since you
5313 * would have a partial chunk in queue that
5314 * may not be deliverable. Also if a Partial
5315 * delivery API as started the user may get
5316 * a partial chunk. The next read returning
5317 * a new chunk... really ugly but I see no
5318 * way around it! Maybe a notify??
5320 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5323 sctp_m_freem(chk->data);
5326 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5327 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5329 * If the stream_seq is > than the purging one, we
5339 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5340 struct sctp_forward_tsn_chunk *fwd,
5341 int *abort_flag, struct mbuf *m, int offset)
5343 /* The pr-sctp fwd tsn */
5345 * here we will perform all the data receiver side steps for
5346 * processing FwdTSN, as required in by pr-sctp draft:
5348 * Assume we get FwdTSN(x):
5350 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5351 * others we have 3) examine and update re-ordering queue on
5352 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5353 * report where we are.
5355 struct sctp_association *asoc;
5356 uint32_t new_cum_tsn, gap;
5357 unsigned int i, fwd_sz, m_size;
5359 struct sctp_stream_in *strm;
5360 struct sctp_tmit_chunk *chk, *nchk;
5361 struct sctp_queued_to_read *ctl, *sv;
5364 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5365 SCTPDBG(SCTP_DEBUG_INDATA1,
5366 "Bad size too small/big fwd-tsn\n");
5369 m_size = (stcb->asoc.mapping_array_size << 3);
5370 /*************************************************************/
5371 /* 1. Here we update local cumTSN and shift the bitmap array */
5372 /*************************************************************/
5373 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5375 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5376 /* Already got there ... */
5380 * now we know the new TSN is more advanced, let's find the actual
5383 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5384 asoc->cumulative_tsn = new_cum_tsn;
5385 if (gap >= m_size) {
5386 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5390 * out of range (of single byte chunks in the rwnd I
5391 * give out). This must be an attacker.
5394 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5395 0, M_NOWAIT, 1, MT_DATA);
5397 struct sctp_paramhdr *ph;
5400 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5401 (sizeof(uint32_t) * 3);
5402 ph = mtod(oper, struct sctp_paramhdr *);
5403 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5404 ph->param_length = htons(SCTP_BUF_LEN(oper));
5405 ippp = (uint32_t *) (ph + 1);
5406 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5408 *ippp = asoc->highest_tsn_inside_map;
5410 *ippp = new_cum_tsn;
5412 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5413 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
5416 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5418 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5419 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5420 asoc->highest_tsn_inside_map = new_cum_tsn;
5422 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5423 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5425 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5426 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5429 SCTP_TCB_LOCK_ASSERT(stcb);
5430 for (i = 0; i <= gap; i++) {
5431 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5432 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5433 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5434 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5435 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5440 /*************************************************************/
5441 /* 2. Clear up re-assembly queue */
5442 /*************************************************************/
5444 * First service it if pd-api is up, just in case we can progress it
5447 if (asoc->fragmented_delivery_inprogress) {
5448 sctp_service_reassembly(stcb, asoc);
5450 /* For each one on here see if we need to toss it */
5452 * For now large messages held on the reasmqueue that are complete
5453 * will be tossed too. We could in theory do more work to spin
5454 * through and stop after dumping one msg aka seeing the start of a
5455 * new msg at the head, and call the delivery function... to see if
5456 * it can be delivered... But for now we just dump everything on the
5459 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5460 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5461 /* It needs to be tossed */
5462 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5463 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5464 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5465 asoc->str_of_pdapi = chk->rec.data.stream_number;
5466 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5467 asoc->fragment_flags = chk->rec.data.rcv_flags;
5469 asoc->size_on_reasm_queue -= chk->send_size;
5470 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5472 /* Clear up any stream problem */
5473 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5474 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5476 * We must dump forward this streams
5477 * sequence number if the chunk is not
5478 * unordered that is being skipped. There is
5479 * a chance that if the peer does not
5480 * include the last fragment in its FWD-TSN
5481 * we WILL have a problem here since you
5482 * would have a partial chunk in queue that
5483 * may not be deliverable. Also if a Partial
5484 * delivery API as started the user may get
5485 * a partial chunk. The next read returning
5486 * a new chunk... really ugly but I see no
5487 * way around it! Maybe a notify??
5489 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5492 sctp_m_freem(chk->data);
5495 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5498 * Ok we have gone beyond the end of the fwd-tsn's
5504 /*******************************************************/
5505 /* 3. Update the PR-stream re-ordering queues and fix */
5506 /* delivery issues as needed. */
5507 /*******************************************************/
5508 fwd_sz -= sizeof(*fwd);
5511 unsigned int num_str;
5512 struct sctp_strseq *stseq, strseqbuf;
5514 offset += sizeof(*fwd);
5516 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5517 num_str = fwd_sz / sizeof(struct sctp_strseq);
5518 for (i = 0; i < num_str; i++) {
5521 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5522 sizeof(struct sctp_strseq),
5523 (uint8_t *) & strseqbuf);
5524 offset += sizeof(struct sctp_strseq);
5525 if (stseq == NULL) {
5529 st = ntohs(stseq->stream);
5531 st = ntohs(stseq->sequence);
5532 stseq->sequence = st;
5537 * Ok we now look for the stream/seq on the read
5538 * queue where its not all delivered. If we find it
5539 * we transmute the read entry into a PDI_ABORTED.
5541 if (stseq->stream >= asoc->streamincnt) {
5542 /* screwed up streams, stop! */
5545 if ((asoc->str_of_pdapi == stseq->stream) &&
5546 (asoc->ssn_of_pdapi == stseq->sequence)) {
5548 * If this is the one we were partially
5549 * delivering now then we no longer are.
5550 * Note this will change with the reassembly
5553 asoc->fragmented_delivery_inprogress = 0;
5555 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5556 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5557 if ((ctl->sinfo_stream == stseq->stream) &&
5558 (ctl->sinfo_ssn == stseq->sequence)) {
5559 str_seq = (stseq->stream << 16) | stseq->sequence;
5561 ctl->pdapi_aborted = 1;
5562 sv = stcb->asoc.control_pdapi;
5563 stcb->asoc.control_pdapi = ctl;
5564 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5566 SCTP_PARTIAL_DELIVERY_ABORTED,
5568 SCTP_SO_NOT_LOCKED);
5569 stcb->asoc.control_pdapi = sv;
5571 } else if ((ctl->sinfo_stream == stseq->stream) &&
5572 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5573 /* We are past our victim SSN */
5577 strm = &asoc->strmin[stseq->stream];
5578 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5579 /* Update the sequence number */
5580 strm->last_sequence_delivered = stseq->sequence;
5582 /* now kick the stream the new way */
5583 /* sa_ignore NO_NULL_CHK */
5584 sctp_kick_prsctp_reorder_queue(stcb, strm);
5586 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5589 * Now slide thing forward.
5591 sctp_slide_mapping_arrays(stcb);
5593 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5594 /* now lets kick out and check for more fragmented delivery */
5595 /* sa_ignore NO_NULL_CHK */
5596 sctp_deliver_reasm_check(stcb, &stcb->asoc);