2 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
4 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions are met:
9 * a) Redistributions of source code must retain the above copyright notice,
10 * this list of conditions and the following disclaimer.
12 * b) Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in
14 * the documentation and/or other materials provided with the distribution.
16 * c) Neither the name of Cisco Systems, Inc. nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
22 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
23 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
24 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
25 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
26 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
27 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
28 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
29 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
30 * THE POSSIBILITY OF SUCH DAMAGE.
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
36 #include <netinet/sctp_os.h>
37 #include <netinet/sctp_var.h>
38 #include <netinet/sctp_sysctl.h>
39 #include <netinet/sctp_pcb.h>
40 #include <netinet/sctp_header.h>
41 #include <netinet/sctputil.h>
42 #include <netinet/sctp_output.h>
43 #include <netinet/sctp_input.h>
44 #include <netinet/sctp_indata.h>
45 #include <netinet/sctp_uio.h>
46 #include <netinet/sctp_timer.h>
50 * NOTES: On the outbound side of things I need to check the sack timer to
51 * see if I should generate a sack into the chunk queue (if I have data to
52 * send, that is) and will be sending it, for bundling.
54 * The callback in sctp_usrreq.c will get called when the socket is read from.
55 * This will cause sctp_service_queues() to get called on the top entry in
/*
 * Refresh this association's advertised receive window by recomputing it
 * via sctp_calc_rwnd() and storing the result in asoc->my_rwnd.
 * NOTE(review): this view is a lossy extraction — the function's return
 * type and braces are missing here; leading digits are original line
 * numbers, not code.
 */
60 sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
62 asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
65 /* Calculate what the rwnd would be */
/*
 * Compute the receive window to advertise: either the full socket receive
 * buffer limit (when nothing is buffered anywhere), or the actual socket
 * buffer space minus data still held on the reassembly and per-stream
 * queues (plus per-chunk MSIZE mbuf overhead) and minus the accumulated
 * rwnd control overhead (my_rwnd_control_len).
 * NOTE(review): lossy extraction — the return type, braces, else arms and
 * return statements are missing from this view; leading digits are
 * original line numbers.
 */
67 sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
72 * This is really set wrong with respect to a 1-2-m socket. Since
73 * the sb_cc is the count that everyone has put up. When we re-write
74 * sctp_soreceive then we will fix this so that ONLY this
75 * associations data is taken into account.
/* No socket: presumably the rwnd cannot be computed — TODO confirm the missing early-return */
77 if (stcb->sctp_socket == NULL)
80 if (stcb->asoc.sb_cc == 0 &&
81 asoc->size_on_reasm_queue == 0 &&
82 asoc->size_on_all_streams == 0) {
83 /* Full rwnd granted */
84 calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
87 /* get actual space */
88 calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
91 * take out what has NOT been put on socket queue and we yet hold
/* Each queued chunk is also charged one MSIZE of mbuf bookkeeping overhead. */
94 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
95 asoc->cnt_on_reasm_queue * MSIZE));
96 calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
97 asoc->cnt_on_all_streams * MSIZE));
103 /* what is the overhead of all these rwnd's */
104 calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
106 * If the window gets too small due to ctrl-stuff, reduce it to 1,
107 * even it is 0. SWS engaged
109 if (calc < stcb->asoc.my_rwnd_control_len) {
118 * Build out our readq entry based on the incoming packet.
/*
 * Allocate and populate a read-queue entry (sctp_queued_to_read) from the
 * caller-supplied per-message fields (tsn/ppid/context/stream/ssn/flags).
 * Takes a reference on 'net', which is stored as whoFrom.  Returns the new
 * entry, or (presumably) NULL when sctp_alloc_a_readq() fails — the early
 * return inside the NULL check is missing from this view.
 * NOTE(review): lossy extraction — the trailing parameter (the data mbuf
 * assigned to 'dm' below) and the braces are missing; leading digits are
 * original line numbers.
 */
120 struct sctp_queued_to_read *
121 sctp_build_readq_entry(struct sctp_tcb *stcb,
122 struct sctp_nets *net,
123 uint32_t tsn, uint32_t ppid,
124 uint32_t context, uint16_t stream_no,
125 uint16_t stream_seq, uint8_t flags,
128 struct sctp_queued_to_read *read_queue_e = NULL;
130 sctp_alloc_a_readq(stcb, read_queue_e);
131 if (read_queue_e == NULL) {
134 read_queue_e->sinfo_stream = stream_no;
135 read_queue_e->sinfo_ssn = stream_seq;
/* rcv_flags live in the upper byte of sinfo_flags */
136 read_queue_e->sinfo_flags = (flags << 8);
137 read_queue_e->sinfo_ppid = ppid;
138 read_queue_e->sinfo_context = context;
139 read_queue_e->sinfo_timetolive = 0;
140 read_queue_e->sinfo_tsn = tsn;
141 read_queue_e->sinfo_cumtsn = tsn;
142 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
143 read_queue_e->whoFrom = net;
144 read_queue_e->length = 0;
/* hold a reference on the net for the lifetime of this entry */
145 atomic_add_int(&net->ref_count, 1);
146 read_queue_e->data = dm;
147 read_queue_e->spec_flags = 0;
148 read_queue_e->tail_mbuf = NULL;
149 read_queue_e->aux_data = NULL;
150 read_queue_e->stcb = stcb;
151 read_queue_e->port_from = stcb->rport;
152 read_queue_e->do_not_ref_stcb = 0;
153 read_queue_e->end_added = 0;
154 read_queue_e->some_taken = 0;
155 read_queue_e->pdapi_aborted = 0;
157 return (read_queue_e);
162 * Build out our readq entry based on the incoming packet.
/*
 * Variant of sctp_build_readq_entry() that fills the read-queue entry
 * directly from a transmit-chunk (sctp_tmit_chunk) already holding the
 * received data.  Takes a reference on chk->whoTo, which becomes whoFrom.
 * Returns the new entry, or (presumably) NULL on allocation failure — the
 * early return inside the NULL check is missing from this view.
 * NOTE(review): lossy extraction — braces are missing; leading digits are
 * original line numbers.
 */
164 static struct sctp_queued_to_read *
165 sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
166 struct sctp_tmit_chunk *chk)
168 struct sctp_queued_to_read *read_queue_e = NULL;
170 sctp_alloc_a_readq(stcb, read_queue_e);
171 if (read_queue_e == NULL) {
174 read_queue_e->sinfo_stream = chk->rec.data.stream_number;
175 read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
/* rcv_flags live in the upper byte of sinfo_flags */
176 read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
177 read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
178 read_queue_e->sinfo_context = stcb->asoc.context;
179 read_queue_e->sinfo_timetolive = 0;
180 read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
181 read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
182 read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
183 read_queue_e->whoFrom = chk->whoTo;
184 read_queue_e->aux_data = NULL;
185 read_queue_e->length = 0;
/* hold a reference on the destination net for the lifetime of this entry */
186 atomic_add_int(&chk->whoTo->ref_count, 1);
/* ownership of the chunk's data mbuf chain moves to the readq entry */
187 read_queue_e->data = chk->data;
188 read_queue_e->tail_mbuf = NULL;
189 read_queue_e->stcb = stcb;
190 read_queue_e->port_from = stcb->rport;
191 read_queue_e->spec_flags = 0;
192 read_queue_e->do_not_ref_stcb = 0;
193 read_queue_e->end_added = 0;
194 read_queue_e->some_taken = 0;
195 read_queue_e->pdapi_aborted = 0;
197 return (read_queue_e);
/*
 * Build an mbuf containing the ancillary (cmsg) control data requested by
 * the socket's feature flags: SCTP_RCVINFO, SCTP_NXTINFO and/or the
 * legacy SCTP_SNDRCV / SCTP_EXTRCV structures, populated from 'sinfo'.
 * The first pass computes the total CMSG_SPACE length, the mbuf is then
 * allocated with sctp_get_mbuf_for_msg(), and each cmsghdr + payload is
 * written in sequence while SCTP_BUF_LEN(ret) is advanced.
 * NOTE(review): lossy extraction — the declarations of 'len', 'ret' and
 * 'cmh', several braces/else arms and the returns are missing from this
 * view; leading digits are original line numbers.
 */
202 sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
204 struct sctp_extrcvinfo *seinfo;
205 struct sctp_sndrcvinfo *outinfo;
206 struct sctp_rcvinfo *rcvinfo;
207 struct sctp_nxtinfo *nxtinfo;
/* Nothing requested: presumably return NULL — the return is missing from this view. */
214 if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
215 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
216 sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
217 /* user does not want any ancillary data */
/* First pass: total up the CMSG_SPACE needed for every requested cmsg. */
221 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
222 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
224 seinfo = (struct sctp_extrcvinfo *)sinfo;
225 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
226 (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
228 len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
232 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
233 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
235 len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
238 len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
244 ret = sctp_get_mbuf_for_msg(len, 0, M_NOWAIT, 1, MT_DATA);
249 SCTP_BUF_LEN(ret) = 0;
251 /* We need a CMSG header followed by the struct */
252 cmh = mtod(ret, struct cmsghdr *);
/* SCTP_RCVINFO cmsg: per-message receive information. */
253 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
254 cmh->cmsg_level = IPPROTO_SCTP;
255 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
256 cmh->cmsg_type = SCTP_RCVINFO;
257 rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
258 rcvinfo->rcv_sid = sinfo->sinfo_stream;
259 rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
260 rcvinfo->rcv_flags = sinfo->sinfo_flags;
261 rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
262 rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
263 rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
264 rcvinfo->rcv_context = sinfo->sinfo_context;
265 rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
266 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
267 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
/* SCTP_NXTINFO cmsg: information about the next queued message, built
 * from the sreinfo_next_* fields (guard condition missing from this view). */
270 cmh->cmsg_level = IPPROTO_SCTP;
271 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
272 cmh->cmsg_type = SCTP_NXTINFO;
273 nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
274 nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
275 nxtinfo->nxt_flags = 0;
276 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
277 nxtinfo->nxt_flags |= SCTP_UNORDERED;
279 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
280 nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
282 if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
283 nxtinfo->nxt_flags |= SCTP_COMPLETE;
285 nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
286 nxtinfo->nxt_length = seinfo->sreinfo_next_length;
287 nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
288 cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
289 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
/* Legacy SCTP_SNDRCV / extended SCTP_EXTRCV cmsg. */
291 if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
292 cmh->cmsg_level = IPPROTO_SCTP;
293 outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
295 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
296 cmh->cmsg_type = SCTP_EXTRCV;
297 memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
298 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
/* else-branch (plain SCTP_SNDRCV) — the 'else' and copy are partly missing from this view */
300 cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
301 cmh->cmsg_type = SCTP_SNDRCV;
303 SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
/*
 * Move a delivered TSN from the (renege-able) mapping_array to the
 * nr_mapping_array so it can no longer be revoked by a drain.  Also
 * maintains highest_tsn_inside_nr_map and, when the moved TSN was the
 * highest in the old map, walks backwards to find the new highest (or
 * resets it to base - 1 if the map is now empty).
 * Only runs when the sctp_do_drain sysctl is enabled (early return
 * otherwise — the return itself is missing from this view).
 * NOTE(review): lossy extraction — braces/returns missing; leading digits
 * are original line numbers.
 */
311 sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
313 uint32_t gap, i, cumackp1;
/* Without draining, reneging never happens, so nothing to protect. */
316 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
319 cumackp1 = asoc->cumulative_tsn + 1;
320 if (SCTP_TSN_GT(cumackp1, tsn)) {
322 * this tsn is behind the cum ack and thus we don't need to
323 * worry about it being moved from one to the other.
327 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
/* Invariant: the TSN must be present in the map we are moving it from. */
328 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
329 SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
330 sctp_print_mapping_array(asoc);
332 panic("Things are really messed up now!!");
335 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
336 SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
337 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
338 asoc->highest_tsn_inside_nr_map = tsn;
340 if (tsn == asoc->highest_tsn_inside_map) {
341 /* We must back down to see what the new highest is */
342 for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
343 SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
344 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
345 asoc->highest_tsn_inside_map = i;
/* Nothing left in the old map: mark it empty (base - 1). */
351 asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
358 * We are delivering currently from the reassembly queue. We must continue to
359 * deliver until we either: 1) run out of space. 2) run out of sequential
360 * TSN's 3) hit the SCTP_DATA_LAST_FRAG flag.
/*
 * Drain sequential fragments from asoc->reasmqueue into the socket
 * receive buffer (partial-delivery API).  A FIRST_FRAG starts a new
 * control_pdapi entry via sctp_build_readq_entry_chk()/sctp_add_to_readq();
 * middle/last fragments are appended with sctp_append_to_readq().  When a
 * LAST_FRAG completes a message, any now-deliverable ordered messages on
 * that stream's inqueue are pushed to the readq as well.
 * If the socket is gone or the association is being freed, the whole
 * reassembly queue is flushed and freed instead.
 * NOTE(review): lossy extraction — declarations of 'end'/'cntDel'/
 * 'nxt_todel', many braces, breaks and returns are missing; leading
 * digits are original line numbers.
 */
363 sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
365 struct sctp_tmit_chunk *chk, *nchk;
370 struct sctp_queued_to_read *control, *ctl, *nctl;
375 cntDel = stream_no = 0;
376 if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
377 (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
378 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
379 /* socket above is long gone or going.. */
381 asoc->fragmented_delivery_inprogress = 0;
/* Flush and free everything on the reassembly queue. */
382 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
383 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
384 asoc->size_on_reasm_queue -= chk->send_size;
385 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
387 * Lose the data pointer, since it's in the socket
391 sctp_m_freem(chk->data);
394 /* Now free the address and data */
395 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
396 /* sa_ignore FREED_MEMORY */
400 SCTP_TCB_LOCK_ASSERT(stcb);
/* Main delivery loop: walk the queue while TSNs stay sequential. */
401 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
402 if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
403 /* Can't deliver more :< */
406 stream_no = chk->rec.data.stream_number;
407 nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
408 if (nxt_todel != chk->rec.data.stream_seq &&
409 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
411 * Not the next sequence to deliver in its stream OR
/* FIRST fragment: open a new partial-delivery readq entry. */
416 if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
418 control = sctp_build_readq_entry_chk(stcb, chk);
419 if (control == NULL) {
423 /* save it off for our future deliveries */
424 stcb->asoc.control_pdapi = control;
425 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
429 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
430 sctp_add_to_readq(stcb->sctp_ep,
431 stcb, control, &stcb->sctp_socket->so_rcv, end,
432 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
/* MIDDLE/LAST fragment: append to the open control_pdapi entry. */
435 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
439 sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
440 if (sctp_append_to_readq(stcb->sctp_ep, stcb,
441 stcb->asoc.control_pdapi,
442 chk->data, end, chk->rec.data.TSN_seq,
443 &stcb->sctp_socket->so_rcv)) {
445 * something is very wrong, either
446 * control_pdapi is NULL, or the tail_mbuf
447 * is corrupt, or there is a EOM already on
450 if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
/* Invariant-violation diagnostics: panic under INVARIANTS, print otherwise
 * (the #ifdef lines are missing from this view — TODO confirm). */
454 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
455 panic("This should not happen control_pdapi NULL?");
457 /* if we did not panic, it was a EOM */
458 panic("Bad chunking ??");
460 if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
461 SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
463 SCTP_PRINTF("Bad chunking ??\n");
464 SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
472 /* pull it we did it */
473 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
474 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
475 asoc->fragmented_delivery_inprogress = 0;
476 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
477 asoc->strmin[stream_no].last_sequence_delivered++;
478 }
533 * Queue the chunk either right into the socket buffer if it is the next one
534 * to go OR put it in the correct place in the delivery queue. If we do
535 * append to the so_buf, keep doing so until we are out of order. One big
536 * question still remains, what to do when the socket buffer is FULL??
/*
 * Place an ordered message ('control') on its stream: deliver it (and any
 * messages it unblocks) straight to the socket readq when its SSN is the
 * next expected, otherwise insert it SSN-sorted into strm->inqueue.
 * An SSN at or behind last_sequence_delivered is treated as a protocol
 * violation: the association is aborted with SCTP_CAUSE_PROTOCOL_VIOLATION
 * and *abort_flag is presumably set (the assignment is missing from this
 * view — TODO confirm).  A duplicate SSN already in the queue is silently
 * freed.
 * NOTE(review): lossy extraction — declarations of 'nxt_todel'/'oper'/
 * 'ippp', several braces/else arms and returns are missing; leading
 * digits are original line numbers.
 */
539 sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
540 struct sctp_queued_to_read *control, int *abort_flag)
543 * FIX-ME maybe? What happens when the ssn wraps? If we are getting
544 * all the data in one stream this could happen quite rapidly. One
545 * could use the TSN to keep track of things, but this scheme breaks
546 * down in the other type of stream usage that could occur. Send a
547 * single msg to stream 0, send 4Billion messages to stream 1, now
548 * send a message to stream 0. You have a situation where the TSN
549 * has wrapped but not in the stream. Is this worth worrying about
550 * or should we just change our queue sort at the bottom to be by
553 * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
554 * with TSN 1? If the peer is doing some sort of funky TSN/SSN
555 * assignment this could happen... and I don't see how this would be
556 * a violation. So for now I am undecided and will leave the sort by
557 * SSN alone. Maybe a hybrid approach is the answer
560 struct sctp_stream_in *strm;
561 struct sctp_queued_to_read *at;
/* Account the message against the per-stream totals up front; it is
 * backed out below if it is delivered immediately. */
567 asoc->size_on_all_streams += control->length;
568 sctp_ucount_incr(asoc->cnt_on_all_streams);
569 strm = &asoc->strmin[control->sinfo_stream];
570 nxt_todel = strm->last_sequence_delivered + 1;
571 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
572 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
574 SCTPDBG(SCTP_DEBUG_INDATA1,
575 "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
576 (uint32_t) control->sinfo_stream,
577 (uint32_t) strm->last_sequence_delivered,
578 (uint32_t) nxt_todel);
/* SSN at or behind the last delivered: protocol violation -> abort. */
579 if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
580 /* The incoming sseq is behind where we last delivered? */
581 SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
582 control->sinfo_ssn, strm->last_sequence_delivered);
585 * throw it in the stream so it gets cleaned up in
586 * association destruction
588 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
/* Build the PROTOCOL_VIOLATION error cause carrying our location,
 * the offending TSN and the stream/SSN pair. */
589 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
590 0, M_NOWAIT, 1, MT_DATA);
592 struct sctp_paramhdr *ph;
595 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
596 (sizeof(uint32_t) * 3);
597 ph = mtod(oper, struct sctp_paramhdr *);
598 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
599 ph->param_length = htons(SCTP_BUF_LEN(oper));
600 ippp = (uint32_t *) (ph + 1);
601 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
603 *ippp = control->sinfo_tsn;
605 *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
607 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
608 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
/* In-order: deliver immediately, then drain any unblocked successors. */
613 if (nxt_todel == control->sinfo_ssn) {
614 /* can be delivered right away? */
615 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
616 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
618 /* EY it won't be queued if it could be delivered directly */
620 asoc->size_on_all_streams -= control->length;
621 sctp_ucount_decr(asoc->cnt_on_all_streams);
622 strm->last_sequence_delivered++;
624 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
625 sctp_add_to_readq(stcb->sctp_ep, stcb,
627 &stcb->sctp_socket->so_rcv, 1,
628 SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
629 TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
631 nxt_todel = strm->last_sequence_delivered + 1;
632 if (nxt_todel == control->sinfo_ssn) {
633 TAILQ_REMOVE(&strm->inqueue, control, next);
634 asoc->size_on_all_streams -= control->length;
635 sctp_ucount_decr(asoc->cnt_on_all_streams);
636 strm->last_sequence_delivered++;
638 * We ignore the return of deliver_data here
639 * since we always can hold the chunk on the
640 * d-queue. And we have a finite number that
641 * can be delivered from the strq.
643 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
644 sctp_log_strm_del(control, NULL,
645 SCTP_STR_LOG_FROM_IMMED_DEL);
647 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
648 sctp_add_to_readq(stcb->sctp_ep, stcb,
650 &stcb->sctp_socket->so_rcv, 1,
651 SCTP_READ_LOCK_NOT_HELD,
660 * Ok, we did not deliver this guy, find the correct place
661 * to put it on the queue.
663 if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
/* Out-of-order: insert SSN-sorted into the stream's inqueue. */
666 if (TAILQ_EMPTY(&strm->inqueue)) {
668 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
669 sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
671 TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
673 TAILQ_FOREACH(at, &strm->inqueue, next) {
674 if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
676 * one in queue is bigger than the
677 * new one, insert before this one
679 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
680 sctp_log_strm_del(control, at,
681 SCTP_STR_LOG_FROM_INSERT_MD);
683 TAILQ_INSERT_BEFORE(at, control, next);
685 } else if (at->sinfo_ssn == control->sinfo_ssn) {
687 * Gak, He sent me a duplicate str
691 * foo bar, I guess I will just free
692 * this new guy, should we abort
693 * too? FIX ME MAYBE? Or it COULD be
694 * that the SSN's have wrapped.
695 * Maybe I should compare to TSN
696 * somehow... sigh for now just blow
/* Duplicate SSN: drop the new entry, release its net ref and readq slot. */
701 sctp_m_freem(control->data);
702 control->data = NULL;
703 asoc->size_on_all_streams -= control->length;
704 sctp_ucount_decr(asoc->cnt_on_all_streams);
705 if (control->whoFrom) {
706 sctp_free_remote_addr(control->whoFrom);
707 control->whoFrom = NULL;
709 sctp_free_a_readq(stcb, control);
712 if (TAILQ_NEXT(at, next) == NULL) {
714 * We are at the end, insert
717 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
718 sctp_log_strm_del(control, at,
719 SCTP_STR_LOG_FROM_INSERT_TL);
721 TAILQ_INSERT_AFTER(&strm->inqueue,
732 * Returns two things: You get the total size of the deliverable parts of the
733 * first fragmented message on the reassembly queue. And you get a 1 back if
734 * all of the message is ready or a 0 back if the message is still incomplete
/*
 * Scan the head of asoc->reasmqueue: starting from a FIRST_FRAG chunk,
 * accumulate contiguous-TSN fragment sizes into *t_size, stopping at a
 * TSN gap or at a LAST_FRAG.  Presumably returns 1 when a LAST_FRAG was
 * reached (message complete) and 0 otherwise — the return statements are
 * missing from this view.
 * NOTE(review): lossy extraction — 'tsn' declaration, braces and returns
 * missing; 'tsn' is presumably incremented per fragment (TODO confirm);
 * leading digits are original line numbers.
 */
737 sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t * t_size)
739 struct sctp_tmit_chunk *chk;
743 chk = TAILQ_FIRST(&asoc->reasmqueue);
745 /* nothing on the queue */
748 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
749 /* Not a first on the queue */
752 tsn = chk->rec.data.TSN_seq;
753 TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
754 if (tsn != chk->rec.data.TSN_seq) {
757 *t_size += chk->send_size;
758 if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
/*
 * Decide whether to start or continue reassembly delivery.  If no
 * fragmented delivery is in progress and the queue head is a FIRST_FRAG
 * that is next-in-stream (or unordered), start partial delivery once the
 * whole message is present or its deliverable size reaches the
 * partial-delivery point (min of sb limit >> SCTP_PARTIAL_DELIVERY_SHIFT
 * and the endpoint's partial_delivery_point).  If delivery is already in
 * progress, keep servicing the queue and, when it finishes, re-check.
 * An empty queue resets the reasm size/count accounting to zero.
 * NOTE(review): lossy extraction — 'nxt_todel' declaration, braces and
 * returns are missing; leading digits are original line numbers.
 */
767 sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
769 struct sctp_tmit_chunk *chk;
771 uint32_t tsize, pd_point;
774 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: re-zero the accounting (defensive) and bail. */
777 asoc->size_on_reasm_queue = 0;
778 asoc->cnt_on_reasm_queue = 0;
781 if (asoc->fragmented_delivery_inprogress == 0) {
783 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
784 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
785 (nxt_todel == chk->rec.data.stream_seq ||
786 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
788 * Yep the first one is here and its ok to deliver
791 if (stcb->sctp_socket) {
792 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
793 stcb->sctp_ep->partial_delivery_point);
794 } else {
795 pd_point = stcb->sctp_ep->partial_delivery_point;
797 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
799 * Yes, we setup to start reception, by
800 * backing down the TSN just in case we
801 * can't deliver. If we
/* Record the PD-API state for the message we are about to deliver. */
803 asoc->fragmented_delivery_inprogress = 1;
804 asoc->tsn_last_delivered =
805 chk->rec.data.TSN_seq - 1;
807 chk->rec.data.stream_number;
808 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
809 asoc->pdapi_ppid = chk->rec.data.payloadtype;
810 asoc->fragment_flags = chk->rec.data.rcv_flags;
811 sctp_service_reassembly(stcb, asoc);
816 * Service re-assembly will deliver stream data queued at
817 * the end of fragmented delivery.. but it won't know to go
818 * back and call itself again... we do that here with the
821 sctp_service_reassembly(stcb, asoc);
822 if (asoc->fragmented_delivery_inprogress == 0) {
824 * finished our Fragmented delivery, could be more
833 * Dump onto the re-assembly queue, in its proper place. After dumping on the
834 * queue, see if anthing can be delivered. If so pull it off (or as much as
835 * we can. If we run out of space then we must dump what we can and set the
836 * appropriate flag to say we queued what we could.
839 sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
840 struct sctp_tmit_chunk *chk, int *abort_flag)
843 uint32_t cum_ackp1, prev_tsn, post_tsn;
844 struct sctp_tmit_chunk *at, *prev, *next;
847 cum_ackp1 = asoc->tsn_last_delivered + 1;
848 if (TAILQ_EMPTY(&asoc->reasmqueue)) {
849 /* This is the first one on the queue */
850 TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
852 * we do not check for delivery of anything when only one
855 asoc->size_on_reasm_queue = chk->send_size;
856 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
857 if (chk->rec.data.TSN_seq == cum_ackp1) {
858 if (asoc->fragmented_delivery_inprogress == 0 &&
859 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
860 SCTP_DATA_FIRST_FRAG) {
862 * An empty queue, no delivery inprogress,
863 * we hit the next one and it does NOT have
864 * a FIRST fragment mark.
866 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
867 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
868 0, M_NOWAIT, 1, MT_DATA);
871 struct sctp_paramhdr *ph;
875 sizeof(struct sctp_paramhdr) +
876 (sizeof(uint32_t) * 3);
877 ph = mtod(oper, struct sctp_paramhdr *);
879 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
880 ph->param_length = htons(SCTP_BUF_LEN(oper));
881 ippp = (uint32_t *) (ph + 1);
882 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
884 *ippp = chk->rec.data.TSN_seq;
886 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
889 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
890 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
892 } else if (asoc->fragmented_delivery_inprogress &&
893 (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
895 * We are doing a partial delivery and the
896 * NEXT chunk MUST be either the LAST or
897 * MIDDLE fragment NOT a FIRST
899 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
900 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
901 0, M_NOWAIT, 1, MT_DATA);
903 struct sctp_paramhdr *ph;
907 sizeof(struct sctp_paramhdr) +
908 (3 * sizeof(uint32_t));
909 ph = mtod(oper, struct sctp_paramhdr *);
911 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
912 ph->param_length = htons(SCTP_BUF_LEN(oper));
913 ippp = (uint32_t *) (ph + 1);
914 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
916 *ippp = chk->rec.data.TSN_seq;
918 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
920 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
921 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
923 } else if (asoc->fragmented_delivery_inprogress) {
925 * Here we are ok with a MIDDLE or LAST
928 if (chk->rec.data.stream_number !=
929 asoc->str_of_pdapi) {
930 /* Got to be the right STR No */
931 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
932 chk->rec.data.stream_number,
934 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
935 0, M_NOWAIT, 1, MT_DATA);
937 struct sctp_paramhdr *ph;
941 sizeof(struct sctp_paramhdr) +
942 (sizeof(uint32_t) * 3);
944 struct sctp_paramhdr *);
946 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
948 htons(SCTP_BUF_LEN(oper));
949 ippp = (uint32_t *) (ph + 1);
950 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
952 *ippp = chk->rec.data.TSN_seq;
954 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
956 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
957 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
959 } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
960 SCTP_DATA_UNORDERED &&
961 chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
962 /* Got to be the right STR Seq */
963 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
964 chk->rec.data.stream_seq,
966 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
967 0, M_NOWAIT, 1, MT_DATA);
969 struct sctp_paramhdr *ph;
973 sizeof(struct sctp_paramhdr) +
974 (3 * sizeof(uint32_t));
976 struct sctp_paramhdr *);
978 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
980 htons(SCTP_BUF_LEN(oper));
981 ippp = (uint32_t *) (ph + 1);
982 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
984 *ippp = chk->rec.data.TSN_seq;
986 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
989 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
990 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
998 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
999 if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
1001 * one in queue is bigger than the new one, insert
1005 asoc->size_on_reasm_queue += chk->send_size;
1006 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1008 TAILQ_INSERT_BEFORE(at, chk, sctp_next);
1010 } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
1011 /* Gak, He sent me a duplicate str seq number */
1013 * foo bar, I guess I will just free this new guy,
1014 * should we abort too? FIX ME MAYBE? Or it COULD be
1015 * that the SSN's have wrapped. Maybe I should
1016 * compare to TSN somehow... sigh for now just blow
1020 sctp_m_freem(chk->data);
1023 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
1027 if (TAILQ_NEXT(at, sctp_next) == NULL) {
1029 * We are at the end, insert it after this
1032 /* check it first */
1033 asoc->size_on_reasm_queue += chk->send_size;
1034 sctp_ucount_incr(asoc->cnt_on_reasm_queue);
1035 TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
1040 /* Now the audits */
1042 prev_tsn = chk->rec.data.TSN_seq - 1;
1043 if (prev_tsn == prev->rec.data.TSN_seq) {
1045 * Ok the one I am dropping onto the end is the
1046 * NEXT. A bit of valdiation here.
1048 if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1049 SCTP_DATA_FIRST_FRAG ||
1050 (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1051 SCTP_DATA_MIDDLE_FRAG) {
1053 * Insert chk MUST be a MIDDLE or LAST
1056 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1057 SCTP_DATA_FIRST_FRAG) {
1058 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a midlle or last but not a first\n");
1059 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
1060 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1061 0, M_NOWAIT, 1, MT_DATA);
1063 struct sctp_paramhdr *ph;
1066 SCTP_BUF_LEN(oper) =
1067 sizeof(struct sctp_paramhdr) +
1068 (3 * sizeof(uint32_t));
1070 struct sctp_paramhdr *);
1072 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1074 htons(SCTP_BUF_LEN(oper));
1075 ippp = (uint32_t *) (ph + 1);
1076 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
1078 *ippp = chk->rec.data.TSN_seq;
1080 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1083 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
1084 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1088 if (chk->rec.data.stream_number !=
1089 prev->rec.data.stream_number) {
1091 * Huh, need the correct STR here,
1092 * they must be the same.
1094 SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1095 chk->rec.data.stream_number,
1096 prev->rec.data.stream_number);
1097 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1098 0, M_NOWAIT, 1, MT_DATA);
1100 struct sctp_paramhdr *ph;
1103 SCTP_BUF_LEN(oper) =
1104 sizeof(struct sctp_paramhdr) +
1105 (3 * sizeof(uint32_t));
1107 struct sctp_paramhdr *);
1109 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1111 htons(SCTP_BUF_LEN(oper));
1112 ippp = (uint32_t *) (ph + 1);
1113 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
1115 *ippp = chk->rec.data.TSN_seq;
1117 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1119 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
1120 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1124 if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1125 chk->rec.data.stream_seq !=
1126 prev->rec.data.stream_seq) {
1128 * Huh, need the correct STR here,
1129 * they must be the same.
1131 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1132 chk->rec.data.stream_seq,
1133 prev->rec.data.stream_seq);
1134 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1135 0, M_NOWAIT, 1, MT_DATA);
1137 struct sctp_paramhdr *ph;
1140 SCTP_BUF_LEN(oper) =
1141 sizeof(struct sctp_paramhdr) +
1142 (3 * sizeof(uint32_t));
1144 struct sctp_paramhdr *);
1146 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1148 htons(SCTP_BUF_LEN(oper));
1149 ippp = (uint32_t *) (ph + 1);
1150 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
1152 *ippp = chk->rec.data.TSN_seq;
1154 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1156 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
1157 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1161 } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1162 SCTP_DATA_LAST_FRAG) {
1163 /* Insert chk MUST be a FIRST */
1164 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1165 SCTP_DATA_FIRST_FRAG) {
1166 SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
1167 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1168 0, M_NOWAIT, 1, MT_DATA);
1170 struct sctp_paramhdr *ph;
1173 SCTP_BUF_LEN(oper) =
1174 sizeof(struct sctp_paramhdr) +
1175 (3 * sizeof(uint32_t));
1177 struct sctp_paramhdr *);
1179 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1181 htons(SCTP_BUF_LEN(oper));
1182 ippp = (uint32_t *) (ph + 1);
1183 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
1185 *ippp = chk->rec.data.TSN_seq;
1187 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1190 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
1191 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1199 post_tsn = chk->rec.data.TSN_seq + 1;
1200 if (post_tsn == next->rec.data.TSN_seq) {
1202 * Ok the one I am inserting ahead of is my NEXT
1203 * one. A bit of validation here.
1205 if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
1206 /* Insert chk MUST be a last fragment */
1207 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
1208 != SCTP_DATA_LAST_FRAG) {
1209 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
1210 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
1211 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1212 0, M_NOWAIT, 1, MT_DATA);
1214 struct sctp_paramhdr *ph;
1217 SCTP_BUF_LEN(oper) =
1218 sizeof(struct sctp_paramhdr) +
1219 (3 * sizeof(uint32_t));
1221 struct sctp_paramhdr *);
1223 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1225 htons(SCTP_BUF_LEN(oper));
1226 ippp = (uint32_t *) (ph + 1);
1227 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
1229 *ippp = chk->rec.data.TSN_seq;
1231 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1233 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
1234 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1238 } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1239 SCTP_DATA_MIDDLE_FRAG ||
1240 (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1241 SCTP_DATA_LAST_FRAG) {
1243 * Insert chk CAN be MIDDLE or FIRST NOT
1246 if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
1247 SCTP_DATA_LAST_FRAG) {
1248 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
1249 SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
1250 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1251 0, M_NOWAIT, 1, MT_DATA);
1253 struct sctp_paramhdr *ph;
1256 SCTP_BUF_LEN(oper) =
1257 sizeof(struct sctp_paramhdr) +
1258 (3 * sizeof(uint32_t));
1260 struct sctp_paramhdr *);
1262 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1264 htons(SCTP_BUF_LEN(oper));
1265 ippp = (uint32_t *) (ph + 1);
1266 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
1268 *ippp = chk->rec.data.TSN_seq;
1270 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1273 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
1274 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1278 if (chk->rec.data.stream_number !=
1279 next->rec.data.stream_number) {
1281 * Huh, need the correct STR here,
1282 * they must be the same.
1284 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
1285 chk->rec.data.stream_number,
1286 next->rec.data.stream_number);
1287 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1288 0, M_NOWAIT, 1, MT_DATA);
1290 struct sctp_paramhdr *ph;
1293 SCTP_BUF_LEN(oper) =
1294 sizeof(struct sctp_paramhdr) +
1295 (3 * sizeof(uint32_t));
1297 struct sctp_paramhdr *);
1299 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1301 htons(SCTP_BUF_LEN(oper));
1302 ippp = (uint32_t *) (ph + 1);
1303 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
1305 *ippp = chk->rec.data.TSN_seq;
1307 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1310 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
1311 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1315 if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
1316 chk->rec.data.stream_seq !=
1317 next->rec.data.stream_seq) {
1319 * Huh, need the correct STR here,
1320 * they must be the same.
1322 SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
1323 chk->rec.data.stream_seq,
1324 next->rec.data.stream_seq);
1325 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1326 0, M_NOWAIT, 1, MT_DATA);
1328 struct sctp_paramhdr *ph;
1331 SCTP_BUF_LEN(oper) =
1332 sizeof(struct sctp_paramhdr) +
1333 (3 * sizeof(uint32_t));
1335 struct sctp_paramhdr *);
1337 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1339 htons(SCTP_BUF_LEN(oper));
1340 ippp = (uint32_t *) (ph + 1);
1341 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
1343 *ippp = chk->rec.data.TSN_seq;
1345 *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
1347 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
1348 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1355 /* Do we need to do some delivery? check */
1356 sctp_deliver_reasm_check(stcb, asoc);
1360 * This is an unfortunate routine. It checks to make sure an evil guy is not
1361 * stuffing us full of bad packet fragments. A broken peer could also do this
1362 * but this is doubtful. It is too bad I must worry about evil crackers sigh
1366 sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
1369 struct sctp_tmit_chunk *at;
1372 TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
1373 if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
1374 /* is it one bigger? */
1375 tsn_est = at->rec.data.TSN_seq + 1;
1376 if (tsn_est == TSN_seq) {
1377 /* yep. It better be a last then */
1378 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1379 SCTP_DATA_LAST_FRAG) {
1381 * Ok this guy belongs next to a guy
1382 * that is NOT last, it should be a
1383 * middle/last, not a complete
1389 * This guy is ok since its a LAST
1390 * and the new chunk is a fully
1391 * self- contained one.
1396 } else if (TSN_seq == at->rec.data.TSN_seq) {
1397 /* Software error since I have a dup? */
1401 * Ok, 'at' is larger than new chunk but does it
1402 * need to be right before it.
1404 tsn_est = TSN_seq + 1;
1405 if (tsn_est == at->rec.data.TSN_seq) {
1406 /* Yep, It better be a first */
1407 if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
1408 SCTP_DATA_FIRST_FRAG) {
1420 sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
1421 struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
1422 struct sctp_nets *net, uint32_t * high_tsn, int *abort_flag,
1423 int *break_flag, int last_chunk)
1425 /* Process a data chunk */
1426 /* struct sctp_tmit_chunk *chk; */
1427 struct sctp_tmit_chunk *chk;
1431 int need_reasm_check = 0;
1432 uint16_t strmno, strmseq;
1434 struct sctp_queued_to_read *control;
1436 uint32_t protocol_id;
1437 uint8_t chunk_flags;
1438 struct sctp_stream_reset_list *liste;
1441 tsn = ntohl(ch->dp.tsn);
1442 chunk_flags = ch->ch.chunk_flags;
1443 if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
1444 asoc->send_sack = 1;
1446 protocol_id = ch->dp.protocol_id;
1447 ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
1448 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
1449 sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
1454 SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
1455 if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
1456 /* It is a duplicate */
1457 SCTP_STAT_INCR(sctps_recvdupdata);
1458 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1459 /* Record a dup for the next outbound sack */
1460 asoc->dup_tsns[asoc->numduptsns] = tsn;
1463 asoc->send_sack = 1;
1466 /* Calculate the number of TSN's between the base and this TSN */
1467 SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
1468 if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
1469 /* Can't hold the bit in the mapping at max array, toss it */
1472 if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
1473 SCTP_TCB_LOCK_ASSERT(stcb);
1474 if (sctp_expand_mapping_array(asoc, gap)) {
1475 /* Can't expand, drop it */
1479 if (SCTP_TSN_GT(tsn, *high_tsn)) {
1482 /* See if we have received this one already */
1483 if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
1484 SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
1485 SCTP_STAT_INCR(sctps_recvdupdata);
1486 if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
1487 /* Record a dup for the next outbound sack */
1488 asoc->dup_tsns[asoc->numduptsns] = tsn;
1491 asoc->send_sack = 1;
1495 * Check to see about the GONE flag, duplicates would cause a sack
1496 * to be sent up above
1498 if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
1499 (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
1500 (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))
1503 * wait a minute, this guy is gone, there is no longer a
1504 * receiver. Send peer an ABORT!
1506 struct mbuf *op_err;
1508 op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
1509 sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
1514 * Now before going further we see if there is room. If NOT then we
1515 * MAY let one through only IF this TSN is the one we are waiting
1516 * for on a partial delivery API.
1519 /* now do the tests */
1520 if (((asoc->cnt_on_all_streams +
1521 asoc->cnt_on_reasm_queue +
1522 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
1523 (((int)asoc->my_rwnd) <= 0)) {
1525 * When we have NO room in the rwnd we check to make sure
1526 * the reader is doing its job...
1528 if (stcb->sctp_socket->so_rcv.sb_cc) {
1529 /* some to read, wake-up */
1530 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1533 so = SCTP_INP_SO(stcb->sctp_ep);
1534 atomic_add_int(&stcb->asoc.refcnt, 1);
1535 SCTP_TCB_UNLOCK(stcb);
1536 SCTP_SOCKET_LOCK(so, 1);
1537 SCTP_TCB_LOCK(stcb);
1538 atomic_subtract_int(&stcb->asoc.refcnt, 1);
1539 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
1540 /* assoc was freed while we were unlocked */
1541 SCTP_SOCKET_UNLOCK(so, 1);
1545 sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
1546 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
1547 SCTP_SOCKET_UNLOCK(so, 1);
1550 /* now is it in the mapping array of what we have accepted? */
1551 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
1552 SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1553 /* Nope not in the valid range dump it */
1554 sctp_set_rwnd(stcb, asoc);
1555 if ((asoc->cnt_on_all_streams +
1556 asoc->cnt_on_reasm_queue +
1557 asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
1558 SCTP_STAT_INCR(sctps_datadropchklmt);
1560 SCTP_STAT_INCR(sctps_datadroprwnd);
1566 strmno = ntohs(ch->dp.stream_id);
1567 if (strmno >= asoc->streamincnt) {
1568 struct sctp_paramhdr *phdr;
1571 mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
1572 0, M_NOWAIT, 1, MT_DATA);
1574 /* add some space up front so prepend will work well */
1575 SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
1576 phdr = mtod(mb, struct sctp_paramhdr *);
1578 * Error causes are just param's and this one has
1579 * two back to back phdr, one with the error type
1580 * and size, the other with the streamid and a rsvd
1582 SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
1583 phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
1584 phdr->param_length =
1585 htons(sizeof(struct sctp_paramhdr) * 2);
1587 /* We insert the stream in the type field */
1588 phdr->param_type = ch->dp.stream_id;
1589 /* And set the length to 0 for the rsvd field */
1590 phdr->param_length = 0;
1591 sctp_queue_op_err(stcb, mb);
1593 SCTP_STAT_INCR(sctps_badsid);
1594 SCTP_TCB_LOCK_ASSERT(stcb);
1595 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1596 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1597 asoc->highest_tsn_inside_nr_map = tsn;
1599 if (tsn == (asoc->cumulative_tsn + 1)) {
1600 /* Update cum-ack */
1601 asoc->cumulative_tsn = tsn;
1606 * Before we continue lets validate that we are not being fooled by
1607 * an evil attacker. We can only have 4k chunks based on our TSN
1608 * spread allowed by the mapping array 512 * 8 bits, so there is no
1609 * way our stream sequence numbers could have wrapped. We of course
1610 * only validate the FIRST fragment so the bit must be set.
1612 strmseq = ntohs(ch->dp.stream_sequence);
1613 #ifdef SCTP_ASOCLOG_OF_TSNS
1614 SCTP_TCB_LOCK_ASSERT(stcb);
1615 if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
1616 asoc->tsn_in_at = 0;
1617 asoc->tsn_in_wrapped = 1;
1619 asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
1620 asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
1621 asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
1622 asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
1623 asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
1624 asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
1625 asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
1626 asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
1629 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
1630 (TAILQ_EMPTY(&asoc->resetHead)) &&
1631 (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
1632 SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
1633 /* The incoming sseq is behind where we last delivered? */
1634 SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
1635 strmseq, asoc->strmin[strmno].last_sequence_delivered);
1636 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1637 0, M_NOWAIT, 1, MT_DATA);
1639 struct sctp_paramhdr *ph;
1642 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1643 (3 * sizeof(uint32_t));
1644 ph = mtod(oper, struct sctp_paramhdr *);
1645 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1646 ph->param_length = htons(SCTP_BUF_LEN(oper));
1647 ippp = (uint32_t *) (ph + 1);
1648 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
1652 *ippp = ((strmno << 16) | strmseq);
1655 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
1656 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1660 /************************************
1661 * From here down we may find ch-> invalid
1662 * so its a good idea NOT to use it.
1663 *************************************/
1665 the_len = (chk_length - sizeof(struct sctp_data_chunk));
1666 if (last_chunk == 0) {
1667 dmbuf = SCTP_M_COPYM(*m,
1668 (offset + sizeof(struct sctp_data_chunk)),
1670 #ifdef SCTP_MBUF_LOGGING
1671 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
1674 for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
1675 if (SCTP_BUF_IS_EXTENDED(mat)) {
1676 sctp_log_mb(mat, SCTP_MBUF_ICOPY);
1682 /* We can steal the last chunk */
1686 /* lop off the top part */
1687 m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
1688 if (SCTP_BUF_NEXT(dmbuf) == NULL) {
1689 l_len = SCTP_BUF_LEN(dmbuf);
1692 * need to count up the size hopefully does not hit
1698 for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
1699 l_len += SCTP_BUF_LEN(lat);
1702 if (l_len > the_len) {
1703 /* Trim the end round bytes off too */
1704 m_adj(dmbuf, -(l_len - the_len));
1707 if (dmbuf == NULL) {
1708 SCTP_STAT_INCR(sctps_nomem);
1711 if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
1712 asoc->fragmented_delivery_inprogress == 0 &&
1713 TAILQ_EMPTY(&asoc->resetHead) &&
1715 ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
1716 TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
1717 /* Candidate for express delivery */
1719 * Its not fragmented, No PD-API is up, Nothing in the
1720 * delivery queue, Its un-ordered OR ordered and the next to
1721 * deliver AND nothing else is stuck on the stream queue,
1722 * And there is room for it in the socket buffer. Lets just
1723 * stuff it up the buffer....
1726 /* It would be nice to avoid this copy if we could :< */
1727 sctp_alloc_a_readq(stcb, control);
1728 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1733 if (control == NULL) {
1734 goto failed_express_del;
1736 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1737 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1738 asoc->highest_tsn_inside_nr_map = tsn;
1740 sctp_add_to_readq(stcb->sctp_ep, stcb,
1741 control, &stcb->sctp_socket->so_rcv,
1742 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
1744 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1745 /* for ordered, bump what we delivered */
1746 asoc->strmin[strmno].last_sequence_delivered++;
1748 SCTP_STAT_INCR(sctps_recvexpress);
1749 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
1750 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
1751 SCTP_STR_LOG_FROM_EXPRS_DEL);
1755 goto finish_express_del;
1758 /* If we reach here this is a new chunk */
1761 /* Express for fragmented delivery? */
1762 if ((asoc->fragmented_delivery_inprogress) &&
1763 (stcb->asoc.control_pdapi) &&
1764 (asoc->str_of_pdapi == strmno) &&
1765 (asoc->ssn_of_pdapi == strmseq)
1767 control = stcb->asoc.control_pdapi;
1768 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1769 /* Can't be another first? */
1770 goto failed_pdapi_express_del;
1772 if (tsn == (control->sinfo_tsn + 1)) {
1773 /* Yep, we can add it on */
1776 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1779 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1781 &stcb->sctp_socket->so_rcv)) {
1782 SCTP_PRINTF("Append fails end:%d\n", end);
1783 goto failed_pdapi_express_del;
1785 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1786 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1787 asoc->highest_tsn_inside_nr_map = tsn;
1789 SCTP_STAT_INCR(sctps_recvexpressm);
1790 control->sinfo_tsn = tsn;
1791 asoc->tsn_last_delivered = tsn;
1792 asoc->fragment_flags = chunk_flags;
1793 asoc->tsn_of_pdapi_last_delivered = tsn;
1794 asoc->last_flags_delivered = chunk_flags;
1795 asoc->last_strm_seq_delivered = strmseq;
1796 asoc->last_strm_no_delivered = strmno;
1798 /* clean up the flags and such */
1799 asoc->fragmented_delivery_inprogress = 0;
1800 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1801 asoc->strmin[strmno].last_sequence_delivered++;
1803 stcb->asoc.control_pdapi = NULL;
1804 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1806 * There could be another message
1809 need_reasm_check = 1;
1813 goto finish_express_del;
1816 failed_pdapi_express_del:
1818 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1819 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1820 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1821 asoc->highest_tsn_inside_nr_map = tsn;
1824 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1825 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1826 asoc->highest_tsn_inside_map = tsn;
1829 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1830 sctp_alloc_a_chunk(stcb, chk);
1832 /* No memory so we drop the chunk */
1833 SCTP_STAT_INCR(sctps_nomem);
1834 if (last_chunk == 0) {
1835 /* we copied it, free the copy */
1836 sctp_m_freem(dmbuf);
1840 chk->rec.data.TSN_seq = tsn;
1841 chk->no_fr_allowed = 0;
1842 chk->rec.data.stream_seq = strmseq;
1843 chk->rec.data.stream_number = strmno;
1844 chk->rec.data.payloadtype = protocol_id;
1845 chk->rec.data.context = stcb->asoc.context;
1846 chk->rec.data.doing_fast_retransmit = 0;
1847 chk->rec.data.rcv_flags = chunk_flags;
1849 chk->send_size = the_len;
1851 atomic_add_int(&net->ref_count, 1);
1854 sctp_alloc_a_readq(stcb, control);
1855 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1860 if (control == NULL) {
1861 /* No memory so we drop the chunk */
1862 SCTP_STAT_INCR(sctps_nomem);
1863 if (last_chunk == 0) {
1864 /* we copied it, free the copy */
1865 sctp_m_freem(dmbuf);
1869 control->length = the_len;
1872 /* Mark it as received */
1873 /* Now queue it where it belongs */
1874 if (control != NULL) {
1875 /* First a sanity check */
1876 if (asoc->fragmented_delivery_inprogress) {
1878 * Ok, we have a fragmented delivery in progress if
1879 * this chunk is next to deliver OR belongs in our
1880 * view to the reassembly, the peer is evil or
1883 uint32_t estimate_tsn;
1885 estimate_tsn = asoc->tsn_last_delivered + 1;
1886 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1887 (estimate_tsn == control->sinfo_tsn)) {
1888 /* Evil/Broke peer */
1889 sctp_m_freem(control->data);
1890 control->data = NULL;
1891 if (control->whoFrom) {
1892 sctp_free_remote_addr(control->whoFrom);
1893 control->whoFrom = NULL;
1895 sctp_free_a_readq(stcb, control);
1896 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1897 0, M_NOWAIT, 1, MT_DATA);
1899 struct sctp_paramhdr *ph;
1902 SCTP_BUF_LEN(oper) =
1903 sizeof(struct sctp_paramhdr) +
1904 (3 * sizeof(uint32_t));
1905 ph = mtod(oper, struct sctp_paramhdr *);
1907 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1908 ph->param_length = htons(SCTP_BUF_LEN(oper));
1909 ippp = (uint32_t *) (ph + 1);
1910 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1914 *ippp = ((strmno << 16) | strmseq);
1916 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1917 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1921 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1922 sctp_m_freem(control->data);
1923 control->data = NULL;
1924 if (control->whoFrom) {
1925 sctp_free_remote_addr(control->whoFrom);
1926 control->whoFrom = NULL;
1928 sctp_free_a_readq(stcb, control);
1930 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1931 0, M_NOWAIT, 1, MT_DATA);
1933 struct sctp_paramhdr *ph;
1936 SCTP_BUF_LEN(oper) =
1937 sizeof(struct sctp_paramhdr) +
1938 (3 * sizeof(uint32_t));
1940 struct sctp_paramhdr *);
1942 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1944 htons(SCTP_BUF_LEN(oper));
1945 ippp = (uint32_t *) (ph + 1);
1946 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1950 *ippp = ((strmno << 16) | strmseq);
1952 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1953 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1959 /* No PDAPI running */
1960 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1962 * Reassembly queue is NOT empty validate
1963 * that this tsn does not need to be in
1964 * reassembly queue. If it does then our peer
1965 * is broken or evil.
1967 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1968 sctp_m_freem(control->data);
1969 control->data = NULL;
1970 if (control->whoFrom) {
1971 sctp_free_remote_addr(control->whoFrom);
1972 control->whoFrom = NULL;
1974 sctp_free_a_readq(stcb, control);
1975 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1976 0, M_NOWAIT, 1, MT_DATA);
1978 struct sctp_paramhdr *ph;
1981 SCTP_BUF_LEN(oper) =
1982 sizeof(struct sctp_paramhdr) +
1983 (3 * sizeof(uint32_t));
1985 struct sctp_paramhdr *);
1987 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1989 htons(SCTP_BUF_LEN(oper));
1990 ippp = (uint32_t *) (ph + 1);
1991 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1995 *ippp = ((strmno << 16) | strmseq);
1997 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
1998 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
2004 /* ok, if we reach here we have passed the sanity checks */
2005 if (chunk_flags & SCTP_DATA_UNORDERED) {
2006 /* queue directly into socket buffer */
2007 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2008 sctp_add_to_readq(stcb->sctp_ep, stcb,
2010 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2013 * Special check for when streams are resetting. We
2014 * could be more smart about this and check the
2015 * actual stream to see if it is not being reset..
2016 * that way we would not create a HOLB when amongst
2017 * streams being reset and those not being reset.
2019 * We take complete messages that have a stream reset
2020 * intervening (aka the TSN is after where our
2021 * cum-ack needs to be) off and put them on a
2022 * pending_reply_queue. The reassembly ones we do
2023 * not have to worry about since they are all sorted
2024 * and processed by TSN order. It is only the
2025 * singletons I must worry about.
2027 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2028 SCTP_TSN_GT(tsn, liste->tsn)) {
2030 * yep its past where we need to reset... go
2031 * ahead and queue it.
2033 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2035 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2037 struct sctp_queued_to_read *ctlOn,
2039 unsigned char inserted = 0;
2041 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2042 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2046 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2051 if (inserted == 0) {
2053 * must be put at end, use
2054 * prevP (all setup from
2055 * loop) to setup nextP.
2057 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2061 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2068 /* Into the re-assembly queue */
2069 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2072 * the assoc is now gone and chk was put onto the
2073 * reasm queue, which has all been freed.
2080 if (tsn == (asoc->cumulative_tsn + 1)) {
2081 /* Update cum-ack */
2082 asoc->cumulative_tsn = tsn;
2088 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2090 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2092 SCTP_STAT_INCR(sctps_recvdata);
2093 /* Set it present please */
2094 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2095 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2097 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2098 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2099 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2101 /* check the special flag for stream resets */
2102 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2103 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2105 * we have finished working through the backlogged TSN's now
2106 * time to reset streams. 1: call reset function. 2: free
2107 * pending_reply space 3: distribute any chunks in
2108 * pending_reply_queue.
2110 struct sctp_queued_to_read *ctl, *nctl;
2112 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2113 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2114 SCTP_FREE(liste, SCTP_M_STRESET);
2115 /* sa_ignore FREED_MEMORY */
2116 liste = TAILQ_FIRST(&asoc->resetHead);
2117 if (TAILQ_EMPTY(&asoc->resetHead)) {
2118 /* All can be removed */
2119 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2120 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2121 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2127 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2128 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2132 * if ctl->sinfo_tsn is <= liste->tsn we can
2133 * process it which is the NOT of
2134 * ctl->sinfo_tsn > liste->tsn
2136 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2137 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2144 * Now service re-assembly to pick up anything that has been
2145 * held on reassembly queue?
2147 sctp_deliver_reasm_check(stcb, asoc);
2148 need_reasm_check = 0;
2150 if (need_reasm_check) {
2151 /* Another one waits ? */
2152 sctp_deliver_reasm_check(stcb, asoc);
2157 int8_t sctp_map_lookup_tab[256] = {
2158 0, 1, 0, 2, 0, 1, 0, 3,
2159 0, 1, 0, 2, 0, 1, 0, 4,
2160 0, 1, 0, 2, 0, 1, 0, 3,
2161 0, 1, 0, 2, 0, 1, 0, 5,
2162 0, 1, 0, 2, 0, 1, 0, 3,
2163 0, 1, 0, 2, 0, 1, 0, 4,
2164 0, 1, 0, 2, 0, 1, 0, 3,
2165 0, 1, 0, 2, 0, 1, 0, 6,
2166 0, 1, 0, 2, 0, 1, 0, 3,
2167 0, 1, 0, 2, 0, 1, 0, 4,
2168 0, 1, 0, 2, 0, 1, 0, 3,
2169 0, 1, 0, 2, 0, 1, 0, 5,
2170 0, 1, 0, 2, 0, 1, 0, 3,
2171 0, 1, 0, 2, 0, 1, 0, 4,
2172 0, 1, 0, 2, 0, 1, 0, 3,
2173 0, 1, 0, 2, 0, 1, 0, 7,
2174 0, 1, 0, 2, 0, 1, 0, 3,
2175 0, 1, 0, 2, 0, 1, 0, 4,
2176 0, 1, 0, 2, 0, 1, 0, 3,
2177 0, 1, 0, 2, 0, 1, 0, 5,
2178 0, 1, 0, 2, 0, 1, 0, 3,
2179 0, 1, 0, 2, 0, 1, 0, 4,
2180 0, 1, 0, 2, 0, 1, 0, 3,
2181 0, 1, 0, 2, 0, 1, 0, 6,
2182 0, 1, 0, 2, 0, 1, 0, 3,
2183 0, 1, 0, 2, 0, 1, 0, 4,
2184 0, 1, 0, 2, 0, 1, 0, 3,
2185 0, 1, 0, 2, 0, 1, 0, 5,
2186 0, 1, 0, 2, 0, 1, 0, 3,
2187 0, 1, 0, 2, 0, 1, 0, 4,
2188 0, 1, 0, 2, 0, 1, 0, 3,
2189 0, 1, 0, 2, 0, 1, 0, 8
2194 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2197 * Now we also need to check the mapping array in a couple of ways.
2198 * 1) Did we move the cum-ack point?
2200 * When you first glance at this you might think that all entries that
2201 * make up the position of the cum-ack would be in the nr-mapping
2202 * array only.. i.e. things up to the cum-ack are always
2203 * deliverable. That's true with one exception, when it's a fragmented
2204 * message we may not deliver the data until some threshold (or all
2205 * of it) is in place. So we must OR the nr_mapping_array and
2206 * mapping_array to get a true picture of the cum-ack.
2208 struct sctp_association *asoc;
2211 int slide_from, slide_end, lgap, distance;
2212 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2216 old_cumack = asoc->cumulative_tsn;
2217 old_base = asoc->mapping_array_base_tsn;
2218 old_highest = asoc->highest_tsn_inside_map;
2220 * We could probably improve this a small bit by calculating the
2221 * offset of the current cum-ack as the starting point.
2224 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2225 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2229 /* there is a 0 bit */
2230 at += sctp_map_lookup_tab[val];
2234 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2236 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2237 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2239 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2240 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2242 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2243 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2244 sctp_print_mapping_array(asoc);
2245 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2246 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2248 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2249 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2252 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2253 highest_tsn = asoc->highest_tsn_inside_nr_map;
2255 highest_tsn = asoc->highest_tsn_inside_map;
2257 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2258 /* The complete array was completed by a single FR */
2259 /* highest becomes the cum-ack */
2267 /* clear the array */
2268 clr = ((at + 7) >> 3);
2269 if (clr > asoc->mapping_array_size) {
2270 clr = asoc->mapping_array_size;
2272 memset(asoc->mapping_array, 0, clr);
2273 memset(asoc->nr_mapping_array, 0, clr);
2275 for (i = 0; i < asoc->mapping_array_size; i++) {
2276 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2277 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2278 sctp_print_mapping_array(asoc);
2282 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2283 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2284 } else if (at >= 8) {
2285 /* we can slide the mapping array down */
2286 /* slide_from holds where we hit the first NON 0xff byte */
2289 * now calculate the ceiling of the move using our highest
2292 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2293 slide_end = (lgap >> 3);
2294 if (slide_end < slide_from) {
2295 sctp_print_mapping_array(asoc);
2297 panic("impossible slide");
2299 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2300 lgap, slide_end, slide_from, at);
2304 if (slide_end > asoc->mapping_array_size) {
2306 panic("would overrun buffer");
2308 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2309 asoc->mapping_array_size, slide_end);
2310 slide_end = asoc->mapping_array_size;
2313 distance = (slide_end - slide_from) + 1;
2314 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2315 sctp_log_map(old_base, old_cumack, old_highest,
2316 SCTP_MAP_PREPARE_SLIDE);
2317 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2318 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2320 if (distance + slide_from > asoc->mapping_array_size ||
2323 * Here we do NOT slide forward the array so that
2324 * hopefully when more data comes in to fill it up
2325 * we will be able to slide it forward. Really I
2326 * don't think this should happen :-0
2329 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2330 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2331 (uint32_t) asoc->mapping_array_size,
2332 SCTP_MAP_SLIDE_NONE);
2337 for (ii = 0; ii < distance; ii++) {
2338 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2339 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2342 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2343 asoc->mapping_array[ii] = 0;
2344 asoc->nr_mapping_array[ii] = 0;
2346 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2347 asoc->highest_tsn_inside_map += (slide_from << 3);
2349 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2350 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2352 asoc->mapping_array_base_tsn += (slide_from << 3);
2353 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2354 sctp_log_map(asoc->mapping_array_base_tsn,
2355 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2356 SCTP_MAP_SLIDE_RESULT);
/*
 * sctp_sack_check() - decide how to acknowledge newly received data:
 * send a SACK immediately, start/keep the delayed-ack (recv) timer, or,
 * in SHUTDOWN-SENT state, stop the SACK timer and emit a SHUTDOWN plus
 * an immediate SACK.  'was_a_gap' tells us whether a gap existed before
 * this packet was processed, so a gap that just closed forces a SACK.
 */
2363 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2365 struct sctp_association *asoc;
2366 uint32_t highest_tsn;
/* The effective highest TSN is the max over both mapping arrays. */
2369 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2370 highest_tsn = asoc->highest_tsn_inside_nr_map;
2372 highest_tsn = asoc->highest_tsn_inside_map;
2376 * Now we need to see if we need to queue a sack or just start the
2377 * timer (if allowed).
2379 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2381 * Ok special case, in SHUTDOWN-SENT case. here we maker
2382 * sure SACK timer is off and instead send a SHUTDOWN and a
2385 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2386 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2387 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
/* Re-send SHUTDOWN toward the alternate (if set) or primary path. */
2389 sctp_send_shutdown(stcb,
2390 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2391 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2395 /* is there a gap now ? */
2396 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2399 * CMT DAC algorithm: increase number of packets received
2402 stcb->asoc.cmt_dac_pkts_rcvd++;
2404 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2406 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2408 (stcb->asoc.numduptsns) || /* we have dup's */
2409 (is_a_gap) || /* is still a gap */
2410 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2411 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2414 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2415 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2416 (stcb->asoc.send_sack == 0) &&
2417 (stcb->asoc.numduptsns == 0) &&
2418 (stcb->asoc.delayed_ack) &&
2419 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2422 * CMT DAC algorithm: With CMT, delay acks
2423 * even in the face of
2425 * reordering. Therefore, if acks that do not
2426 * have to be sent because of the above
2427 * reasons, will be delayed. That is, acks
2428 * that would have been sent due to gap
2429 * reports will be delayed with DAC. Start
2430 * the delayed ack timer.
2432 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2433 stcb->sctp_ep, stcb, NULL);
2436 * Ok we must build a SACK since the timer
2437 * is pending, we got our first packet OR
2438 * there are gaps or duplicates.
2440 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2441 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
/* Nothing forces a SACK now: (re)start the delayed-ack timer. */
2444 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2445 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2446 stcb->sctp_ep, stcb, NULL);
/*
 * sctp_service_queues() - service the reassembly queue.  First continue
 * any partial-delivery (PD-API) already in progress; if that completes,
 * look at the head of the reassembly queue and, when the first fragment
 * of the next message is deliverable, start a new partial delivery once
 * either the whole message is queued or enough data (pd_point) is there.
 */
2453 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2455 struct sctp_tmit_chunk *chk;
2456 uint32_t tsize, pd_point;
2459 if (asoc->fragmented_delivery_inprogress) {
2460 sctp_service_reassembly(stcb, asoc);
2462 /* Can we proceed further, i.e. the PD-API is complete */
2463 if (asoc->fragmented_delivery_inprogress) {
2468 * Now is there some other chunk I can deliver from the reassembly
2472 chk = TAILQ_FIRST(&asoc->reasmqueue);
/* Empty queue: make the accounting consistent. */
2474 asoc->size_on_reasm_queue = 0;
2475 asoc->cnt_on_reasm_queue = 0;
2478 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
/* First fragment deliverable: in-order for the stream, or unordered. */
2479 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2480 ((nxt_todel == chk->rec.data.stream_seq) ||
2481 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2483 * Yep the first one is here. We setup to start reception,
2484 * by backing down the TSN just in case we can't deliver.
2488 * Before we start though either all of the message should
2489 * be here or the socket buffer max or nothing on the
2490 * delivery queue and something can be delivered.
/*
 * Partial-delivery threshold: the smaller of a fraction of the socket
 * receive buffer and the endpoint's configured partial_delivery_point.
 */
2492 if (stcb->sctp_socket) {
2493 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket) >> SCTP_PARTIAL_DELIVERY_SHIFT,
2494 stcb->sctp_ep->partial_delivery_point);
2496 pd_point = stcb->sctp_ep->partial_delivery_point;
2498 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
/* Record the PD-API state so aborts/notifications can identify it. */
2499 asoc->fragmented_delivery_inprogress = 1;
2500 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2501 asoc->str_of_pdapi = chk->rec.data.stream_number;
2502 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2503 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2504 asoc->fragment_flags = chk->rec.data.rcv_flags;
2505 sctp_service_reassembly(stcb, asoc);
2506 if (asoc->fragmented_delivery_inprogress == 0) {
/*
 * sctp_process_data() - walk all chunks in the DATA region of a received
 * packet.  Each chunk's length is validated against the remaining packet;
 * SCTP_DATA chunks are handed to sctp_process_a_data_chunk() (a malformed
 * one aborts the association), known control chunk types found here are
 * ignored (optionally aborting under sctp_strict_data_order), and unknown
 * types follow the standard high-bit rules (0x40: report, 0x80: skip).
 * On exit: liveness timestamps are refreshed, the reassembly queue is
 * serviced, and a SACK is sent or scheduled via sctp_sack_check().
 */
2514 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2515 struct sockaddr *src, struct sockaddr *dst,
2516 struct sctphdr *sh, struct sctp_inpcb *inp,
2517 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2518 uint8_t use_mflowid, uint32_t mflowid,
2519 uint32_t vrf_id, uint16_t port)
2521 struct sctp_data_chunk *ch, chunk_buf;
2522 struct sctp_association *asoc;
2523 int num_chunks = 0; /* number of control chunks processed */
2525 int chk_length, break_flag, last_chunk;
2526 int abort_flag = 0, was_a_gap;
2528 uint32_t highest_tsn;
2531 sctp_set_rwnd(stcb, &stcb->asoc);
2534 SCTP_TCB_LOCK_ASSERT(stcb);
/* Remember whether a gap existed before this packet (for the SACK decision). */
2536 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2537 highest_tsn = asoc->highest_tsn_inside_nr_map;
2539 highest_tsn = asoc->highest_tsn_inside_map;
2541 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2543 * setup where we got the last DATA packet from for any SACK that
2544 * may need to go out. Don't bump the net. This is done ONLY when a
2545 * chunk is assigned.
2547 asoc->last_data_chunk_from = net;
2550 * Now before we proceed we must figure out if this is a wasted
2551 * cluster... i.e. it is a small packet sent in and yet the driver
2552 * underneath allocated a full cluster for it. If so we must copy it
2553 * to a smaller mbuf and free up the cluster mbuf. This will help
2554 * with cluster starvation. Note for __Panda__ we don't do this
2555 * since it has clusters all the way down to 64 bytes.
2557 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2558 /* we only handle mbufs that are singletons.. not chains */
2559 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_NOWAIT, 1, MT_DATA);
2561 /* ok lets see if we can copy the data up */
2564 /* get the pointers and copy */
2565 to = mtod(m, caddr_t *);
2566 from = mtod((*mm), caddr_t *);
2567 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2568 /* copy the length and free up the old */
2569 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2571 /* sucess, back copy */
2574 /* We are in trouble in the mbuf world .. yikes */
2578 /* get pointer to the first chunk header */
2579 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2580 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2585 * process all DATA chunks...
/* high_tsn starts at the cum-ack; raised as DATA chunks are accepted. */
2587 *high_tsn = asoc->cumulative_tsn;
2589 asoc->data_pkts_seen++;
/* Iterate every chunk remaining in this packet. */
2590 while (stop_proc == 0) {
2591 /* validate chunk length */
2592 chk_length = ntohs(ch->ch.chunk_length);
2593 if (length - *offset < chk_length) {
2594 /* all done, mutulated chunk */
2598 if (ch->ch.chunk_type == SCTP_DATA) {
/* A DATA chunk must carry at least one byte of user data. */
2599 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2601 * Need to send an abort since we had a
2602 * invalid data chunk.
2604 struct mbuf *op_err;
2606 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2607 0, M_NOWAIT, 1, MT_DATA);
2610 struct sctp_paramhdr *ph;
2613 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2614 (2 * sizeof(uint32_t));
2615 ph = mtod(op_err, struct sctp_paramhdr *);
2617 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2618 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2619 ippp = (uint32_t *) (ph + 1);
2620 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2622 *ippp = asoc->cumulative_tsn;
2625 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2626 sctp_abort_association(inp, stcb, m, iphlen,
2627 src, dst, sh, op_err,
2628 use_mflowid, mflowid,
2632 #ifdef SCTP_AUDITING_ENABLED
2633 sctp_audit_log(0xB1, 0);
/* last_chunk: true when this chunk reaches the end of the packet. */
2635 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2640 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2641 chk_length, net, high_tsn, &abort_flag, &break_flag,
2650 * Set because of out of rwnd space and no
2651 * drop rep space left.
2657 /* not a data chunk in the data region */
2658 switch (ch->ch.chunk_type) {
2659 case SCTP_INITIATION:
2660 case SCTP_INITIATION_ACK:
2661 case SCTP_SELECTIVE_ACK:
2662 case SCTP_NR_SELECTIVE_ACK:
2663 case SCTP_HEARTBEAT_REQUEST:
2664 case SCTP_HEARTBEAT_ACK:
2665 case SCTP_ABORT_ASSOCIATION:
2667 case SCTP_SHUTDOWN_ACK:
2668 case SCTP_OPERATION_ERROR:
2669 case SCTP_COOKIE_ECHO:
2670 case SCTP_COOKIE_ACK:
2673 case SCTP_SHUTDOWN_COMPLETE:
2674 case SCTP_AUTHENTICATION:
2675 case SCTP_ASCONF_ACK:
2676 case SCTP_PACKET_DROPPED:
2677 case SCTP_STREAM_RESET:
2678 case SCTP_FORWARD_CUM_TSN:
2681 * Now, what do we do with KNOWN chunks that
2682 * are NOT in the right place?
2684 * For now, I do nothing but ignore them. We
2685 * may later want to add sysctl stuff to
2686 * switch out and do either an ABORT() or
2687 * possibly process them.
2689 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2690 struct mbuf *op_err;
2692 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2693 sctp_abort_association(inp, stcb,
2697 use_mflowid, mflowid,
2703 /* unknown chunk type, use bit rules */
2704 if (ch->ch.chunk_type & 0x40) {
2705 /* Add a error report to the queue */
2707 struct sctp_paramhdr *phd;
2709 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_NOWAIT, 1, MT_DATA);
2711 phd = mtod(merr, struct sctp_paramhdr *);
2713 * We cheat and use param
2714 * type since we did not
2715 * bother to define a error
2716 * cause struct. They are
2717 * the same basic format
2718 * with different names.
2721 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2723 htons(chk_length + sizeof(*phd));
2724 SCTP_BUF_LEN(merr) = sizeof(*phd);
2725 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_NOWAIT);
2726 if (SCTP_BUF_NEXT(merr)) {
2727 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2730 sctp_queue_op_err(stcb, merr);
2737 if ((ch->ch.chunk_type & 0x80) == 0) {
2738 /* discard the rest of this packet */
2740 } /* else skip this bad chunk and
2743 } /* switch of chunk type */
/* Advance to the next chunk (chunks are 4-byte aligned). */
2745 *offset += SCTP_SIZE32(chk_length);
2746 if ((*offset >= length) || stop_proc) {
2747 /* no more data left in the mbuf chain */
2751 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2752 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2761 * we need to report rwnd overrun drops.
2763 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2767 * Did we get data, if so update the time for auto-close and
2768 * give peer credit for being alive.
2770 SCTP_STAT_INCR(sctps_recvpktwithdata);
2771 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2772 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2773 stcb->asoc.overall_error_count,
2775 SCTP_FROM_SCTP_INDATA,
2778 stcb->asoc.overall_error_count = 0;
2779 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2781 /* now service all of the reassm queue if needed */
2782 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2783 sctp_service_queues(stcb, asoc);
2785 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2786 /* Assure that we ack right away */
2787 stcb->asoc.send_sack = 1;
2789 /* Start a sack timer or QUEUE a SACK for sending */
2790 sctp_sack_check(stcb, was_a_gap);
/*
 * sctp_process_segment_range() - process one gap-ack fragment of a SACK,
 * covering TSNs last_tsn+frag_strt .. last_tsn+frag_end, over the sent
 * queue.  Chunks that fall in the range are marked acked (MARKED, or
 * NR_ACKED when nr_sacking), flight size and peer rwnd bookkeeping are
 * updated, an RTO sample may be taken, and the CMT (rtx-)pseudo-cumack
 * trackers plus biggest/lowest newly-acked TSNs are maintained.  *p_tp1
 * carries the scan position between fragments.  The return value is
 * wake_him (per the in-code comment, only used for nr-sack).
 */
2795 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2796 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2798 uint32_t * biggest_newly_acked_tsn,
2799 uint32_t * this_sack_lowest_newack,
2802 struct sctp_tmit_chunk *tp1;
2803 unsigned int theTSN;
2804 int j, wake_him = 0, circled = 0;
2806 /* Recover the tp1 we last saw */
2809 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
/* Gap block bounds are 16-bit offsets relative to the SACK's cum-ack. */
2811 for (j = frag_strt; j <= frag_end; j++) {
2812 theTSN = j + last_tsn;
2814 if (tp1->rec.data.doing_fast_retransmit)
2818 * CMT: CUCv2 algorithm. For each TSN being
2819 * processed from the sent queue, track the
2820 * next expected pseudo-cumack, or
2821 * rtx_pseudo_cumack, if required. Separate
2822 * cumack trackers for first transmissions,
2823 * and retransmissions.
2825 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2826 (tp1->snd_count == 1)) {
2827 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2828 tp1->whoTo->find_pseudo_cumack = 0;
2830 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2831 (tp1->snd_count > 1)) {
2832 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2833 tp1->whoTo->find_rtx_pseudo_cumack = 0;
/* Found the sent-queue chunk for this gap-acked TSN. */
2835 if (tp1->rec.data.TSN_seq == theTSN) {
2836 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2838 * must be held until
2841 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2843 * If it is less than RESEND, it is
2844 * now no-longer in flight.
2845 * Higher values may already be set
2846 * via previous Gap Ack Blocks...
2847 * i.e. ACKED or RESEND.
2849 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2850 *biggest_newly_acked_tsn)) {
2851 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2854 * CMT: SFR algo (and HTNA) - set
2855 * saw_newack to 1 for dest being
2856 * newly acked. update
2857 * this_sack_highest_newack if
2860 if (tp1->rec.data.chunk_was_revoked == 0)
2861 tp1->whoTo->saw_newack = 1;
2863 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2864 tp1->whoTo->this_sack_highest_newack)) {
2865 tp1->whoTo->this_sack_highest_newack =
2866 tp1->rec.data.TSN_seq;
2869 * CMT DAC algo: also update
2870 * this_sack_lowest_newack
2872 if (*this_sack_lowest_newack == 0) {
2873 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2874 sctp_log_sack(*this_sack_lowest_newack,
2876 tp1->rec.data.TSN_seq,
2879 SCTP_LOG_TSN_ACKED);
2881 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2884 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2885 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2886 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2887 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2888 * Separate pseudo_cumack trackers for first transmissions and
2891 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2892 if (tp1->rec.data.chunk_was_revoked == 0) {
2893 tp1->whoTo->new_pseudo_cumack = 1;
2895 tp1->whoTo->find_pseudo_cumack = 1;
2897 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2898 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2900 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2901 if (tp1->rec.data.chunk_was_revoked == 0) {
2902 tp1->whoTo->new_pseudo_cumack = 1;
2904 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2906 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2907 sctp_log_sack(*biggest_newly_acked_tsn,
2909 tp1->rec.data.TSN_seq,
2912 SCTP_LOG_TSN_ACKED);
2914 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2915 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2916 tp1->whoTo->flight_size,
2918 (uintptr_t) tp1->whoTo,
2919 tp1->rec.data.TSN_seq);
/* Newly acked: take it out of flight and credit net_ack. */
2921 sctp_flight_size_decrease(tp1);
2922 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2923 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2926 sctp_total_flight_decrease(stcb, tp1);
2928 tp1->whoTo->net_ack += tp1->send_size;
2929 if (tp1->snd_count < 2) {
2931 * True non-retransmited chunk
2933 tp1->whoTo->net_ack2 += tp1->send_size;
/* RTO sample from a first-transmission chunk's send time. */
2941 sctp_calculate_rto(stcb,
2944 &tp1->sent_rcv_time,
2945 sctp_align_safe_nocopy,
2946 SCTP_RTT_FROM_DATA);
2949 if (tp1->whoTo->rto_needed == 0) {
2950 tp1->whoTo->rto_needed = 1;
2956 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2957 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2958 stcb->asoc.this_sack_highest_gap)) {
2959 stcb->asoc.this_sack_highest_gap =
2960 tp1->rec.data.TSN_seq;
2962 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2963 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2964 #ifdef SCTP_AUDITING_ENABLED
2965 sctp_audit_log(0xB2,
2966 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2971 * All chunks NOT UNSENT fall through here and are marked
2972 * (leave PR-SCTP ones that are to skip alone though)
2974 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2975 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2976 tp1->sent = SCTP_DATAGRAM_MARKED;
2978 if (tp1->rec.data.chunk_was_revoked) {
2979 /* deflate the cwnd */
2980 tp1->whoTo->cwnd -= tp1->book_size;
2981 tp1->rec.data.chunk_was_revoked = 0;
2983 /* NR Sack code here */
2985 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
/* NR-sacked: the stream's queued-chunk count must be positive. */
2986 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2987 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2990 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2993 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
2999 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3000 sctp_m_freem(tp1->data);
3007 } /* if (tp1->TSN_seq == theTSN) */
3008 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3011 tp1 = TAILQ_NEXT(tp1, sctp_next);
/* Wrap the scan once if the fragments were out of order. */
3012 if ((tp1 == NULL) && (circled == 0)) {
3014 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3016 } /* end while (tp1) */
3019 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3021 /* In case the fragments were not in order we must reset */
3022 } /* end for (j = fragStart */
3024 return (wake_him); /* Return value only used for nr-sack */
/*
 * sctp_handle_segments() - iterate the num_seg gap-ack blocks followed by
 * the num_nr_seg NR-gap-ack blocks carried in a SACK/NR-SACK chunk.
 * Malformed blocks (start > end) are skipped; a block that is not in
 * ascending order restarts the sent-queue scan from the head.  Each
 * valid block is handed to sctp_process_segment_range() and
 * *biggest_tsn_acked is raised as needed.  Returns chunk_freed
 * (presumably set when a processed range freed chunk data; the
 * assignment is not visible in this view - confirm against full source).
 */
3029 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3030 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3031 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3032 int num_seg, int num_nr_seg, int *rto_ok)
3034 struct sctp_gap_ack_block *frag, block;
3035 struct sctp_tmit_chunk *tp1;
3040 uint16_t frag_strt, frag_end, prev_frag_end;
3042 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3046 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3049 tp1 = TAILQ_FIRST(&asoc->sent_queue);
/* Pull the next gap block out of the SACK chunk in the mbuf chain. */
3051 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3052 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3053 *offset += sizeof(block);
3055 return (chunk_freed);
3057 frag_strt = ntohs(frag->start);
3058 frag_end = ntohs(frag->end);
3060 if (frag_strt > frag_end) {
3061 /* This gap report is malformed, skip it. */
3064 if (frag_strt <= prev_frag_end) {
3065 /* This gap report is not in order, so restart. */
3066 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3068 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3069 *biggest_tsn_acked = last_tsn + frag_end;
3076 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3077 non_revocable, &num_frs, biggest_newly_acked_tsn,
3078 this_sack_lowest_newack, rto_ok)) {
3081 prev_frag_end = frag_end;
3083 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3085 sctp_log_fr(*biggest_tsn_acked,
3086 *biggest_newly_acked_tsn,
3087 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3089 return (chunk_freed);
/*
 * sctp_check_for_revoked() - scan the sent queue for chunks above the
 * cumulative ack.  A chunk still in DATAGRAM_ACKED state here was
 * gap-acked by a previous SACK but not by this one, i.e. the ack was
 * "revoked": it is put back to SENT, flagged chunk_was_revoked, and the
 * flight size and cwnd are re-inflated so timers and congestion control
 * account for it again.  A chunk in DATAGRAM_MARKED state (acked by this
 * SACK) is promoted to DATAGRAM_ACKED.  How the biggest_tsn_acked bound
 * terminates the scan is not fully visible in this view - confirm
 * against the full source.
 */
3093 sctp_check_for_revoked(struct sctp_tcb *stcb,
3094 struct sctp_association *asoc, uint32_t cumack,
3095 uint32_t biggest_tsn_acked)
3097 struct sctp_tmit_chunk *tp1;
3099 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3100 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3102 * ok this guy is either ACK or MARKED. If it is
3103 * ACKED it has been previously acked but not this
3104 * time i.e. revoked. If it is MARKED it was ACK'ed
3107 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3110 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3111 /* it has been revoked */
3112 tp1->sent = SCTP_DATAGRAM_SENT;
3113 tp1->rec.data.chunk_was_revoked = 1;
3115 * We must add this stuff back in to assure
3116 * timers and such get started.
3118 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3119 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3120 tp1->whoTo->flight_size,
3122 (uintptr_t) tp1->whoTo,
3123 tp1->rec.data.TSN_seq);
3125 sctp_flight_size_increase(tp1);
3126 sctp_total_flight_increase(stcb, tp1);
3128 * We inflate the cwnd to compensate for our
3129 * artificial inflation of the flight_size.
3131 tp1->whoTo->cwnd += tp1->book_size;
3132 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3133 sctp_log_sack(asoc->last_acked_seq,
3135 tp1->rec.data.TSN_seq,
3138 SCTP_LOG_TSN_REVOKED);
3140 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3141 /* it has been re-acked in this SACK */
3142 tp1->sent = SCTP_DATAGRAM_ACKED;
3145 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3152 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3153 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3155 struct sctp_tmit_chunk *tp1;
3156 int strike_flag = 0;
3158 int tot_retrans = 0;
3159 uint32_t sending_seq;
3160 struct sctp_nets *net;
3161 int num_dests_sacked = 0;
3164 * select the sending_seq, this is either the next thing ready to be
3165 * sent but not transmitted, OR, the next seq we assign.
3167 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3169 sending_seq = asoc->sending_seq;
3171 sending_seq = tp1->rec.data.TSN_seq;
3174 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3175 if ((asoc->sctp_cmt_on_off > 0) &&
3176 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3177 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3178 if (net->saw_newack)
3182 if (stcb->asoc.peer_supports_prsctp) {
3183 (void)SCTP_GETTIME_TIMEVAL(&now);
3185 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3187 if (tp1->no_fr_allowed) {
3188 /* this one had a timeout or something */
3191 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3192 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3193 sctp_log_fr(biggest_tsn_newly_acked,
3194 tp1->rec.data.TSN_seq,
3196 SCTP_FR_LOG_CHECK_STRIKE);
3198 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3199 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3203 if (stcb->asoc.peer_supports_prsctp) {
3204 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3205 /* Is it expired? */
3206 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3207 /* Yes so drop it */
3208 if (tp1->data != NULL) {
3209 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3210 SCTP_SO_NOT_LOCKED);
3216 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3217 /* we are beyond the tsn in the sack */
3220 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3221 /* either a RESEND, ACKED, or MARKED */
3223 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3224 /* Continue strikin FWD-TSN chunks */
3225 tp1->rec.data.fwd_tsn_cnt++;
3230 * CMT : SFR algo (covers part of DAC and HTNA as well)
3232 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3234 * No new acks were receieved for data sent to this
3235 * dest. Therefore, according to the SFR algo for
3236 * CMT, no data sent to this dest can be marked for
3237 * FR using this SACK.
3240 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3241 tp1->whoTo->this_sack_highest_newack)) {
3243 * CMT: New acks were receieved for data sent to
3244 * this dest. But no new acks were seen for data
3245 * sent after tp1. Therefore, according to the SFR
3246 * algo for CMT, tp1 cannot be marked for FR using
3247 * this SACK. This step covers part of the DAC algo
3248 * and the HTNA algo as well.
3253 * Here we check to see if we were have already done a FR
3254 * and if so we see if the biggest TSN we saw in the sack is
3255 * smaller than the recovery point. If so we don't strike
3256 * the tsn... otherwise we CAN strike the TSN.
3259 * @@@ JRI: Check for CMT if (accum_moved &&
3260 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3263 if (accum_moved && asoc->fast_retran_loss_recovery) {
3265 * Strike the TSN if in fast-recovery and cum-ack
3268 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3269 sctp_log_fr(biggest_tsn_newly_acked,
3270 tp1->rec.data.TSN_seq,
3272 SCTP_FR_LOG_STRIKE_CHUNK);
3274 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3277 if ((asoc->sctp_cmt_on_off > 0) &&
3278 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3280 * CMT DAC algorithm: If SACK flag is set to
3281 * 0, then lowest_newack test will not pass
3282 * because it would have been set to the
3283 * cumack earlier. If not already to be
3284 * rtx'd, If not a mixed sack and if tp1 is
3285 * not between two sacked TSNs, then mark by
3286 * one more. NOTE that we are marking by one
3287 * additional time since the SACK DAC flag
3288 * indicates that two packets have been
3289 * received after this missing TSN.
3291 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3292 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3293 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3294 sctp_log_fr(16 + num_dests_sacked,
3295 tp1->rec.data.TSN_seq,
3297 SCTP_FR_LOG_STRIKE_CHUNK);
3302 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3303 (asoc->sctp_cmt_on_off == 0)) {
3305 * For those that have done a FR we must take
3306 * special consideration if we strike. I.e the
3307 * biggest_newly_acked must be higher than the
3308 * sending_seq at the time we did the FR.
3311 #ifdef SCTP_FR_TO_ALTERNATE
3313 * If FR's go to new networks, then we must only do
3314 * this for singly homed asoc's. However if the FR's
3315 * go to the same network (Armando's work) then its
3316 * ok to FR multiple times.
3324 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3325 tp1->rec.data.fast_retran_tsn)) {
3327 * Strike the TSN, since this ack is
3328 * beyond where things were when we
3331 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3332 sctp_log_fr(biggest_tsn_newly_acked,
3333 tp1->rec.data.TSN_seq,
3335 SCTP_FR_LOG_STRIKE_CHUNK);
3337 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3341 if ((asoc->sctp_cmt_on_off > 0) &&
3342 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3344 * CMT DAC algorithm: If
3345 * SACK flag is set to 0,
3346 * then lowest_newack test
3347 * will not pass because it
3348 * would have been set to
3349 * the cumack earlier. If
3350 * not already to be rtx'd,
3351 * If not a mixed sack and
3352 * if tp1 is not between two
3353 * sacked TSNs, then mark by
3354 * one more. NOTE that we
3355 * are marking by one
3356 * additional time since the
3357 * SACK DAC flag indicates
3358 * that two packets have
3359 * been received after this
3362 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3363 (num_dests_sacked == 1) &&
3364 SCTP_TSN_GT(this_sack_lowest_newack,
3365 tp1->rec.data.TSN_seq)) {
3366 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3367 sctp_log_fr(32 + num_dests_sacked,
3368 tp1->rec.data.TSN_seq,
3370 SCTP_FR_LOG_STRIKE_CHUNK);
3372 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3380 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3383 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3384 biggest_tsn_newly_acked)) {
3386 * We don't strike these: This is the HTNA
3387 * algorithm i.e. we don't strike If our TSN is
3388 * larger than the Highest TSN Newly Acked.
3392 /* Strike the TSN */
3393 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3394 sctp_log_fr(biggest_tsn_newly_acked,
3395 tp1->rec.data.TSN_seq,
3397 SCTP_FR_LOG_STRIKE_CHUNK);
3399 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3402 if ((asoc->sctp_cmt_on_off > 0) &&
3403 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3405 * CMT DAC algorithm: If SACK flag is set to
3406 * 0, then lowest_newack test will not pass
3407 * because it would have been set to the
3408 * cumack earlier. If not already to be
3409 * rtx'd, If not a mixed sack and if tp1 is
3410 * not between two sacked TSNs, then mark by
3411 * one more. NOTE that we are marking by one
3412 * additional time since the SACK DAC flag
3413 * indicates that two packets have been
3414 * received after this missing TSN.
3416 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3417 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3418 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3419 sctp_log_fr(48 + num_dests_sacked,
3420 tp1->rec.data.TSN_seq,
3422 SCTP_FR_LOG_STRIKE_CHUNK);
3428 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3429 struct sctp_nets *alt;
3431 /* fix counts and things */
3432 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3433 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3434 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3436 (uintptr_t) tp1->whoTo,
3437 tp1->rec.data.TSN_seq);
3440 tp1->whoTo->net_ack++;
3441 sctp_flight_size_decrease(tp1);
3442 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3443 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3447 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3448 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3449 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3451 /* add back to the rwnd */
3452 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3454 /* remove from the total flight */
3455 sctp_total_flight_decrease(stcb, tp1);
3457 if ((stcb->asoc.peer_supports_prsctp) &&
3458 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3460 * Has it been retransmitted tv_sec times? -
3461 * we store the retran count there.
3463 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3464 /* Yes, so drop it */
3465 if (tp1->data != NULL) {
3466 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3467 SCTP_SO_NOT_LOCKED);
3469 /* Make sure to flag we had a FR */
3470 tp1->whoTo->net_ack++;
3475 * SCTP_PRINTF("OK, we are now ready to FR this
3478 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3479 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3483 /* This is a subsequent FR */
3484 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3486 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3487 if (asoc->sctp_cmt_on_off > 0) {
3489 * CMT: Using RTX_SSTHRESH policy for CMT.
3490 * If CMT is being used, then pick dest with
3491 * largest ssthresh for any retransmission.
3493 tp1->no_fr_allowed = 1;
3495 /* sa_ignore NO_NULL_CHK */
3496 if (asoc->sctp_cmt_pf > 0) {
3498 * JRS 5/18/07 - If CMT PF is on,
3499 * use the PF version of
3502 alt = sctp_find_alternate_net(stcb, alt, 2);
3505 * JRS 5/18/07 - If only CMT is on,
3506 * use the CMT version of
3509 /* sa_ignore NO_NULL_CHK */
3510 alt = sctp_find_alternate_net(stcb, alt, 1);
3516 * CUCv2: If a different dest is picked for
3517 * the retransmission, then new
3518 * (rtx-)pseudo_cumack needs to be tracked
3519 * for orig dest. Let CUCv2 track new (rtx-)
3520 * pseudo-cumack always.
3523 tp1->whoTo->find_pseudo_cumack = 1;
3524 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3526 } else {/* CMT is OFF */
3528 #ifdef SCTP_FR_TO_ALTERNATE
3529 /* Can we find an alternate? */
3530 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3533 * default behavior is to NOT retransmit
3534 * FR's to an alternate. Armando Caro's
3535 * paper details why.
3541 tp1->rec.data.doing_fast_retransmit = 1;
3543 /* mark the sending seq for possible subsequent FR's */
3545 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3546 * (uint32_t)tpi->rec.data.TSN_seq);
3548 if (TAILQ_EMPTY(&asoc->send_queue)) {
3550 * If the queue of send is empty then its
3551 * the next sequence number that will be
3552 * assigned so we subtract one from this to
3553 * get the one we last sent.
3555 tp1->rec.data.fast_retran_tsn = sending_seq;
3558 * If there are chunks on the send queue
3559 * (unsent data that has made it from the
3560 * stream queues but not out the door, we
3561 * take the first one (which will have the
3562 * lowest TSN) and subtract one to get the
3565 struct sctp_tmit_chunk *ttt;
3567 ttt = TAILQ_FIRST(&asoc->send_queue);
3568 tp1->rec.data.fast_retran_tsn =
3569 ttt->rec.data.TSN_seq;
3574 * this guy had a RTO calculation pending on
3577 if ((tp1->whoTo != NULL) &&
3578 (tp1->whoTo->rto_needed == 0)) {
3579 tp1->whoTo->rto_needed = 1;
3583 if (alt != tp1->whoTo) {
3584 /* yes, there is an alternate. */
3585 sctp_free_remote_addr(tp1->whoTo);
3586 /* sa_ignore FREED_MEMORY */
3588 atomic_add_int(&alt->ref_count, 1);
/*
 * PR-SCTP: walk the sent_queue from the front and try to advance
 * asoc->advanced_peer_ack_point past chunks the peer no longer needs
 * (skipped, resend-expired, or NR-acked ones).  The returned chunk is
 * presumably the last one advanced over, used by callers to decide on
 * sending a FORWARD-TSN — TODO confirm against the full source; the
 * return statements are not visible in this gap-sampled extract.
 *
 * NOTE(review): intermediate lines of the original file are missing
 * here (residual line numbers jump), so braces/else-arms between the
 * visible statements are not shown.
 */
3594 struct sctp_tmit_chunk *
3595 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3596 struct sctp_association *asoc)
3598 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
/* Nothing to do if the peer never negotiated PR-SCTP. */
3602 if (asoc->peer_supports_prsctp == 0) {
/* Safe iteration: the loop body may release tp1 (see drop path below). */
3605 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
/*
 * Only SKIP / RESEND / NR_ACKED states are candidates; the first
 * chunk in any other state ends the advance (cumulative property).
 */
3606 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3607 tp1->sent != SCTP_DATAGRAM_RESEND &&
3608 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3609 /* no chance to advance, out of here */
3612 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3613 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3614 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3615 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3616 asoc->advanced_peer_ack_point,
3617 tp1->rec.data.TSN_seq, 0, 0);
/* Reliable (non-PR) chunks form a hard barrier to advancing. */
3620 if (!PR_SCTP_ENABLED(tp1->flags)) {
3622 * We can't fwd-tsn past any that are reliable aka
3623 * retransmitted until the asoc fails.
3628 (void)SCTP_GETTIME_TIMEVAL(&now);
3632 * now we got a chunk which is marked for another
3633 * retransmission to a PR-stream but has run out its chances
3634 * already maybe OR has been marked to skip now. Can we skip
3635 * it if its a resend?
/*
 * TTL-policy chunk pending retransmission: if its lifetime
 * (rec.data.timetodrop) has expired, abandon it instead of
 * retransmitting; otherwise a live RESEND stops the advance.
 */
3637 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3638 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3640 * Now is this one marked for resend and its time is
3643 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3644 /* Yes so drop it */
3646 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3647 1, SCTP_SO_NOT_LOCKED);
3651 * No, we are done when hit one for resend
3652 * whos time as not expired.
3658 * Ok now if this chunk is marked to drop it we can clean up
3659 * the chunk, advance our peer ack point and we can check
/* Advance the ack point monotonically (TSN serial arithmetic). */
3662 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3663 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3664 /* advance PeerAckPoint goes forward */
3665 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3666 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3668 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3669 /* No update but we do save the chk */
3674 * If it is still in RESEND we can advance no
/*
 * Audit the association's flight-size bookkeeping: walk the sent_queue
 * and bucket each chunk by its ->sent state (in flight, marked resend,
 * in between, acked, above acked).  If chunks that should be in flight
 * or "in between" remain, the accounting is inconsistent: the function
 * either panics (presumably under an INVARIANTS-style build — the
 * surrounding #ifdef lines are missing from this gap-sampled extract)
 * or prints diagnostics.  Return-value lines are not visible here;
 * callers (see sctp_express_handle_sack) treat nonzero as "rebuild the
 * flight counters" — TODO confirm against the full source.
 */
3684 sctp_fs_audit(struct sctp_association *asoc)
3686 struct sctp_tmit_chunk *chk;
3687 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3688 int entry_flight, entry_cnt, ret;
/* Snapshot the counters on entry for the diagnostic printout below. */
3690 entry_flight = asoc->total_flight;
3691 entry_cnt = asoc->total_flight_count;
/* All-PR queue: nothing meaningful to audit (early out). */
3694 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3697 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
/* sent < RESEND means the chunk is still counted as in flight. */
3698 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3699 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3700 chk->rec.data.TSN_seq,
3704 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3706 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3708 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
/* Any leftover in-flight/in-between chunk at this point is a bug. */
3715 if ((inflight > 0) || (inbetween > 0)) {
3717 panic("Flight size-express incorrect? \n");
3719 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3720 entry_flight, entry_cnt);
3722 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3723 inflight, inbetween, resend, above, acked);
/*
 * Recover a chunk that was sent as a zero-window probe: clear its
 * window_probe flag and, unless it has already been acked/skipped
 * (sent >= ACKED or its data mbuf is gone), take it out of the flight
 * accounting and mark it SCTP_DATAGRAM_RESEND so it is retransmitted
 * through the normal path now that the peer's window has reopened.
 * NOTE(review): this extract is gap-sampled; the early-return after
 * the skipped-TSN log and some closing braces are not visible here.
 */
3732 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3733 struct sctp_association *asoc,
3734 struct sctp_tmit_chunk *tp1)
3736 tp1->window_probe = 0;
/* Already acked/abandoned: log and do NOT demote it back to resend. */
3737 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3738 /* TSN's skipped we do NOT move back. */
3739 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3740 tp1->whoTo->flight_size,
3742 (uintptr_t) tp1->whoTo,
3743 tp1->rec.data.TSN_seq);
3746 /* First setup this by shrinking flight */
/* Let the CC module see the chunk leave the flight, if it cares. */
3747 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3748 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3751 sctp_flight_size_decrease(tp1);
3752 sctp_total_flight_decrease(stcb, tp1);
3753 /* Now mark for resend */
3754 tp1->sent = SCTP_DATAGRAM_RESEND;
/* Keep the retransmit count in sync with the new RESEND state. */
3755 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3757 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3758 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3759 tp1->whoTo->flight_size,
3761 (uintptr_t) tp1->whoTo,
3762 tp1->rec.data.TSN_seq);
/*
 * Fast-path SACK handler: processes a SACK carrying only a cumulative
 * ack (cumack) and a receiver window (rwnd) — no gap-ack blocks.
 * Frees newly cum-acked chunks, updates RTO/CC state per destination,
 * recovers window probes, handles shutdown progression when the send
 * queues drain, and runs the PR-SCTP FORWARD-TSN advance.  Sets
 * *abort_now when the association is aborted; ecne_seen suppresses
 * the CC "after sack" update.
 *
 * NOTE(review): this extract is gap-sampled — intermediate lines of
 * the original file (closing braces, else-arms, some declarations such
 * as old_rwnd/send_s/oper/to_ticks) are missing between the visible
 * statements.  Comments below describe only what is visible.
 */
3767 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3768 uint32_t rwnd, int *abort_now, int ecne_seen)
3770 struct sctp_nets *net;
3771 struct sctp_association *asoc;
3772 struct sctp_tmit_chunk *tp1, *tp2;
3774 int win_probe_recovery = 0;
3775 int win_probe_recovered = 0;
3776 int j, done_once = 0;
3779 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3780 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3781 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3783 SCTP_TCB_LOCK_ASSERT(stcb);
/* Optional ring-buffer trace of arriving cumacks (debug builds). */
3784 #ifdef SCTP_ASOCLOG_OF_TSNS
3785 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3786 stcb->asoc.cumack_log_at++;
3787 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
3788 stcb->asoc.cumack_log_at = 0;
3792 old_rwnd = asoc->peers_rwnd;
/* Old SACK (cumack behind what we already acked): discard path. */
3793 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3796 } else if (asoc->last_acked_seq == cumack) {
3797 /* Window update sack */
/* rwnd minus bytes still in flight (plus per-chunk overhead). */
3798 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3799 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
3800 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3801 /* SWS sender side engages */
3802 asoc->peers_rwnd = 0;
3804 if (asoc->peers_rwnd > old_rwnd) {
3809 /* First setup for CC stuff */
3810 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3811 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3812 /* Drag along the window_tsn for cwr's */
3813 net->cwr_window_tsn = cumack;
3815 net->prev_cwnd = net->cwnd;
3820 * CMT: Reset CUC and Fast recovery algo variables before
3823 net->new_pseudo_cumack = 0;
3824 net->will_exit_fast_recovery = 0;
3825 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3826 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
/*
 * Strict validation: a cumack at or beyond our next sending_seq can
 * never be legitimate — abort the association with a protocol
 * violation cause (oper mbuf built below).
 */
3829 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3832 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3833 tp1 = TAILQ_LAST(&asoc->sent_queue,
3834 sctpchunk_listhead);
3835 send_s = tp1->rec.data.TSN_seq + 1;
3837 send_s = asoc->sending_seq;
3839 if (SCTP_TSN_GE(cumack, send_s)) {
3845 panic("Impossible sack 1");
3850 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3851 0, M_NOWAIT, 1, MT_DATA);
3853 struct sctp_paramhdr *ph;
3856 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3858 ph = mtod(oper, struct sctp_paramhdr *);
3859 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3860 ph->param_length = htons(SCTP_BUF_LEN(oper));
3861 ippp = (uint32_t *) (ph + 1);
3862 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3864 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3865 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
3870 asoc->this_sack_highest_gap = cumack;
3871 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3872 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3873 stcb->asoc.overall_error_count,
3875 SCTP_FROM_SCTP_INDATA,
/* A valid SACK clears the association-wide error counter. */
3878 stcb->asoc.overall_error_count = 0;
3879 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3880 /* process the new consecutive TSN first */
/* Safe iteration: acked chunks are removed and freed in this loop. */
3881 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3882 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3883 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3884 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3886 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3888 * If it is less than ACKED, it is
3889 * now no-longer in flight. Higher
3890 * values may occur during marking
3892 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3893 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3894 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3895 tp1->whoTo->flight_size,
3897 (uintptr_t) tp1->whoTo,
3898 tp1->rec.data.TSN_seq);
3900 sctp_flight_size_decrease(tp1);
3901 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3902 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3905 /* sa_ignore NO_NULL_CHK */
3906 sctp_total_flight_decrease(stcb, tp1);
/* Credit the destination with the acked bytes for CC purposes. */
3908 tp1->whoTo->net_ack += tp1->send_size;
3909 if (tp1->snd_count < 2) {
3911 * True non-retransmited
3914 tp1->whoTo->net_ack2 +=
3917 /* update RTO too? */
/* RTT sample is only valid for never-retransmitted chunks (Karn). */
3926 sctp_calculate_rto(stcb,
3928 &tp1->sent_rcv_time,
3929 sctp_align_safe_nocopy,
3930 SCTP_RTT_FROM_DATA);
3933 if (tp1->whoTo->rto_needed == 0) {
3934 tp1->whoTo->rto_needed = 1;
3940 * CMT: CUCv2 algorithm. From the
3941 * cumack'd TSNs, for each TSN being
3942 * acked for the first time, set the
3943 * following variables for the
3944 * corresp destination.
3945 * new_pseudo_cumack will trigger a
3947 * find_(rtx_)pseudo_cumack will
3948 * trigger search for the next
3949 * expected (rtx-)pseudo-cumack.
3951 tp1->whoTo->new_pseudo_cumack = 1;
3952 tp1->whoTo->find_pseudo_cumack = 1;
3953 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3955 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3956 /* sa_ignore NO_NULL_CHK */
3957 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
/* Chunk was awaiting retransmit: drop it from the retran count. */
3960 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3961 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3963 if (tp1->rec.data.chunk_was_revoked) {
3964 /* deflate the cwnd */
3965 tp1->whoTo->cwnd -= tp1->book_size;
3966 tp1->rec.data.chunk_was_revoked = 0;
/* NR-acked chunks were already removed from the stream's queue count. */
3968 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3969 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3970 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3973 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
/* Fully acked: unlink, release buffer space, and free the chunk. */
3977 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3979 /* sa_ignore NO_NULL_CHK */
3980 sctp_free_bufspace(stcb, asoc, tp1, 1);
3981 sctp_m_freem(tp1->data);
3984 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3985 sctp_log_sack(asoc->last_acked_seq,
3987 tp1->rec.data.TSN_seq,
3990 SCTP_LOG_FREE_SENT);
3992 asoc->sent_queue_cnt--;
3993 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
/*
 * Freed send-buffer space: wake any writer blocked on so_snd.  On
 * Apple/lock-testing builds the socket lock must be taken with the
 * TCB lock dropped, so re-check for a concurrent close afterwards.
 */
4000 /* sa_ignore NO_NULL_CHK */
4001 if (stcb->sctp_socket) {
4002 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4006 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4007 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4008 /* sa_ignore NO_NULL_CHK */
4009 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4011 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4012 so = SCTP_INP_SO(stcb->sctp_ep);
4013 atomic_add_int(&stcb->asoc.refcnt, 1);
4014 SCTP_TCB_UNLOCK(stcb);
4015 SCTP_SOCKET_LOCK(so, 1);
4016 SCTP_TCB_LOCK(stcb);
4017 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4018 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4019 /* assoc was freed while we were unlocked */
4020 SCTP_SOCKET_UNLOCK(so, 1);
4024 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4025 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4026 SCTP_SOCKET_UNLOCK(so, 1);
4029 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4030 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4034 /* JRS - Use the congestion control given in the CC module */
/* Only run CC/error-count updates when the cumack actually moved. */
4035 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4036 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4037 if (net->net_ack2 > 0) {
4039 * Karn's rule applies to clearing error
4040 * count, this is optional.
4042 net->error_count = 0;
4043 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4044 /* addr came good */
4045 net->dest_state |= SCTP_ADDR_REACHABLE;
4046 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4047 0, (void *)net, SCTP_SO_NOT_LOCKED);
4049 if (net == stcb->asoc.primary_destination) {
4050 if (stcb->asoc.alternate) {
4052 * release the alternate,
4055 sctp_free_remote_addr(stcb->asoc.alternate);
4056 stcb->asoc.alternate = NULL;
/* Leaving potentially-failed (PF) state: restart heartbeats. */
4059 if (net->dest_state & SCTP_ADDR_PF) {
4060 net->dest_state &= ~SCTP_ADDR_PF;
4061 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4062 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4063 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4064 /* Done with this net */
4067 /* restore any doubled timers */
/* Recompute RTO from smoothed RTT/variance, clamped to [minrto, maxrto]. */
4068 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4069 if (net->RTO < stcb->asoc.minrto) {
4070 net->RTO = stcb->asoc.minrto;
4072 if (net->RTO > stcb->asoc.maxrto) {
4073 net->RTO = stcb->asoc.maxrto;
4077 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4079 asoc->last_acked_seq = cumack;
/* Everything acked: zero all flight accounting outright. */
4081 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4082 /* nothing left in-flight */
4083 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4084 net->flight_size = 0;
4085 net->partial_bytes_acked = 0;
4087 asoc->total_flight = 0;
4088 asoc->total_flight_count = 0;
/* Recompute the peer rwnd against the (possibly reduced) flight. */
4091 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4092 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4093 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4094 /* SWS sender side engages */
4095 asoc->peers_rwnd = 0;
4097 if (asoc->peers_rwnd > old_rwnd) {
4098 win_probe_recovery = 1;
4100 /* Now assure a timer where data is queued at */
4103 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
/* Window reopened: pull probe chunks back into the normal send path. */
4106 if (win_probe_recovery && (net->window_probe)) {
4107 win_probe_recovered = 1;
4109 * Find first chunk that was used with window probe
4110 * and clear the sent
4112 /* sa_ignore FREED_MEMORY */
4113 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4114 if (tp1->window_probe) {
4115 /* move back to data send queue */
4116 sctp_window_probe_recovery(stcb, asoc, tp1);
4121 if (net->RTO == 0) {
4122 to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
4124 to_ticks = MSEC_TO_TICKS(net->RTO);
/* Data still outstanding on this net: (re)arm its T3-rtx timer. */
4126 if (net->flight_size) {
4128 (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4129 sctp_timeout_handler, &net->rxt_timer);
4130 if (net->window_probe) {
4131 net->window_probe = 0;
4134 if (net->window_probe) {
4136 * In window probes we must assure a timer
4137 * is still running there
4139 net->window_probe = 0;
4140 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4141 SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
4142 sctp_timeout_handler, &net->rxt_timer);
/* No flight and no probe: stop any stale retransmit timer. */
4144 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
4145 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4147 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
/*
 * Sanity fallback: sent_queue non-empty yet nothing in flight and no
 * retransmits pending — audit and, if the audit flags a problem,
 * rebuild the flight counters from the queue (the enclosing condition
 * lines are partly missing from this extract).
 */
4152 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
4153 (asoc->sent_queue_retran_cnt == 0) &&
4154 (win_probe_recovered == 0) &&
4157 * huh, this should not happen unless all packets are
4158 * PR-SCTP and marked to skip of course.
4160 if (sctp_fs_audit(asoc)) {
4161 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4162 net->flight_size = 0;
4164 asoc->total_flight = 0;
4165 asoc->total_flight_count = 0;
4166 asoc->sent_queue_retran_cnt = 0;
4167 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4168 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4169 sctp_flight_size_increase(tp1);
4170 sctp_total_flight_increase(stcb, tp1);
4171 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4172 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
4179 /**********************************/
4180 /* Now what about shutdown issues */
4181 /**********************************/
4182 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4183 /* nothing left on sendqueue.. consider done */
/*
 * Shutdown pending/received with exactly one stream still locked for
 * sending: decide whether the leftover pending item is an empty,
 * incomplete message (user never signalled EOR) — if so flag
 * PARTIAL_MSG_LEFT, which forces the abort path below.
 */
4185 if ((asoc->stream_queue_cnt == 1) &&
4186 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4187 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4188 (asoc->locked_on_sending)
4190 struct sctp_stream_queue_pending *sp;
4193 * I may be in a state where we got all across.. but
4194 * cannot write more due to a shutdown... we abort
4195 * since the user did not indicate EOR in this case.
4196 * The sp will be cleaned during free of the asoc.
4198 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4200 if ((sp) && (sp->length == 0)) {
4201 /* Let cleanup code purge it */
4202 if (sp->msg_is_complete) {
4203 asoc->stream_queue_cnt--;
4205 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4206 asoc->locked_on_sending = NULL;
4207 asoc->stream_queue_cnt--;
/* SHUTDOWN-PENDING and all streams drained: abort or send SHUTDOWN. */
4211 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4212 (asoc->stream_queue_cnt == 0)) {
4213 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4214 /* Need to abort here */
4220 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4221 0, M_NOWAIT, 1, MT_DATA);
4223 struct sctp_paramhdr *ph;
4225 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4226 ph = mtod(oper, struct sctp_paramhdr *);
4227 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4228 ph->param_length = htons(SCTP_BUF_LEN(oper));
4230 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
4231 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4233 struct sctp_nets *netp;
4235 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4236 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4237 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4239 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4240 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4241 sctp_stop_timers_for_shutdown(stcb);
/* Prefer the alternate destination for the SHUTDOWN if one is set. */
4242 if (asoc->alternate) {
4243 netp = asoc->alternate;
4245 netp = asoc->primary_destination;
4247 sctp_send_shutdown(stcb, netp);
4248 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4249 stcb->sctp_ep, stcb, netp);
4250 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4251 stcb->sctp_ep, stcb, netp);
/* Peer initiated the shutdown and we have drained: send SHUTDOWN-ACK. */
4253 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4254 (asoc->stream_queue_cnt == 0)) {
4255 struct sctp_nets *netp;
4257 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4260 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4261 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4262 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4263 sctp_stop_timers_for_shutdown(stcb);
4264 if (asoc->alternate) {
4265 netp = asoc->alternate;
4267 netp = asoc->primary_destination;
4269 sctp_send_shutdown_ack(stcb, netp);
4270 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
4271 stcb->sctp_ep, stcb, netp);
4274 /*********************************************/
4275 /* Here we perform PR-SCTP procedures */
4277 /*********************************************/
4278 /* C1. update advancedPeerAckPoint */
4279 if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
4280 asoc->advanced_peer_ack_point = cumack;
4282 /* PR-Sctp issues need to be addressed too */
4283 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
4284 struct sctp_tmit_chunk *lchk;
4285 uint32_t old_adv_peer_ack_point;
4287 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
4288 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
4289 /* C3. See if we need to send a Fwd-TSN */
4290 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
4292 * ISSUE with ECN, see FWD-TSN processing.
4294 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
4295 send_forward_tsn(stcb, asoc);
4297 /* try to FR fwd-tsn's that get lost too */
/* Three strikes on a FWD-TSN: fast-retransmit it. */
4298 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
4299 send_forward_tsn(stcb, asoc);
4304 /* Assure a timer is up */
4305 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
4306 stcb->sctp_ep, stcb, lchk->whoTo);
4309 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
4310 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
4312 stcb->asoc.peers_rwnd,
4313 stcb->asoc.total_flight,
4314 stcb->asoc.total_output_queue_size);
4319 sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
4320 struct sctp_tcb *stcb,
4321 uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
4322 int *abort_now, uint8_t flags,
4323 uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
4325 struct sctp_association *asoc;
4326 struct sctp_tmit_chunk *tp1, *tp2;
4327 uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
4328 uint16_t wake_him = 0;
4329 uint32_t send_s = 0;
4331 int accum_moved = 0;
4332 int will_exit_fast_recovery = 0;
4333 uint32_t a_rwnd, old_rwnd;
4334 int win_probe_recovery = 0;
4335 int win_probe_recovered = 0;
4336 struct sctp_nets *net = NULL;
4339 uint8_t reneged_all = 0;
4340 uint8_t cmt_dac_flag;
4343 * we take any chance we can to service our queues since we cannot
4344 * get awoken when the socket is read from :<
4347 * Now perform the actual SACK handling: 1) Verify that it is not an
4348 * old sack, if so discard. 2) If there is nothing left in the send
4349 * queue (cum-ack is equal to last acked) then you have a duplicate
4350 * too, update any rwnd change and verify no timers are running.
4351 * then return. 3) Process any new consequtive data i.e. cum-ack
4352 * moved process these first and note that it moved. 4) Process any
4353 * sack blocks. 5) Drop any acked from the queue. 6) Check for any
4354 * revoked blocks and mark. 7) Update the cwnd. 8) Nothing left,
4355 * sync up flightsizes and things, stop all timers and also check
4356 * for shutdown_pending state. If so then go ahead and send off the
4357 * shutdown. If in shutdown recv, send off the shutdown-ack and
4358 * start that timer, Ret. 9) Strike any non-acked things and do FR
4359 * procedure if needed being sure to set the FR flag. 10) Do pr-sctp
4360 * procedures. 11) Apply any FR penalties. 12) Assure we will SACK
4361 * if in shutdown_recv state.
4363 SCTP_TCB_LOCK_ASSERT(stcb);
4365 this_sack_lowest_newack = 0;
4366 SCTP_STAT_INCR(sctps_slowpath_sack);
4368 cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
4369 #ifdef SCTP_ASOCLOG_OF_TSNS
4370 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
4371 stcb->asoc.cumack_log_at++;
4372 if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
4373 stcb->asoc.cumack_log_at = 0;
4378 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
4379 sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
4380 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
4382 old_rwnd = stcb->asoc.peers_rwnd;
4383 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
4384 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
4385 stcb->asoc.overall_error_count,
4387 SCTP_FROM_SCTP_INDATA,
4390 stcb->asoc.overall_error_count = 0;
4392 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4393 sctp_log_sack(asoc->last_acked_seq,
4400 if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
4402 uint32_t *dupdata, dblock;
4404 for (i = 0; i < num_dup; i++) {
4405 dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
4406 sizeof(uint32_t), (uint8_t *) & dblock);
4407 if (dupdata == NULL) {
4410 sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
4413 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4415 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
4416 tp1 = TAILQ_LAST(&asoc->sent_queue,
4417 sctpchunk_listhead);
4418 send_s = tp1->rec.data.TSN_seq + 1;
4421 send_s = asoc->sending_seq;
4423 if (SCTP_TSN_GE(cum_ack, send_s)) {
4427 * no way, we have not even sent this TSN out yet.
4428 * Peer is hopelessly messed up with us.
4430 SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
4433 SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
4434 tp1->rec.data.TSN_seq, (void *)tp1);
4439 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
4440 0, M_NOWAIT, 1, MT_DATA);
4442 struct sctp_paramhdr *ph;
4445 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
4447 ph = mtod(oper, struct sctp_paramhdr *);
4448 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
4449 ph->param_length = htons(SCTP_BUF_LEN(oper));
4450 ippp = (uint32_t *) (ph + 1);
4451 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
4453 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
4454 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4458 /**********************/
4459 /* 1) check the range */
4460 /**********************/
4461 if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
4462 /* acking something behind */
4465 /* update the Rwnd of the peer */
4466 if (TAILQ_EMPTY(&asoc->sent_queue) &&
4467 TAILQ_EMPTY(&asoc->send_queue) &&
4468 (asoc->stream_queue_cnt == 0)) {
4469 /* nothing left on send/sent and strmq */
4470 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4471 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4472 asoc->peers_rwnd, 0, 0, a_rwnd);
4474 asoc->peers_rwnd = a_rwnd;
4475 if (asoc->sent_queue_retran_cnt) {
4476 asoc->sent_queue_retran_cnt = 0;
4478 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4479 /* SWS sender side engages */
4480 asoc->peers_rwnd = 0;
4482 /* stop any timers */
4483 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4484 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4485 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
4486 net->partial_bytes_acked = 0;
4487 net->flight_size = 0;
4489 asoc->total_flight = 0;
4490 asoc->total_flight_count = 0;
4494 * We init netAckSz and netAckSz2 to 0. These are used to track 2
4495 * things. The total byte count acked is tracked in netAckSz AND
4496 * netAck2 is used to track the total bytes acked that are un-
4497 * amibguious and were never retransmitted. We track these on a per
4498 * destination address basis.
4500 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4501 if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
4502 /* Drag along the window_tsn for cwr's */
4503 net->cwr_window_tsn = cum_ack;
4505 net->prev_cwnd = net->cwnd;
4510 * CMT: Reset CUC and Fast recovery algo variables before
4513 net->new_pseudo_cumack = 0;
4514 net->will_exit_fast_recovery = 0;
4515 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
4516 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
4519 /* process the new consecutive TSN first */
4520 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4521 if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
4522 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
4524 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
4526 * If it is less than ACKED, it is
4527 * now no-longer in flight. Higher
4528 * values may occur during marking
4530 if ((tp1->whoTo->dest_state &
4531 SCTP_ADDR_UNCONFIRMED) &&
4532 (tp1->snd_count < 2)) {
4534 * If there was no retran
4535 * and the address is
4536 * un-confirmed and we sent
4538 * sacked.. its confirmed,
4541 tp1->whoTo->dest_state &=
4542 ~SCTP_ADDR_UNCONFIRMED;
4544 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
4545 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4546 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
4547 tp1->whoTo->flight_size,
4549 (uintptr_t) tp1->whoTo,
4550 tp1->rec.data.TSN_seq);
4552 sctp_flight_size_decrease(tp1);
4553 sctp_total_flight_decrease(stcb, tp1);
4554 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
4555 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
4559 tp1->whoTo->net_ack += tp1->send_size;
4561 /* CMT SFR and DAC algos */
4562 this_sack_lowest_newack = tp1->rec.data.TSN_seq;
4563 tp1->whoTo->saw_newack = 1;
4565 if (tp1->snd_count < 2) {
4567 * True non-retransmited
4570 tp1->whoTo->net_ack2 +=
4573 /* update RTO too? */
4577 sctp_calculate_rto(stcb,
4579 &tp1->sent_rcv_time,
4580 sctp_align_safe_nocopy,
4581 SCTP_RTT_FROM_DATA);
4584 if (tp1->whoTo->rto_needed == 0) {
4585 tp1->whoTo->rto_needed = 1;
4591 * CMT: CUCv2 algorithm. From the
4592 * cumack'd TSNs, for each TSN being
4593 * acked for the first time, set the
4594 * following variables for the
4595 * corresp destination.
4596 * new_pseudo_cumack will trigger a
4598 * find_(rtx_)pseudo_cumack will
4599 * trigger search for the next
4600 * expected (rtx-)pseudo-cumack.
4602 tp1->whoTo->new_pseudo_cumack = 1;
4603 tp1->whoTo->find_pseudo_cumack = 1;
4604 tp1->whoTo->find_rtx_pseudo_cumack = 1;
4607 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4608 sctp_log_sack(asoc->last_acked_seq,
4610 tp1->rec.data.TSN_seq,
4613 SCTP_LOG_TSN_ACKED);
4615 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
4616 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
4619 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
4620 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
4621 #ifdef SCTP_AUDITING_ENABLED
4622 sctp_audit_log(0xB3,
4623 (asoc->sent_queue_retran_cnt & 0x000000ff));
4626 if (tp1->rec.data.chunk_was_revoked) {
4627 /* deflate the cwnd */
4628 tp1->whoTo->cwnd -= tp1->book_size;
4629 tp1->rec.data.chunk_was_revoked = 0;
4631 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4632 tp1->sent = SCTP_DATAGRAM_ACKED;
4639 biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
4640 /* always set this up to cum-ack */
4641 asoc->this_sack_highest_gap = last_tsn;
4643 if ((num_seg > 0) || (num_nr_seg > 0)) {
4646 * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
4647 * to be greater than the cumack. Also reset saw_newack to 0
4650 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4651 net->saw_newack = 0;
4652 net->this_sack_highest_newack = last_tsn;
4656 * thisSackHighestGap will increase while handling NEW
4657 * segments this_sack_highest_newack will increase while
4658 * handling NEWLY ACKED chunks. this_sack_lowest_newack is
4659 * used for CMT DAC algo. saw_newack will also change.
4661 if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
4662 &biggest_tsn_newly_acked, &this_sack_lowest_newack,
4663 num_seg, num_nr_seg, &rto_ok)) {
4666 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
4668 * validate the biggest_tsn_acked in the gap acks if
4669 * strict adherence is wanted.
4671 if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
4673 * peer is either confused or we are under
4674 * attack. We must abort.
4676 SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
4677 biggest_tsn_acked, send_s);
4682 /*******************************************/
4683 /* cancel ALL T3-send timer if accum moved */
4684 /*******************************************/
4685 if (asoc->sctp_cmt_on_off > 0) {
4686 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4687 if (net->new_pseudo_cumack)
4688 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4690 SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
4695 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4696 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4697 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
4701 /********************************************/
4702 /* drop the acked chunks from the sentqueue */
4703 /********************************************/
4704 asoc->last_acked_seq = cum_ack;
4706 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
4707 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
4710 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
4711 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
4712 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
4715 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
4719 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
4720 if (PR_SCTP_ENABLED(tp1->flags)) {
4721 if (asoc->pr_sctp_cnt != 0)
4722 asoc->pr_sctp_cnt--;
4724 asoc->sent_queue_cnt--;
4726 /* sa_ignore NO_NULL_CHK */
4727 sctp_free_bufspace(stcb, asoc, tp1, 1);
4728 sctp_m_freem(tp1->data);
4730 if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
4731 asoc->sent_queue_cnt_removeable--;
4734 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
4735 sctp_log_sack(asoc->last_acked_seq,
4737 tp1->rec.data.TSN_seq,
4740 SCTP_LOG_FREE_SENT);
4742 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4745 if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
4747 panic("Warning flight size is postive and should be 0");
4749 SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
4750 asoc->total_flight);
4752 asoc->total_flight = 0;
4754 /* sa_ignore NO_NULL_CHK */
4755 if ((wake_him) && (stcb->sctp_socket)) {
4756 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4760 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4761 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4762 sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
4764 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4765 so = SCTP_INP_SO(stcb->sctp_ep);
4766 atomic_add_int(&stcb->asoc.refcnt, 1);
4767 SCTP_TCB_UNLOCK(stcb);
4768 SCTP_SOCKET_LOCK(so, 1);
4769 SCTP_TCB_LOCK(stcb);
4770 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4771 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4772 /* assoc was freed while we were unlocked */
4773 SCTP_SOCKET_UNLOCK(so, 1);
4777 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4778 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4779 SCTP_SOCKET_UNLOCK(so, 1);
4782 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4783 sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
4787 if (asoc->fast_retran_loss_recovery && accum_moved) {
4788 if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
4789 /* Setup so we will exit RFC2582 fast recovery */
4790 will_exit_fast_recovery = 1;
4794 * Check for revoked fragments:
4796 * if Previous sack - Had no frags then we can't have any revoked if
4797 * Previous sack - Had frag's then - If we now have frags aka
4798 * num_seg > 0 call sctp_check_for_revoked() to tell if peer revoked
4799 * some of them. else - The peer revoked all ACKED fragments, since
4800 * we had some before and now we have NONE.
4804 sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
4805 asoc->saw_sack_with_frags = 1;
4806 } else if (asoc->saw_sack_with_frags) {
4807 int cnt_revoked = 0;
4809 /* Peer revoked all dg's marked or acked */
4810 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
4811 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
4812 tp1->sent = SCTP_DATAGRAM_SENT;
4813 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
4814 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
4815 tp1->whoTo->flight_size,
4817 (uintptr_t) tp1->whoTo,
4818 tp1->rec.data.TSN_seq);
4820 sctp_flight_size_increase(tp1);
4821 sctp_total_flight_increase(stcb, tp1);
4822 tp1->rec.data.chunk_was_revoked = 1;
4824 * To ensure that this increase in
4825 * flightsize, which is artificial, does not
4826 * throttle the sender, we also increase the
4827 * cwnd artificially.
4829 tp1->whoTo->cwnd += tp1->book_size;
4836 asoc->saw_sack_with_frags = 0;
4839 asoc->saw_sack_with_nr_frags = 1;
4841 asoc->saw_sack_with_nr_frags = 0;
4843 /* JRS - Use the congestion control given in the CC module */
4844 if (ecne_seen == 0) {
4845 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4846 if (net->net_ack2 > 0) {
4848 * Karn's rule applies to clearing error
4849 * count, this is optional.
4851 net->error_count = 0;
4852 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4853 /* addr came good */
4854 net->dest_state |= SCTP_ADDR_REACHABLE;
4855 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4856 0, (void *)net, SCTP_SO_NOT_LOCKED);
4858 if (net == stcb->asoc.primary_destination) {
4859 if (stcb->asoc.alternate) {
4861 * release the alternate,
4864 sctp_free_remote_addr(stcb->asoc.alternate);
4865 stcb->asoc.alternate = NULL;
4868 if (net->dest_state & SCTP_ADDR_PF) {
4869 net->dest_state &= ~SCTP_ADDR_PF;
4870 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4871 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4872 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4873 /* Done with this net */
4876 /* restore any doubled timers */
4877 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4878 if (net->RTO < stcb->asoc.minrto) {
4879 net->RTO = stcb->asoc.minrto;
4881 if (net->RTO > stcb->asoc.maxrto) {
4882 net->RTO = stcb->asoc.maxrto;
4886 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
4888 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4889 /* nothing left in-flight */
4890 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4891 /* stop all timers */
4892 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
4893 stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
4894 net->flight_size = 0;
4895 net->partial_bytes_acked = 0;
4897 asoc->total_flight = 0;
4898 asoc->total_flight_count = 0;
4900 /**********************************/
4901 /* Now what about shutdown issues */
4902 /**********************************/
4903 if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
4904 /* nothing left on sendqueue.. consider done */
4905 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
4906 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
4907 asoc->peers_rwnd, 0, 0, a_rwnd);
4909 asoc->peers_rwnd = a_rwnd;
4910 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4911 /* SWS sender side engages */
4912 asoc->peers_rwnd = 0;
4915 if ((asoc->stream_queue_cnt == 1) &&
4916 ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
4917 (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
4918 (asoc->locked_on_sending)
4920 struct sctp_stream_queue_pending *sp;
4923 * I may be in a state where we got all across.. but
4924 * cannot write more due to a shutdown... we abort
4925 * since the user did not indicate EOR in this case.
4927 sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
4929 if ((sp) && (sp->length == 0)) {
4930 asoc->locked_on_sending = NULL;
4931 if (sp->msg_is_complete) {
4932 asoc->stream_queue_cnt--;
4934 asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
4935 asoc->stream_queue_cnt--;
4939 if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
4940 (asoc->stream_queue_cnt == 0)) {
4941 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4942 /* Need to abort here */
4948 oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
4949 0, M_NOWAIT, 1, MT_DATA);
4951 struct sctp_paramhdr *ph;
4953 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
4954 ph = mtod(oper, struct sctp_paramhdr *);
4955 ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
4956 ph->param_length = htons(SCTP_BUF_LEN(oper));
4958 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
4959 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
4962 struct sctp_nets *netp;
4964 if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
4965 (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
4966 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4968 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
4969 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4970 sctp_stop_timers_for_shutdown(stcb);
4971 if (asoc->alternate) {
4972 netp = asoc->alternate;
4974 netp = asoc->primary_destination;
4976 sctp_send_shutdown(stcb, netp);
4977 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
4978 stcb->sctp_ep, stcb, netp);
4979 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
4980 stcb->sctp_ep, stcb, netp);
4983 } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
4984 (asoc->stream_queue_cnt == 0)) {
4985 struct sctp_nets *netp;
4987 if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
4990 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
4991 SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
4992 SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
4993 sctp_stop_timers_for_shutdown(stcb);
4994 if (asoc->alternate) {
4995 netp = asoc->alternate;
4997 netp = asoc->primary_destination;
4999 sctp_send_shutdown_ack(stcb, netp);
5000 sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
5001 stcb->sctp_ep, stcb, netp);
5006 * Now here we are going to recycle net_ack for a different use...
5009 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5014 * CMT DAC algorithm: If SACK DAC flag was 0, then no extra marking
5015 * to be done. Setting this_sack_lowest_newack to the cum_ack will
5016 * automatically ensure that.
5018 if ((asoc->sctp_cmt_on_off > 0) &&
5019 SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
5020 (cmt_dac_flag == 0)) {
5021 this_sack_lowest_newack = cum_ack;
5023 if ((num_seg > 0) || (num_nr_seg > 0)) {
5024 sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
5025 biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
5027 /* JRS - Use the congestion control given in the CC module */
5028 asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);
5030 /* Now are we exiting loss recovery ? */
5031 if (will_exit_fast_recovery) {
5032 /* Ok, we must exit fast recovery */
5033 asoc->fast_retran_loss_recovery = 0;
5035 if ((asoc->sat_t3_loss_recovery) &&
5036 SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
5037 /* end satellite t3 loss recovery */
5038 asoc->sat_t3_loss_recovery = 0;
5043 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5044 if (net->will_exit_fast_recovery) {
5045 /* Ok, we must exit fast recovery */
5046 net->fast_retran_loss_recovery = 0;
5050 /* Adjust and set the new rwnd value */
5051 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
5052 sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
5053 asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
5055 asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
5056 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
5057 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
5058 /* SWS sender side engages */
5059 asoc->peers_rwnd = 0;
5061 if (asoc->peers_rwnd > old_rwnd) {
5062 win_probe_recovery = 1;
5065 * Now we must setup so we have a timer up for anyone with
5071 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5072 if (win_probe_recovery && (net->window_probe)) {
5073 win_probe_recovered = 1;
5075 * Find first chunk that was used with
5076 * window probe and clear the event. Put
5077 * it back into the send queue as if has
5080 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5081 if (tp1->window_probe) {
5082 sctp_window_probe_recovery(stcb, asoc, tp1);
5087 if (net->flight_size) {
5089 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5090 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5091 stcb->sctp_ep, stcb, net);
5093 if (net->window_probe) {
5094 net->window_probe = 0;
5097 if (net->window_probe) {
5099 * In window probes we must assure a timer
5100 * is still running there
5102 if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5103 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5104 stcb->sctp_ep, stcb, net);
5107 } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
5108 sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
5110 SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
5115 (!TAILQ_EMPTY(&asoc->sent_queue)) &&
5116 (asoc->sent_queue_retran_cnt == 0) &&
5117 (win_probe_recovered == 0) &&
5120 * huh, this should not happen unless all packets are
5121 * PR-SCTP and marked to skip of course.
5123 if (sctp_fs_audit(asoc)) {
5124 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
5125 net->flight_size = 0;
5127 asoc->total_flight = 0;
5128 asoc->total_flight_count = 0;
5129 asoc->sent_queue_retran_cnt = 0;
5130 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
5131 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
5132 sctp_flight_size_increase(tp1);
5133 sctp_total_flight_increase(stcb, tp1);
5134 } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
5135 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
5142 /*********************************************/
5143 /* Here we perform PR-SCTP procedures */
5145 /*********************************************/
5146 /* C1. update advancedPeerAckPoint */
5147 if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
5148 asoc->advanced_peer_ack_point = cum_ack;
5150 /* C2. try to further move advancedPeerAckPoint ahead */
5151 if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
5152 struct sctp_tmit_chunk *lchk;
5153 uint32_t old_adv_peer_ack_point;
5155 old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
5156 lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
5157 /* C3. See if we need to send a Fwd-TSN */
5158 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
5160 * ISSUE with ECN, see FWD-TSN processing.
5162 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
5163 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
5164 0xee, cum_ack, asoc->advanced_peer_ack_point,
5165 old_adv_peer_ack_point);
5167 if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
5168 send_forward_tsn(stcb, asoc);
5170 /* try to FR fwd-tsn's that get lost too */
5171 if (lchk->rec.data.fwd_tsn_cnt >= 3) {
5172 send_forward_tsn(stcb, asoc);
5177 /* Assure a timer is up */
5178 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
5179 stcb->sctp_ep, stcb, lchk->whoTo);
5182 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
5183 sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
5185 stcb->asoc.peers_rwnd,
5186 stcb->asoc.total_flight,
5187 stcb->asoc.total_output_queue_size);
/*
 * sctp_update_acked() - process the cumulative TSN ack carried in a
 * SHUTDOWN chunk.  A SHUTDOWN acknowledges outstanding data like a SACK
 * with no gap reports, so we synthesize the missing a_rwnd value and
 * hand off to the express SACK handler.  *abort_flag is set by the
 * callee if the association must be torn down.
 */
5192 sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
5195 uint32_t cum_ack, a_rwnd;
/* Cumulative TSN ack arrives in network byte order. */
5197 cum_ack = ntohl(cp->cumulative_tsn_ack);
5198 /* Arrange so a_rwnd does NOT change */
/*
 * Add total_flight back in: SACK processing deducts in-flight bytes
 * from the advertised window, so reporting rwnd + flight here leaves
 * the computed peer rwnd unchanged, as the comment above intends.
 */
5199 a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
5201 /* Now call the express sack handling */
/* NOTE(review): trailing 0 is presumably ecne_seen == 0 - confirm. */
5202 sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
/*
 * sctp_kick_prsctp_reorder_queue() - after PR-SCTP (FWD-TSN) processing
 * has advanced strmin->last_sequence_delivered, flush the stream's
 * re-order queue in two passes: (1) deliver every queued entry whose SSN
 * is at or before the new delivery point, (2) resume strict in-order
 * delivery for any consecutive SSNs queued behind it, stopping at the
 * first gap.  Entries are pushed to the socket's read queue with
 * SCTP_READ_LOCK_HELD, so the caller must already hold that lock.
 */
5206 sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
5207 struct sctp_stream_in *strmin)
5209 struct sctp_queued_to_read *ctl, *nctl;
5210 struct sctp_association *asoc;
/* Pass 1: deliver everything at or before the advanced delivery point. */
5214 tt = strmin->last_sequence_delivered;
5216 * First deliver anything prior to and including the stream no that
/* Safe iteration: entries are removed from the queue while walking. */
5219 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5220 if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
5221 /* this is deliverable now */
5222 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5223 /* subtract pending on streams */
5224 asoc->size_on_all_streams -= ctl->length;
5225 sctp_ucount_decr(asoc->cnt_on_all_streams);
5226 /* deliver it to at least the delivery-q */
/* Only hand up if a socket still exists to receive it. */
5227 if (stcb->sctp_socket) {
5228 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5229 sctp_add_to_readq(stcb->sctp_ep, stcb,
5231 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Queue is SSN-ordered: first entry past tt ends pass 1. */
5234 /* no more delivery now. */
5239 * now we must deliver things in queue the normal way if any are
/*
 * Pass 2: continue normal in-order delivery starting at the SSN just
 * past the delivery point; each delivery advances the expected SSN.
 */
5242 tt = strmin->last_sequence_delivered + 1;
5243 TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
5244 if (tt == ctl->sinfo_ssn) {
5245 /* this is deliverable now */
5246 TAILQ_REMOVE(&strmin->inqueue, ctl, next);
5247 /* subtract pending on streams */
5248 asoc->size_on_all_streams -= ctl->length;
5249 sctp_ucount_decr(asoc->cnt_on_all_streams);
5250 /* deliver it to at least the delivery-q */
/* Record the newly delivered SSN before handing the entry up. */
5251 strmin->last_sequence_delivered = ctl->sinfo_ssn;
5252 if (stcb->sctp_socket) {
5253 sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
5254 sctp_add_to_readq(stcb->sctp_ep, stcb,
5256 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
/* Advance to the next expected SSN; a gap terminates delivery. */
5259 tt = strmin->last_sequence_delivered + 1;
/*
 * sctp_flush_reassm_for_str_seq() - purge from the reassembly queue
 * every fragment belonging to (stream, seq); called from FWD-TSN
 * handling when the peer has abandoned that message.  Fragments that
 * are unordered, or on a different stream, are left untouched.  As
 * fragments are tossed, the association's tsn_last_delivered / pdapi
 * bookkeeping and the stream's last_sequence_delivered are advanced so
 * later delivery logic does not stall on the skipped message.
 */
5267 sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
5268 struct sctp_association *asoc,
5269 uint16_t stream, uint16_t seq)
5271 struct sctp_tmit_chunk *chk, *nchk;
5273 /* For each one on here see if we need to toss it */
5275 * For now large messages held on the reasmqueue that are complete
5276 * will be tossed too. We could in theory do more work to spin
5277 * through and stop after dumping one msg aka seeing the start of a
5278 * new msg at the head, and call the delivery function... to see if
5279 * it can be delivered... But for now we just dump everything on the
/* Safe iteration: matching fragments are unlinked and freed in-loop. */
5282 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5284 * Do not toss it if on a different stream or marked for
5285 * unordered delivery in which case the stream sequence
5286 * number has no meaning.
5288 if ((chk->rec.data.stream_number != stream) ||
5289 ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
5292 if (chk->rec.data.stream_seq == seq) {
5293 /* It needs to be tossed */
5294 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
/* Track the highest tossed TSN and its pdapi identity. */
5295 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5296 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5297 asoc->str_of_pdapi = chk->rec.data.stream_number;
5298 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5299 asoc->fragment_flags = chk->rec.data.rcv_flags;
/* Undo the reassembly-queue accounting for the removed fragment. */
5301 asoc->size_on_reasm_queue -= chk->send_size;
5302 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5304 /* Clear up any stream problem */
5305 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5306 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5308 * We must dump forward this streams
5309 * sequence number if the chunk is not
5310 * unordered that is being skipped. There is
5311 * a chance that if the peer does not
5312 * include the last fragment in its FWD-TSN
5313 * we WILL have a problem here since you
5314 * would have a partial chunk in queue that
5315 * may not be deliverable. Also if a Partial
5316 * delivery API as started the user may get
5317 * a partial chunk. The next read returning
5318 * a new chunk... really ugly but I see no
5319 * way around it! Maybe a notify??
5321 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
/* Release the fragment's mbuf data, then the chunk itself. */
5324 sctp_m_freem(chk->data);
5327 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
/* Queue is ordered by SSN: once past the victim seq we can stop. */
5328 } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
5330 * If the stream_seq is > than the purging one, we
5340 sctp_handle_forward_tsn(struct sctp_tcb *stcb,
5341 struct sctp_forward_tsn_chunk *fwd,
5342 int *abort_flag, struct mbuf *m, int offset)
5344 /* The pr-sctp fwd tsn */
5346 * here we will perform all the data receiver side steps for
5347 * processing FwdTSN, as required in by pr-sctp draft:
5349 * Assume we get FwdTSN(x):
5351 * 1) update local cumTSN to x 2) try to further advance cumTSN to x +
5352 * others we have 3) examine and update re-ordering queue on
5353 * pr-in-streams 4) clean up re-assembly queue 5) Send a sack to
5354 * report where we are.
5356 struct sctp_association *asoc;
5357 uint32_t new_cum_tsn, gap;
5358 unsigned int i, fwd_sz, m_size;
5360 struct sctp_stream_in *strm;
5361 struct sctp_tmit_chunk *chk, *nchk;
5362 struct sctp_queued_to_read *ctl, *sv;
5365 if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
5366 SCTPDBG(SCTP_DEBUG_INDATA1,
5367 "Bad size too small/big fwd-tsn\n");
5370 m_size = (stcb->asoc.mapping_array_size << 3);
5371 /*************************************************************/
5372 /* 1. Here we update local cumTSN and shift the bitmap array */
5373 /*************************************************************/
5374 new_cum_tsn = ntohl(fwd->new_cumulative_tsn);
5376 if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
5377 /* Already got there ... */
5381 * now we know the new TSN is more advanced, let's find the actual
5384 SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
5385 asoc->cumulative_tsn = new_cum_tsn;
5386 if (gap >= m_size) {
5387 if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
5391 * out of range (of single byte chunks in the rwnd I
5392 * give out). This must be an attacker.
5395 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
5396 0, M_NOWAIT, 1, MT_DATA);
5398 struct sctp_paramhdr *ph;
5401 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
5402 (sizeof(uint32_t) * 3);
5403 ph = mtod(oper, struct sctp_paramhdr *);
5404 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
5405 ph->param_length = htons(SCTP_BUF_LEN(oper));
5406 ippp = (uint32_t *) (ph + 1);
5407 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
5409 *ippp = asoc->highest_tsn_inside_map;
5411 *ippp = new_cum_tsn;
5413 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
5414 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
5417 SCTP_STAT_INCR(sctps_fwdtsn_map_over);
5419 memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
5420 asoc->mapping_array_base_tsn = new_cum_tsn + 1;
5421 asoc->highest_tsn_inside_map = new_cum_tsn;
5423 memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
5424 asoc->highest_tsn_inside_nr_map = new_cum_tsn;
5426 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
5427 sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
5430 SCTP_TCB_LOCK_ASSERT(stcb);
5431 for (i = 0; i <= gap; i++) {
5432 if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
5433 !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
5434 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
5435 if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
5436 asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
5441 /*************************************************************/
5442 /* 2. Clear up re-assembly queue */
5443 /*************************************************************/
5445 * First service it if pd-api is up, just in case we can progress it
5448 if (asoc->fragmented_delivery_inprogress) {
5449 sctp_service_reassembly(stcb, asoc);
5451 /* For each one on here see if we need to toss it */
5453 * For now large messages held on the reasmqueue that are complete
5454 * will be tossed too. We could in theory do more work to spin
5455 * through and stop after dumping one msg aka seeing the start of a
5456 * new msg at the head, and call the delivery function... to see if
5457 * it can be delivered... But for now we just dump everything on the
5460 TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
5461 if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
5462 /* It needs to be tossed */
5463 TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
5464 if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
5465 asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
5466 asoc->str_of_pdapi = chk->rec.data.stream_number;
5467 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
5468 asoc->fragment_flags = chk->rec.data.rcv_flags;
5470 asoc->size_on_reasm_queue -= chk->send_size;
5471 sctp_ucount_decr(asoc->cnt_on_reasm_queue);
5473 /* Clear up any stream problem */
5474 if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
5475 SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
5477 * We must dump forward this streams
5478 * sequence number if the chunk is not
5479 * unordered that is being skipped. There is
5480 * a chance that if the peer does not
5481 * include the last fragment in its FWD-TSN
5482 * we WILL have a problem here since you
5483 * would have a partial chunk in queue that
5484 * may not be deliverable. Also if a Partial
5485 * delivery API as started the user may get
5486 * a partial chunk. The next read returning
5487 * a new chunk... really ugly but I see no
5488 * way around it! Maybe a notify??
5490 asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
5493 sctp_m_freem(chk->data);
5496 sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
5499 * Ok we have gone beyond the end of the fwd-tsn's
5505 /*******************************************************/
5506 /* 3. Update the PR-stream re-ordering queues and fix */
5507 /* delivery issues as needed. */
5508 /*******************************************************/
5509 fwd_sz -= sizeof(*fwd);
5512 unsigned int num_str;
5513 struct sctp_strseq *stseq, strseqbuf;
5515 offset += sizeof(*fwd);
5517 SCTP_INP_READ_LOCK(stcb->sctp_ep);
5518 num_str = fwd_sz / sizeof(struct sctp_strseq);
5519 for (i = 0; i < num_str; i++) {
5522 stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
5523 sizeof(struct sctp_strseq),
5524 (uint8_t *) & strseqbuf);
5525 offset += sizeof(struct sctp_strseq);
5526 if (stseq == NULL) {
5530 st = ntohs(stseq->stream);
5532 st = ntohs(stseq->sequence);
5533 stseq->sequence = st;
5538 * Ok we now look for the stream/seq on the read
5539 * queue where its not all delivered. If we find it
5540 * we transmute the read entry into a PDI_ABORTED.
5542 if (stseq->stream >= asoc->streamincnt) {
5543 /* screwed up streams, stop! */
5546 if ((asoc->str_of_pdapi == stseq->stream) &&
5547 (asoc->ssn_of_pdapi == stseq->sequence)) {
5549 * If this is the one we were partially
5550 * delivering now then we no longer are.
5551 * Note this will change with the reassembly
5554 asoc->fragmented_delivery_inprogress = 0;
5556 sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
5557 TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
5558 if ((ctl->sinfo_stream == stseq->stream) &&
5559 (ctl->sinfo_ssn == stseq->sequence)) {
5560 str_seq = (stseq->stream << 16) | stseq->sequence;
5562 ctl->pdapi_aborted = 1;
5563 sv = stcb->asoc.control_pdapi;
5564 stcb->asoc.control_pdapi = ctl;
5565 sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
5567 SCTP_PARTIAL_DELIVERY_ABORTED,
5569 SCTP_SO_NOT_LOCKED);
5570 stcb->asoc.control_pdapi = sv;
5572 } else if ((ctl->sinfo_stream == stseq->stream) &&
5573 SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
5574 /* We are past our victim SSN */
5578 strm = &asoc->strmin[stseq->stream];
5579 if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
5580 /* Update the sequence number */
5581 strm->last_sequence_delivered = stseq->sequence;
5583 /* now kick the stream the new way */
5584 /* sa_ignore NO_NULL_CHK */
5585 sctp_kick_prsctp_reorder_queue(stcb, strm);
5587 SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
5590 * Now slide thing forward.
5592 sctp_slide_mapping_arrays(stcb);
5594 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
5595 /* now lets kick out and check for more fragmented delivery */
5596 /* sa_ignore NO_NULL_CHK */
5597 sctp_deliver_reasm_check(stcb, &stcb->asoc);