/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
/*
 * NOTES: On the outbound side of things I need to check the sack timer to
 * see if I should generate a sack into the chunk queue (if I have data to
 * send, that is) and will be sending it ... for bundling.
 *
 * The callback in sctp_usrreq.c will get called when the socket is read
 * from. This will cause sctp_service_queues() to get called on the top
 * entry in the list.
 */
void
sctp_set_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    asoc->my_rwnd = sctp_calc_rwnd(stcb, asoc);
}
/* Calculate what the rwnd would be */
uint32_t
sctp_calc_rwnd(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    uint32_t calc = 0;

    /*
     * This is really set wrong with respect to a 1-to-many socket. Since
     * the sb_cc is the count that everyone has put up, when we re-write
     * sctp_soreceive we will fix this so that ONLY this association's
     * data is taken into account.
     */
    if (stcb->sctp_socket == NULL) {
        return (calc);
    }
    if (stcb->asoc.sb_cc == 0 &&
        asoc->size_on_reasm_queue == 0 &&
        asoc->size_on_all_streams == 0) {
        /* Full rwnd granted */
        calc = max(SCTP_SB_LIMIT_RCV(stcb->sctp_socket), SCTP_MINIMAL_RWND);
        return (calc);
    }
    /* get actual space */
    calc = (uint32_t) sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv);
    /*
     * take out what has NOT been put on the socket queue and that we
     * still hold for putting up.
     */
    calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_reasm_queue +
        asoc->cnt_on_reasm_queue * MSIZE));
    calc = sctp_sbspace_sub(calc, (uint32_t) (asoc->size_on_all_streams +
        asoc->cnt_on_all_streams * MSIZE));
    if (calc == 0) {
        /* out of space */
        return (calc);
    }
    /* what is the overhead of all these rwnd's */
    calc = sctp_sbspace_sub(calc, stcb->asoc.my_rwnd_control_len);
    /*
     * If the window gets too small due to ctrl-stuff, reduce it to 1,
     * even if it is 0. SWS engaged.
     */
    if (calc < stcb->asoc.my_rwnd_control_len) {
        calc = 1;
    }
    return (calc);
}
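
/*
 * Worked example of the arithmetic above (illustrative numbers only, not
 * taken from any real configuration): with SCTP_SB_LIMIT_RCV() of 64000
 * bytes, 4000 bytes held on the reassembly queue in 4 chunks, nothing on
 * the stream queues, and an assumed MSIZE of 256, the window reported to
 * the peer is roughly
 *
 *	sctp_sbspace(...) - (4000 + 4 * 256) - my_rwnd_control_len
 *
 * Every chunk we hold but have not delivered costs its payload plus one
 * MSIZE of bookkeeping overhead, so a slow reader shrinks the advertised
 * window faster than raw byte counts alone would suggest.
 */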
/*
 * Build out our readq entry based on the incoming packet.
 */
struct sctp_queued_to_read *
sctp_build_readq_entry(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    uint32_t tsn, uint32_t ppid,
    uint32_t context, uint16_t stream_no,
    uint16_t stream_seq, uint8_t flags,
    struct mbuf *dm)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    read_queue_e->sinfo_stream = stream_no;
    read_queue_e->sinfo_ssn = stream_seq;
    read_queue_e->sinfo_flags = (flags << 8);
    read_queue_e->sinfo_ppid = ppid;
    read_queue_e->sinfo_context = context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = tsn;
    read_queue_e->sinfo_cumtsn = tsn;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = net;
    read_queue_e->length = 0;
    atomic_add_int(&net->ref_count, 1);
    read_queue_e->data = dm;
    read_queue_e->spec_flags = 0;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->aux_data = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->some_taken = 0;
    read_queue_e->pdapi_aborted = 0;
failed_build:
    return (read_queue_e);
}
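
/*
 * Note on sinfo_flags above (behavioural note, assuming the usual flag
 * encodings): the DATA chunk flags (SCTP_DATA_UNORDERED, the first/last
 * fragment bits, ...) live in the low byte on the wire, while the
 * sctp_sndrcvinfo flag bits occupy the low byte of sinfo_flags, so the
 * chunk flags are parked in the high byte via (flags << 8). For example,
 * an unordered chunk (flag 0x04) would show up here as 0x0400.
 */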
/*
 * Build out our readq entry based on the incoming packet.
 */
static struct sctp_queued_to_read *
sctp_build_readq_entry_chk(struct sctp_tcb *stcb,
    struct sctp_tmit_chunk *chk)
{
    struct sctp_queued_to_read *read_queue_e = NULL;

    sctp_alloc_a_readq(stcb, read_queue_e);
    if (read_queue_e == NULL) {
        goto failed_build;
    }
    read_queue_e->sinfo_stream = chk->rec.data.stream_number;
    read_queue_e->sinfo_ssn = chk->rec.data.stream_seq;
    read_queue_e->sinfo_flags = (chk->rec.data.rcv_flags << 8);
    read_queue_e->sinfo_ppid = chk->rec.data.payloadtype;
    read_queue_e->sinfo_context = stcb->asoc.context;
    read_queue_e->sinfo_timetolive = 0;
    read_queue_e->sinfo_tsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_cumtsn = chk->rec.data.TSN_seq;
    read_queue_e->sinfo_assoc_id = sctp_get_associd(stcb);
    read_queue_e->whoFrom = chk->whoTo;
    read_queue_e->aux_data = NULL;
    read_queue_e->length = 0;
    atomic_add_int(&chk->whoTo->ref_count, 1);
    read_queue_e->data = chk->data;
    read_queue_e->tail_mbuf = NULL;
    read_queue_e->stcb = stcb;
    read_queue_e->port_from = stcb->rport;
    read_queue_e->spec_flags = 0;
    read_queue_e->do_not_ref_stcb = 0;
    read_queue_e->end_added = 0;
    read_queue_e->some_taken = 0;
    read_queue_e->pdapi_aborted = 0;
failed_build:
    return (read_queue_e);
}
struct mbuf *
sctp_build_ctl_nchunk(struct sctp_inpcb *inp, struct sctp_sndrcvinfo *sinfo)
{
    struct sctp_extrcvinfo *seinfo;
    struct sctp_sndrcvinfo *outinfo;
    struct sctp_rcvinfo *rcvinfo;
    struct sctp_nxtinfo *nxtinfo;
    struct cmsghdr *cmh;
    struct mbuf *ret;
    int len;
    int use_extended;
    int provide_nxt;

    if (sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVRCVINFO) &&
        sctp_is_feature_off(inp, SCTP_PCB_FLAGS_RECVNXTINFO)) {
        /* user does not want any ancillary data */
        return (NULL);
    }
    len = 0;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        len += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    seinfo = (struct sctp_extrcvinfo *)sinfo;
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVNXTINFO) &&
        (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_AVAIL)) {
        provide_nxt = 1;
        len += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    } else {
        provide_nxt = 0;
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_EXT_RCVINFO)) {
            use_extended = 1;
            len += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            use_extended = 0;
            len += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    } else {
        use_extended = 0;
    }

    ret = sctp_get_mbuf_for_msg(len, 0, M_DONTWAIT, 1, MT_DATA);
    if (ret == NULL) {
        /* No space */
        return (ret);
    }
    SCTP_BUF_LEN(ret) = 0;

    /* We need a CMSG header followed by the struct */
    cmh = mtod(ret, struct cmsghdr *);
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVRCVINFO)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_rcvinfo));
        cmh->cmsg_type = SCTP_RCVINFO;
        rcvinfo = (struct sctp_rcvinfo *)CMSG_DATA(cmh);
        rcvinfo->rcv_sid = sinfo->sinfo_stream;
        rcvinfo->rcv_ssn = sinfo->sinfo_ssn;
        rcvinfo->rcv_flags = sinfo->sinfo_flags;
        rcvinfo->rcv_ppid = sinfo->sinfo_ppid;
        rcvinfo->rcv_tsn = sinfo->sinfo_tsn;
        rcvinfo->rcv_cumtsn = sinfo->sinfo_cumtsn;
        rcvinfo->rcv_context = sinfo->sinfo_context;
        rcvinfo->rcv_assoc_id = sinfo->sinfo_assoc_id;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_rcvinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_rcvinfo));
    }
    if (provide_nxt) {
        cmh->cmsg_level = IPPROTO_SCTP;
        cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_nxtinfo));
        cmh->cmsg_type = SCTP_NXTINFO;
        nxtinfo = (struct sctp_nxtinfo *)CMSG_DATA(cmh);
        nxtinfo->nxt_sid = seinfo->sreinfo_next_stream;
        nxtinfo->nxt_flags = 0;
        if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_UNORDERED) {
            nxtinfo->nxt_flags |= SCTP_UNORDERED;
        }
        if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_IS_NOTIFICATION) {
            nxtinfo->nxt_flags |= SCTP_NOTIFICATION;
        }
        if (seinfo->sreinfo_next_flags & SCTP_NEXT_MSG_ISCOMPLETE) {
            nxtinfo->nxt_flags |= SCTP_COMPLETE;
        }
        nxtinfo->nxt_ppid = seinfo->sreinfo_next_ppid;
        nxtinfo->nxt_length = seinfo->sreinfo_next_length;
        nxtinfo->nxt_assoc_id = seinfo->sreinfo_next_aid;
        cmh = (struct cmsghdr *)((caddr_t)cmh + CMSG_SPACE(sizeof(struct sctp_nxtinfo)));
        SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_nxtinfo));
    }
    if (sctp_is_feature_on(inp, SCTP_PCB_FLAGS_RECVDATAIOEVNT)) {
        cmh->cmsg_level = IPPROTO_SCTP;
        outinfo = (struct sctp_sndrcvinfo *)CMSG_DATA(cmh);
        if (use_extended) {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_extrcvinfo));
            cmh->cmsg_type = SCTP_EXTRCV;
            memcpy(outinfo, sinfo, sizeof(struct sctp_extrcvinfo));
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_extrcvinfo));
        } else {
            cmh->cmsg_len = CMSG_LEN(sizeof(struct sctp_sndrcvinfo));
            cmh->cmsg_type = SCTP_SNDRCV;
            *outinfo = *sinfo;
            SCTP_BUF_LEN(ret) += CMSG_SPACE(sizeof(struct sctp_sndrcvinfo));
        }
    }
    return (ret);
}
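
/*
 * Sketch of how a userland consumer would walk the ancillary data built
 * above (illustrative only, not part of this file's build; assumes a
 * struct msghdr "msg" filled in by recvmsg() on an SCTP socket with the
 * matching socket options enabled):
 */
#if 0
    struct cmsghdr *cmsg;
    struct sctp_rcvinfo rcv;

    for (cmsg = CMSG_FIRSTHDR(&msg); cmsg != NULL;
        cmsg = CMSG_NXTHDR(&msg, cmsg)) {
        if (cmsg->cmsg_level != IPPROTO_SCTP)
            continue;
        switch (cmsg->cmsg_type) {
        case SCTP_RCVINFO:
            /* stream id, SSN, TSN, ... for this message */
            memcpy(&rcv, CMSG_DATA(cmsg), sizeof(rcv));
            break;
        case SCTP_NXTINFO:
            /* a peek at the next queued message, if any */
            break;
        default:
            break;
        }
    }
#endif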
static void
sctp_mark_non_revokable(struct sctp_association *asoc, uint32_t tsn)
{
    uint32_t gap, i, cumackp1;
    int fnd = 0;

    if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
        return;
    }
    cumackp1 = asoc->cumulative_tsn + 1;
    if (SCTP_TSN_GT(cumackp1, tsn)) {
        /*
         * this tsn is behind the cum ack and thus we don't need to
         * worry about it being moved from one to the other.
         */
        return;
    }
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
        SCTP_PRINTF("gap:%x tsn:%x\n", gap, tsn);
        sctp_print_mapping_array(asoc);
#ifdef INVARIANTS
        panic("Things are really messed up now!!");
#endif
    }
    SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
    SCTP_UNSET_TSN_PRESENT(asoc->mapping_array, gap);
    if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
        asoc->highest_tsn_inside_nr_map = tsn;
    }
    if (tsn == asoc->highest_tsn_inside_map) {
        /* We must back down to see what the new highest is */
        for (i = tsn - 1; SCTP_TSN_GE(i, asoc->mapping_array_base_tsn); i--) {
            SCTP_CALC_TSN_TO_GAP(gap, i, asoc->mapping_array_base_tsn);
            if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap)) {
                asoc->highest_tsn_inside_map = i;
                fnd = 1;
                break;
            }
        }
        if (!fnd) {
            asoc->highest_tsn_inside_map = asoc->mapping_array_base_tsn - 1;
        }
    }
}
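
/*
 * Example of the gap arithmetic (assumed values): with
 * mapping_array_base_tsn = 0xfffffffe and tsn = 0x00000001,
 * SCTP_CALC_TSN_TO_GAP() is expected to yield gap = 3, i.e. the macro
 * handles the 32-bit serial-number wrap, so bit 3 of the mapping arrays
 * tracks that TSN even though tsn < base numerically.
 */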
/*
 * We are delivering currently from the reassembly queue. We must continue
 * to deliver until we either: 1) run out of space, 2) run out of sequential
 * TSN's, or 3) hit the SCTP_DATA_LAST_FRAG flag.
 */
static void
sctp_service_reassembly(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk, *nchk;
    uint16_t nxt_todel;
    uint16_t stream_no;
    int end = 0;
    int cntDel;
    struct sctp_queued_to_read *control, *ctl, *nctl;

    if (stcb == NULL)
        return;

    cntDel = stream_no = 0;
    if ((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET)) {
        /* socket above is long gone or going.. */
abandon:
        asoc->fragmented_delivery_inprogress = 0;
        TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
            TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
            asoc->size_on_reasm_queue -= chk->send_size;
            sctp_ucount_decr(asoc->cnt_on_reasm_queue);
            /*
             * Lose the data pointer, since it's in the socket
             * buffer.
             */
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            /* Now free the address and data */
            sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
            /* sa_ignore FREED_MEMORY */
        }
        return;
    }
    SCTP_TCB_LOCK_ASSERT(stcb);
    TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
        if (chk->rec.data.TSN_seq != (asoc->tsn_last_delivered + 1)) {
            /* Can't deliver more :< */
            return;
        }
        stream_no = chk->rec.data.stream_number;
        nxt_todel = asoc->strmin[stream_no].last_sequence_delivered + 1;
        if (nxt_todel != chk->rec.data.stream_seq &&
            (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
            /*
             * Not the next sequence to deliver in its stream OR
             * unordered
             */
            return;
        }
        if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
            control = sctp_build_readq_entry_chk(stcb, chk);
            if (control == NULL) {
                /* out of memory? */
                return;
            }
            /* save it off for our future deliveries */
            stcb->asoc.control_pdapi = control;
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
                end = 1;
            else
                end = 0;
            sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
            sctp_add_to_readq(stcb->sctp_ep,
                stcb, control, &stcb->sctp_socket->so_rcv, end,
                SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
            cntDel++;
        } else {
            if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG)
                end = 1;
            else
                end = 0;
            sctp_mark_non_revokable(asoc, chk->rec.data.TSN_seq);
            if (sctp_append_to_readq(stcb->sctp_ep, stcb,
                stcb->asoc.control_pdapi,
                chk->data, end, chk->rec.data.TSN_seq,
                &stcb->sctp_socket->so_rcv)) {
                /*
                 * something is very wrong, either
                 * control_pdapi is NULL, or the tail_mbuf
                 * is corrupt, or there is an EOM already on
                 * the mbuf chain.
                 */
                if (stcb->asoc.state & SCTP_STATE_ABOUT_TO_BE_FREED) {
                    goto abandon;
                } else {
#ifdef INVARIANTS
                    if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
                        panic("This should not happen control_pdapi NULL?");
                    }
                    /* if we did not panic, it was a EOM */
                    panic("Bad chunking ??");
#else
                    if ((stcb->asoc.control_pdapi == NULL) || (stcb->asoc.control_pdapi->tail_mbuf == NULL)) {
                        SCTP_PRINTF("This should not happen control_pdapi NULL?\n");
                    }
                    SCTP_PRINTF("Bad chunking ??\n");
                    SCTP_PRINTF("Dumping re-assembly queue this will probably hose the association\n");
#endif
                    goto abandon;
                }
            }
            cntDel++;
        }
        /* pull it we did it */
        TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            asoc->fragmented_delivery_inprogress = 0;
            if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0) {
                asoc->strmin[stream_no].last_sequence_delivered++;
            }
            if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
                SCTP_STAT_INCR_COUNTER64(sctps_reasmusrmsgs);
            }
        } else if (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
            /*
             * turn the flag back on since we just delivered
             * yet another one.
             */
            asoc->fragmented_delivery_inprogress = 1;
        }
        asoc->tsn_of_pdapi_last_delivered = chk->rec.data.TSN_seq;
        asoc->last_flags_delivered = chk->rec.data.rcv_flags;
        asoc->last_strm_seq_delivered = chk->rec.data.stream_seq;
        asoc->last_strm_no_delivered = chk->rec.data.stream_number;

        asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
        asoc->size_on_reasm_queue -= chk->send_size;
        sctp_ucount_decr(asoc->cnt_on_reasm_queue);
        /* free up the chk */
        chk->data = NULL;
        sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);

        if (asoc->fragmented_delivery_inprogress == 0) {
            /*
             * Now lets see if we can deliver the next one on
             * the stream
             */
            struct sctp_stream_in *strm;

            strm = &asoc->strmin[stream_no];
            nxt_todel = strm->last_sequence_delivered + 1;
            TAILQ_FOREACH_SAFE(ctl, &strm->inqueue, next, nctl) {
                /* Deliver more if we can. */
                if (nxt_todel == ctl->sinfo_ssn) {
                    TAILQ_REMOVE(&strm->inqueue, ctl, next);
                    asoc->size_on_all_streams -= ctl->length;
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    strm->last_sequence_delivered++;
                    sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
                    sctp_add_to_readq(stcb->sctp_ep, stcb,
                        ctl,
                        &stcb->sctp_socket->so_rcv, 1,
                        SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
                } else {
                    break;
                }
                nxt_todel = strm->last_sequence_delivered + 1;
            }
            break;
        }
    }
}
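
/*
 * Delivery walk example (hypothetical queue state): with
 * tsn_last_delivered = 100 and the reassembly queue holding TSNs 101(F),
 * 102(M), 104(L) (F/M/L = first/middle/last fragment), the loop above
 * pushes 101 and 102 to the socket and then stops at the 103 hole; 104
 * stays queued until the gap fills, and fragmented_delivery_inprogress
 * remains set.
 */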
/*
 * Queue the chunk either right into the socket buffer if it is the next one
 * to go OR put it in the correct place in the delivery queue. If we do
 * append to the so_buf, keep doing so until we are out of order. One big
 * question still remains, what to do when the socket buffer is FULL??
 */
static void
sctp_queue_data_to_stream(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_queued_to_read *control, int *abort_flag)
{
    /*
     * FIX-ME maybe? What happens when the ssn wraps? If we are getting
     * all the data in one stream this could happen quite rapidly. One
     * could use the TSN to keep track of things, but this scheme breaks
     * down in the other type of stream usage that could occur. Send a
     * single msg to stream 0, send 4 billion messages to stream 1, now
     * send a message to stream 0. You have a situation where the TSN
     * has wrapped but not in the stream. Is this worth worrying about,
     * or should we just change our queue sort at the bottom to be by
     * TSN?
     *
     * Could it also be legal for a peer to send ssn 1 with TSN 2 and ssn 2
     * with TSN 1? If the peer is doing some sort of funky TSN/SSN
     * assignment this could happen... and I don't see how this would be
     * a violation. So for now I am undecided and will leave the sort by
     * SSN alone. Maybe a hybrid approach is the answer.
     */
    struct sctp_stream_in *strm;
    struct sctp_queued_to_read *at;
    int queue_needed;
    uint16_t nxt_todel;
    struct mbuf *oper;

    queue_needed = 1;
    asoc->size_on_all_streams += control->length;
    sctp_ucount_incr(asoc->cnt_on_all_streams);
    strm = &asoc->strmin[control->sinfo_stream];
    nxt_todel = strm->last_sequence_delivered + 1;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
        sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INTO_STRD);
    }
    SCTPDBG(SCTP_DEBUG_INDATA1,
        "queue to stream called for ssn:%u lastdel:%u nxt:%u\n",
        (uint32_t) control->sinfo_stream,
        (uint32_t) strm->last_sequence_delivered,
        (uint32_t) nxt_todel);
    if (SCTP_SSN_GE(strm->last_sequence_delivered, control->sinfo_ssn)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "Duplicate S-SEQ:%d delivered:%d from peer, Abort association\n",
            control->sinfo_ssn, strm->last_sequence_delivered);
protocol_error:
        /*
         * throw it in the stream so it gets cleaned up in
         * association destruction
         */
        TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
            0, M_DONTWAIT, 1, MT_DATA);
        if (oper) {
            struct sctp_paramhdr *ph;
            uint32_t *ippp;

            SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                (sizeof(uint32_t) * 3);
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(SCTP_BUF_LEN(oper));
            ippp = (uint32_t *) (ph + 1);
            *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_1);
            ippp++;
            *ippp = control->sinfo_tsn;
            ippp++;
            *ippp = ((control->sinfo_stream << 16) | control->sinfo_ssn);
        }
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_1;
        sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return;
    }
    if (nxt_todel == control->sinfo_ssn) {
        /* can be delivered right away? */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
            sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_IMMED_DEL);
        }
        /* EY it wont be queued if it could be delivered directly */
        queue_needed = 0;
        asoc->size_on_all_streams -= control->length;
        sctp_ucount_decr(asoc->cnt_on_all_streams);
        strm->last_sequence_delivered++;

        sctp_mark_non_revokable(asoc, control->sinfo_tsn);
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control,
            &stcb->sctp_socket->so_rcv, 1,
            SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
        TAILQ_FOREACH_SAFE(control, &strm->inqueue, next, at) {
            /* all delivered */
            nxt_todel = strm->last_sequence_delivered + 1;
            if (nxt_todel == control->sinfo_ssn) {
                TAILQ_REMOVE(&strm->inqueue, control, next);
                asoc->size_on_all_streams -= control->length;
                sctp_ucount_decr(asoc->cnt_on_all_streams);
                strm->last_sequence_delivered++;
                /*
                 * We ignore the return of deliver_data here
                 * since we always can hold the chunk on the
                 * d-queue. And we have a finite number that
                 * can be delivered from the strq.
                 */
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                    sctp_log_strm_del(control, NULL,
                        SCTP_STR_LOG_FROM_IMMED_DEL);
                }
                sctp_mark_non_revokable(asoc, control->sinfo_tsn);
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    control,
                    &stcb->sctp_socket->so_rcv, 1,
                    SCTP_READ_LOCK_NOT_HELD,
                    SCTP_SO_NOT_LOCKED);
                continue;
            }
            break;
        }
    }
    if (queue_needed) {
        /*
         * Ok, we did not deliver this guy, find the correct place
         * to put it on the queue.
         */
        if (SCTP_TSN_GE(asoc->cumulative_tsn, control->sinfo_tsn)) {
            goto protocol_error;
        }
        if (TAILQ_EMPTY(&strm->inqueue)) {
            /* Empty queue */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                sctp_log_strm_del(control, NULL, SCTP_STR_LOG_FROM_INSERT_HD);
            }
            TAILQ_INSERT_HEAD(&strm->inqueue, control, next);
        } else {
            TAILQ_FOREACH(at, &strm->inqueue, next) {
                if (SCTP_SSN_GT(at->sinfo_ssn, control->sinfo_ssn)) {
                    /*
                     * one in queue is bigger than the
                     * new one, insert before this one
                     */
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                        sctp_log_strm_del(control, at,
                            SCTP_STR_LOG_FROM_INSERT_MD);
                    }
                    TAILQ_INSERT_BEFORE(at, control, next);
                    break;
                } else if (at->sinfo_ssn == control->sinfo_ssn) {
                    /*
                     * Gak, He sent me a duplicate str
                     * seq number
                     */
                    /*
                     * foo bar, I guess I will just free
                     * this new guy, should we abort
                     * too? FIX ME MAYBE? Or it COULD be
                     * that the SSN's have wrapped.
                     * Maybe I should compare to TSN
                     * somehow... sigh for now just blow
                     * away the chunk!
                     */
                    if (control->data)
                        sctp_m_freem(control->data);
                    control->data = NULL;
                    asoc->size_on_all_streams -= control->length;
                    sctp_ucount_decr(asoc->cnt_on_all_streams);
                    if (control->whoFrom) {
                        sctp_free_remote_addr(control->whoFrom);
                        control->whoFrom = NULL;
                    }
                    sctp_free_a_readq(stcb, control);
                    return;
                } else {
                    if (TAILQ_NEXT(at, next) == NULL) {
                        /*
                         * We are at the end, insert
                         * it after this one
                         */
                        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
                            sctp_log_strm_del(control, at,
                                SCTP_STR_LOG_FROM_INSERT_TL);
                        }
                        TAILQ_INSERT_AFTER(&strm->inqueue,
                            at, control, next);
                        break;
                    }
                }
            }
        }
    }
}
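
/*
 * Concrete instance of the FIX-ME above (hypothetical traffic pattern):
 * deliver SSN 65535 on stream 3, then SSN 0. SCTP_SSN_GE(65535, 0) is
 * false under 16-bit serial arithmetic, so the wrap is handled; but if
 * stream 3 stays idle while 2^32 TSNs pass on other streams, TSN-based
 * ordering would break where the SSN sort above still works.
 */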
/*
 * Returns two things: you get the total size of the deliverable parts of
 * the first fragmented message on the reassembly queue, and you get a 1
 * back if all of the message is ready or a 0 back if the message is still
 * incomplete.
 */
static int
sctp_is_all_msg_on_reasm(struct sctp_association *asoc, uint32_t *t_size)
{
    struct sctp_tmit_chunk *chk;
    uint32_t tsn;

    *t_size = 0;
    chk = TAILQ_FIRST(&asoc->reasmqueue);
    if (chk == NULL) {
        /* nothing on the queue */
        return (0);
    }
    if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == 0) {
        /* Not a first on the queue */
        return (0);
    }
    tsn = chk->rec.data.TSN_seq;
    TAILQ_FOREACH(chk, &asoc->reasmqueue, sctp_next) {
        if (tsn != chk->rec.data.TSN_seq) {
            return (0);
        }
        *t_size += chk->send_size;
        if (chk->rec.data.rcv_flags & SCTP_DATA_LAST_FRAG) {
            return (1);
        }
        tsn++;
    }
    return (0);
}
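
/*
 * Example (made-up queue): fragments with TSNs 10(F, 1000 bytes),
 * 11(M, 1000), 12(L, 400) give *t_size = 2400 and a return of 1; drop
 * the LAST fragment and the same walk returns 0 with *t_size = 2000,
 * the deliverable prefix used for the partial-delivery decision below.
 */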
static void
sctp_deliver_reasm_check(struct sctp_tcb *stcb, struct sctp_association *asoc)
{
    struct sctp_tmit_chunk *chk;
    uint16_t nxt_todel;
    uint32_t tsize, pd_point;

doit_again:
    chk = TAILQ_FIRST(&asoc->reasmqueue);
    if (chk == NULL) {
        /* Huh? */
        asoc->size_on_reasm_queue = 0;
        asoc->cnt_on_reasm_queue = 0;
        return;
    }
    if (asoc->fragmented_delivery_inprogress == 0) {
        nxt_todel =
            asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
        if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
            (nxt_todel == chk->rec.data.stream_seq ||
            (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
            /*
             * Yep the first one is here and its ok to deliver,
             * but should we?
             */
            if (stcb->sctp_socket) {
                pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
                    stcb->sctp_ep->partial_delivery_point);
            } else {
                pd_point = stcb->sctp_ep->partial_delivery_point;
            }
            if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
                /*
                 * Yes, we setup to start reception, by
                 * backing down the TSN just in case we
                 * can't deliver.
                 */
                asoc->fragmented_delivery_inprogress = 1;
                asoc->tsn_last_delivered =
                    chk->rec.data.TSN_seq - 1;
                asoc->str_of_pdapi =
                    chk->rec.data.stream_number;
                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
                asoc->pdapi_ppid = chk->rec.data.payloadtype;
                asoc->fragment_flags = chk->rec.data.rcv_flags;
                sctp_service_reassembly(stcb, asoc);
            }
        }
    } else {
        /*
         * Service re-assembly will deliver stream data queued at
         * the end of fragmented delivery.. but it wont know to go
         * back and call itself again... we do that here with the
         * doit_again label.
         */
        sctp_service_reassembly(stcb, asoc);
        if (asoc->fragmented_delivery_inprogress == 0) {
            /*
             * finished our Fragmented delivery, could be more
             * waiting?
             */
            goto doit_again;
        }
    }
}
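
/*
 * pd_point example (assumed tuning): with a 64000-byte receive buffer
 * and partial_delivery_point set to 4096, a fragmented message starts
 * up the PD-API as soon as 4096 sequential bytes are queued, even
 * though the full message may be far larger; readers then see pieces
 * arrive before the final fragment does.
 */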
/*
 * Dump onto the re-assembly queue, in its proper place. After dumping on
 * the queue, see if anything can be delivered. If so pull it off (or as
 * much as we can). If we run out of space then we must dump what we can
 * and set the appropriate flag to say we queued what we could.
 */
static void
sctp_queue_data_for_reasm(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct sctp_tmit_chunk *chk, int *abort_flag)
{
    struct mbuf *oper;
    uint32_t cum_ackp1, prev_tsn, post_tsn;
    struct sctp_tmit_chunk *at, *prev, *next;

    prev = next = NULL;
    cum_ackp1 = asoc->tsn_last_delivered + 1;
    if (TAILQ_EMPTY(&asoc->reasmqueue)) {
        /* This is the first one on the queue */
        TAILQ_INSERT_HEAD(&asoc->reasmqueue, chk, sctp_next);
        /*
         * we do not check for delivery of anything when only one
         * fragment is here
         */
        asoc->size_on_reasm_queue = chk->send_size;
        sctp_ucount_incr(asoc->cnt_on_reasm_queue);
        if (chk->rec.data.TSN_seq == cum_ackp1) {
            if (asoc->fragmented_delivery_inprogress == 0 &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) !=
                SCTP_DATA_FIRST_FRAG) {
                /*
                 * An empty queue, no delivery inprogress,
                 * we hit the next one and it does NOT have
                 * a FIRST fragment mark.
                 */
                SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not first, no fragmented delivery in progress\n");
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);
                if (oper) {
                    struct sctp_paramhdr *ph;
                    uint32_t *ippp;

                    SCTP_BUF_LEN(oper) =
                        sizeof(struct sctp_paramhdr) +
                        (sizeof(uint32_t) * 3);
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_2);
                    ippp++;
                    *ippp = chk->rec.data.TSN_seq;
                    ippp++;
                    *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_2;
                sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                *abort_flag = 1;
            } else if (asoc->fragmented_delivery_inprogress &&
                (chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
                /*
                 * We are doing a partial delivery and the
                 * NEXT chunk MUST be either the LAST or
                 * MIDDLE fragment NOT a FIRST
                 */
                SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS a first and fragmented delivery in progress\n");
                oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                    0, M_DONTWAIT, 1, MT_DATA);
                if (oper) {
                    struct sctp_paramhdr *ph;
                    uint32_t *ippp;

                    SCTP_BUF_LEN(oper) =
                        sizeof(struct sctp_paramhdr) +
                        (3 * sizeof(uint32_t));
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type =
                        htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                    ippp = (uint32_t *) (ph + 1);
                    *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_3);
                    ippp++;
                    *ippp = chk->rec.data.TSN_seq;
                    ippp++;
                    *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_3;
                sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                *abort_flag = 1;
            } else if (asoc->fragmented_delivery_inprogress) {
                /*
                 * Here we are ok with a MIDDLE or LAST
                 * piece
                 */
                if (chk->rec.data.stream_number !=
                    asoc->str_of_pdapi) {
                    /* Got to be the right STR No */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream number %d vs %d\n",
                        chk->rec.data.stream_number,
                        asoc->str_of_pdapi);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (sizeof(uint32_t) * 3);
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_4);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_4;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                } else if ((asoc->fragment_flags & SCTP_DATA_UNORDERED) !=
                    SCTP_DATA_UNORDERED &&
                    chk->rec.data.stream_seq != asoc->ssn_of_pdapi) {
                    /* Got to be the right STR Seq */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it IS not same stream seq %d vs %d\n",
                        chk->rec.data.stream_seq,
                        asoc->ssn_of_pdapi);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_5);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_5;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                }
            }
        }
        return;
    }
    /* Find its place */
    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (SCTP_TSN_GT(at->rec.data.TSN_seq, chk->rec.data.TSN_seq)) {
            /*
             * one in queue is bigger than the new one, insert
             * before this one
             */
            /* A check */
            asoc->size_on_reasm_queue += chk->send_size;
            sctp_ucount_incr(asoc->cnt_on_reasm_queue);
            next = at;
            TAILQ_INSERT_BEFORE(at, chk, sctp_next);
            break;
        } else if (at->rec.data.TSN_seq == chk->rec.data.TSN_seq) {
            /* Gak, He sent me a duplicate str seq number */
            /*
             * foo bar, I guess I will just free this new guy,
             * should we abort too? FIX ME MAYBE? Or it COULD be
             * that the SSN's have wrapped. Maybe I should
             * compare to TSN somehow... sigh for now just blow
             * away the chunk!
             */
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
            return;
        } else {
            prev = at;
            if (TAILQ_NEXT(at, sctp_next) == NULL) {
                /*
                 * We are at the end, insert it after this
                 * one
                 */
                /* check it first */
                asoc->size_on_reasm_queue += chk->send_size;
                sctp_ucount_incr(asoc->cnt_on_reasm_queue);
                TAILQ_INSERT_AFTER(&asoc->reasmqueue, at, chk, sctp_next);
                break;
            }
        }
    }
    /* Now the audits */
    if (prev) {
        prev_tsn = chk->rec.data.TSN_seq - 1;
        if (prev_tsn == prev->rec.data.TSN_seq) {
            /*
             * Ok the one I am dropping onto the end is the
             * NEXT. A bit of validation here.
             */
            if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_FIRST_FRAG ||
                (prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG) {
                /*
                 * Insert chk MUST be a MIDDLE or LAST
                 * fragment
                 */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_FIRST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - It can be a middle or last but not a first\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, it's a FIRST!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_6);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_6;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
                if (chk->rec.data.stream_number !=
                    prev->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTP_PRINTF("Prev check - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                        chk->rec.data.stream_number,
                        prev->rec.data.stream_number);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_7);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_7;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
                if ((prev->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    prev->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                        chk->rec.data.stream_seq,
                        prev->rec.data.stream_seq);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_8);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_8;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
            } else if ((prev->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /* Insert chk MUST be a FIRST */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Prev check - Gak, evil plot, its not FIRST and it must be!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_9);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_9;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
            }
        }
    }
    if (next) {
        post_tsn = chk->rec.data.TSN_seq + 1;
        if (post_tsn == next->rec.data.TSN_seq) {
            /*
             * Ok the one I am inserting ahead of is my NEXT
             * one. A bit of validation here.
             */
            if (next->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) {
                /* Insert chk MUST be a last fragment */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK)
                    != SCTP_DATA_LAST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is FIRST, we must be LAST\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, its not a last!\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_10);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_10;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
            } else if ((next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_MIDDLE_FRAG ||
                (next->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                SCTP_DATA_LAST_FRAG) {
                /*
                 * Insert chk CAN be MIDDLE or FIRST NOT
                 * LAST
                 */
                if ((chk->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) ==
                    SCTP_DATA_LAST_FRAG) {
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Next is a MIDDLE/LAST\n");
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Gak, Evil plot, new prev chunk is a LAST\n");
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_11);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_11;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
                if (chk->rec.data.stream_number !=
                    next->rec.data.stream_number) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, ssn:%d not the same as at:%d\n",
                        chk->rec.data.stream_number,
                        next->rec.data.stream_number);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_12);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_12;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
                if ((next->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == 0 &&
                    chk->rec.data.stream_seq !=
                    next->rec.data.stream_seq) {
                    /*
                     * Huh, need the correct STR here,
                     * they must be the same.
                     */
                    SCTPDBG(SCTP_DEBUG_INDATA1, "Next chk - Gak, Evil plot, sseq:%d not the same as at:%d\n",
                        chk->rec.data.stream_seq,
                        next->rec.data.stream_seq);
                    oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                        0, M_DONTWAIT, 1, MT_DATA);
                    if (oper) {
                        struct sctp_paramhdr *ph;
                        uint32_t *ippp;

                        SCTP_BUF_LEN(oper) =
                            sizeof(struct sctp_paramhdr) +
                            (3 * sizeof(uint32_t));
                        ph = mtod(oper,
                            struct sctp_paramhdr *);
                        ph->param_type =
                            htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                        ph->param_length =
                            htons(SCTP_BUF_LEN(oper));
                        ippp = (uint32_t *) (ph + 1);
                        *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_13);
                        ippp++;
                        *ippp = chk->rec.data.TSN_seq;
                        ippp++;
                        *ippp = ((chk->rec.data.stream_number << 16) | chk->rec.data.stream_seq);
                    }
                    stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_13;
                    sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                    *abort_flag = 1;
                    return;
                }
            }
        }
    }
    /* Do we need to do some delivery? check */
    sctp_deliver_reasm_check(stcb, asoc);
}
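
/*
 * The audits above encode the only legal neighbour pairings for adjacent
 * TSNs: a FIRST or MIDDLE fragment may be followed only by a MIDDLE or
 * LAST of the same stream/SSN, and a LAST only by a FIRST. For example,
 * TSNs 7(F) 8(M) 9(L) 10(F) are fine, while 7(F) followed by 8(F) proves
 * the peer broken or evil and aborts the association.
 */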
/*
 * This is an unfortunate routine. It checks to make sure an evil guy is
 * not stuffing us full of bad packet fragments. A broken peer could also
 * do this but this is doubtful. It is too bad I must worry about evil
 * crackers sigh :< more cycles.
 */
static int
sctp_does_tsn_belong_to_reasm(struct sctp_association *asoc,
    uint32_t TSN_seq)
{
    struct sctp_tmit_chunk *at;
    uint32_t tsn_est;

    TAILQ_FOREACH(at, &asoc->reasmqueue, sctp_next) {
        if (SCTP_TSN_GT(TSN_seq, at->rec.data.TSN_seq)) {
            /* is it one bigger? */
            tsn_est = at->rec.data.TSN_seq + 1;
            if (tsn_est == TSN_seq) {
                /* yep. It better be a last then */
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_LAST_FRAG) {
                    /*
                     * Ok this guy belongs next to a guy
                     * that is NOT last, it should be a
                     * middle/last, not a complete
                     * chunk.
                     */
                    return (1);
                } else {
                    /*
                     * This guy is ok since its a LAST
                     * and the new chunk is a fully
                     * self-contained one.
                     */
                    return (0);
                }
            }
        } else if (TSN_seq == at->rec.data.TSN_seq) {
            /* Software error since I have a dup? */
            return (1);
        } else {
            /*
             * Ok, 'at' is larger than new chunk but does it
             * need to be right before it?
             */
            tsn_est = TSN_seq + 1;
            if (tsn_est == at->rec.data.TSN_seq) {
                /* Yep, It better be a first */
                if ((at->rec.data.rcv_flags & SCTP_DATA_FRAG_MASK) !=
                    SCTP_DATA_FIRST_FRAG) {
                    return (1);
                } else {
                    return (0);
                }
            }
        }
    }
    return (0);
}
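
/*
 * Usage example (hypothetical state): with the reassembly queue holding
 * TSN 50 marked MIDDLE, an arriving non-fragmented chunk with TSN 51
 * makes this routine return 1, and the caller treats the peer as
 * broken/evil and aborts instead of queueing the impossible neighbour.
 */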
static int
sctp_process_a_data_chunk(struct sctp_tcb *stcb, struct sctp_association *asoc,
    struct mbuf **m, int offset, struct sctp_data_chunk *ch, int chk_length,
    struct sctp_nets *net, uint32_t *high_tsn, int *abort_flag,
    int *break_flag, int last_chunk)
{
    /* Process a data chunk */
    /* struct sctp_tmit_chunk *chk; */
    struct sctp_tmit_chunk *chk;
    uint32_t tsn, gap;
    struct mbuf *dmbuf;
    int the_len;
    int need_reasm_check = 0;
    uint16_t strmno, strmseq;
    struct mbuf *oper;
    struct sctp_queued_to_read *control;
    int ordered;
    uint32_t protocol_id;
    uint8_t chunk_flags;
    struct sctp_stream_reset_list *liste;

    chk = NULL;
    tsn = ntohl(ch->dp.tsn);
    chunk_flags = ch->ch.chunk_flags;
    if ((chunk_flags & SCTP_DATA_SACK_IMMEDIATELY) == SCTP_DATA_SACK_IMMEDIATELY) {
        asoc->send_sack = 1;
    }
    protocol_id = ch->dp.protocol_id;
    ordered = ((chunk_flags & SCTP_DATA_UNORDERED) == 0);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
        sctp_log_map(tsn, asoc->cumulative_tsn, asoc->highest_tsn_inside_map, SCTP_MAP_TSN_ENTERS);
    }
    if (stcb == NULL) {
        return (0);
    }
    SCTP_LTRACE_CHK(stcb->sctp_ep, stcb, ch->ch.chunk_type, tsn);
    if (SCTP_TSN_GE(asoc->cumulative_tsn, tsn)) {
        /* It is a duplicate */
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /* Calculate the number of TSN's between the base and this TSN */
    SCTP_CALC_TSN_TO_GAP(gap, tsn, asoc->mapping_array_base_tsn);
    if (gap >= (SCTP_MAPPING_ARRAY << 3)) {
        /* Can't hold the bit in the mapping at max array, toss it */
        return (0);
    }
    if (gap >= (uint32_t) (asoc->mapping_array_size << 3)) {
        SCTP_TCB_LOCK_ASSERT(stcb);
        if (sctp_expand_mapping_array(asoc, gap)) {
            /* Can't expand, drop it */
            return (0);
        }
    }
    if (SCTP_TSN_GT(tsn, *high_tsn)) {
        *high_tsn = tsn;
    }
    /* See if we have received this one already */
    if (SCTP_IS_TSN_PRESENT(asoc->mapping_array, gap) ||
        SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, gap)) {
        SCTP_STAT_INCR(sctps_recvdupdata);
        if (asoc->numduptsns < SCTP_MAX_DUP_TSNS) {
            /* Record a dup for the next outbound sack */
            asoc->dup_tsns[asoc->numduptsns] = tsn;
            asoc->numduptsns++;
        }
        asoc->send_sack = 1;
        return (0);
    }
    /*
     * Check to see about the GONE flag, duplicates would cause a sack
     * to be sent up above
     */
    if (((stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_GONE) ||
        (stcb->sctp_ep->sctp_flags & SCTP_PCB_FLAGS_SOCKET_ALLGONE) ||
        (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET))) {
        /*
         * wait a minute, this guy is gone, there is no longer a
         * receiver. Send peer an ABORT!
         */
        struct mbuf *op_err;

        op_err = sctp_generate_invmanparam(SCTP_CAUSE_OUT_OF_RESC);
        sctp_abort_an_association(stcb->sctp_ep, stcb, op_err, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    /*
     * Now before going further we see if there is room. If NOT then we
     * MAY let one through only IF this TSN is the one we are waiting
     * for on a partial delivery API.
     */

    /* now do the tests */
    if (((asoc->cnt_on_all_streams +
        asoc->cnt_on_reasm_queue +
        asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) ||
        (((int)asoc->my_rwnd) <= 0)) {
        /*
         * When we have NO room in the rwnd we check to make sure
         * the reader is doing its job...
         */
        if (stcb->sctp_socket->so_rcv.sb_cc) {
            /* some to read, wake-up */
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            struct socket *so;

            so = SCTP_INP_SO(stcb->sctp_ep);
            atomic_add_int(&stcb->asoc.refcnt, 1);
            SCTP_TCB_UNLOCK(stcb);
            SCTP_SOCKET_LOCK(so, 1);
            SCTP_TCB_LOCK(stcb);
            atomic_subtract_int(&stcb->asoc.refcnt, 1);
            if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
                /* assoc was freed while we were unlocked */
                SCTP_SOCKET_UNLOCK(so, 1);
                return (0);
            }
#endif
            sctp_sorwakeup(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
            SCTP_SOCKET_UNLOCK(so, 1);
#endif
        }
        /* now is it in the mapping array of what we have accepted? */
        if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map) &&
            SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
            /* Nope not in the valid range dump it */
            sctp_set_rwnd(stcb, asoc);
            if ((asoc->cnt_on_all_streams +
                asoc->cnt_on_reasm_queue +
                asoc->cnt_msg_on_sb) >= SCTP_BASE_SYSCTL(sctp_max_chunks_on_queue)) {
                SCTP_STAT_INCR(sctps_datadropchklmt);
            } else {
                SCTP_STAT_INCR(sctps_datadroprwnd);
            }
            *break_flag = 1;
            return (0);
        }
    }
    strmno = ntohs(ch->dp.stream_id);
    if (strmno >= asoc->streamincnt) {
        struct sctp_paramhdr *phdr;
        struct mbuf *mb;

        mb = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) * 2),
            0, M_DONTWAIT, 1, MT_DATA);
        if (mb != NULL) {
            /* add some space up front so prepend will work well */
            SCTP_BUF_RESV_UF(mb, sizeof(struct sctp_chunkhdr));
            phdr = mtod(mb, struct sctp_paramhdr *);
            /*
             * Error causes are just param's and this one has
             * two back to back phdr, one with the error type
             * and size, the other with the streamid and a rsvd
             */
            SCTP_BUF_LEN(mb) = (sizeof(struct sctp_paramhdr) * 2);
            phdr->param_type = htons(SCTP_CAUSE_INVALID_STREAM);
            phdr->param_length =
                htons(sizeof(struct sctp_paramhdr) * 2);
            phdr++;
            /* We insert the stream in the type field */
            phdr->param_type = ch->dp.stream_id;
            /* And set the length to 0 for the rsvd field */
            phdr->param_length = 0;
            sctp_queue_op_err(stcb, mb);
        }
        SCTP_STAT_INCR(sctps_badsid);
        SCTP_TCB_LOCK_ASSERT(stcb);
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
        if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
            asoc->highest_tsn_inside_nr_map = tsn;
        }
        if (tsn == (asoc->cumulative_tsn + 1)) {
            /* Update cum-ack */
            asoc->cumulative_tsn = tsn;
        }
        return (0);
    }
    /*
     * Before we continue lets validate that we are not being fooled by
     * an evil attacker. We can only have 4k chunks based on our TSN
     * spread allowed by the mapping array 512 * 8 bits, so there is no
     * way our stream sequence numbers could have wrapped. We of course
     * only validate the FIRST fragment so the bit must be set.
     */
    strmseq = ntohs(ch->dp.stream_sequence);
#ifdef SCTP_ASOCLOG_OF_TSNS
    SCTP_TCB_LOCK_ASSERT(stcb);
    if (asoc->tsn_in_at >= SCTP_TSN_LOG_SIZE) {
        asoc->tsn_in_at = 0;
        asoc->tsn_in_wrapped = 1;
    }
    asoc->in_tsnlog[asoc->tsn_in_at].tsn = tsn;
    asoc->in_tsnlog[asoc->tsn_in_at].strm = strmno;
    asoc->in_tsnlog[asoc->tsn_in_at].seq = strmseq;
    asoc->in_tsnlog[asoc->tsn_in_at].sz = chk_length;
    asoc->in_tsnlog[asoc->tsn_in_at].flgs = chunk_flags;
    asoc->in_tsnlog[asoc->tsn_in_at].stcb = (void *)stcb;
    asoc->in_tsnlog[asoc->tsn_in_at].in_pos = asoc->tsn_in_at;
    asoc->in_tsnlog[asoc->tsn_in_at].in_out = 1;
    asoc->tsn_in_at++;
#endif
    if ((chunk_flags & SCTP_DATA_FIRST_FRAG) &&
        (TAILQ_EMPTY(&asoc->resetHead)) &&
        (chunk_flags & SCTP_DATA_UNORDERED) == 0 &&
        SCTP_SSN_GE(asoc->strmin[strmno].last_sequence_delivered, strmseq)) {
        /* The incoming sseq is behind where we last delivered? */
        SCTPDBG(SCTP_DEBUG_INDATA1, "EVIL/Broken-Dup S-SEQ:%d delivered:%d from peer, Abort!\n",
            strmseq, asoc->strmin[strmno].last_sequence_delivered);
        oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
            0, M_DONTWAIT, 1, MT_DATA);
        if (oper) {
            struct sctp_paramhdr *ph;
            uint32_t *ippp;

            SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                (3 * sizeof(uint32_t));
            ph = mtod(oper, struct sctp_paramhdr *);
            ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
            ph->param_length = htons(SCTP_BUF_LEN(oper));
            ippp = (uint32_t *) (ph + 1);
            *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_14);
            ippp++;
            *ippp = tsn;
            ippp++;
            *ippp = ((strmno << 16) | strmseq);
        }
        stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_14;
        sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
        *abort_flag = 1;
        return (0);
    }
    /************************************
     * From here down we may find ch-> invalid
     * so its a good idea NOT to use it.
     *************************************/

    the_len = (chk_length - sizeof(struct sctp_data_chunk));
    if (last_chunk == 0) {
        dmbuf = SCTP_M_COPYM(*m,
            (offset + sizeof(struct sctp_data_chunk)),
            the_len, M_DONTWAIT);
#ifdef SCTP_MBUF_LOGGING
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MBUF_LOGGING_ENABLE) {
            struct mbuf *mat;

            for (mat = dmbuf; mat; mat = SCTP_BUF_NEXT(mat)) {
                if (SCTP_BUF_IS_EXTENDED(mat)) {
                    sctp_log_mb(mat, SCTP_MBUF_ICOPY);
                }
            }
        }
#endif
    } else {
        /* We can steal the last chunk */
        int l_len;

        dmbuf = *m;
        /* lop off the top part */
        m_adj(dmbuf, (offset + sizeof(struct sctp_data_chunk)));
        if (SCTP_BUF_NEXT(dmbuf) == NULL) {
            l_len = SCTP_BUF_LEN(dmbuf);
        } else {
            /*
             * need to count up the size hopefully does not hit
             * this too often :-0
             */
            struct mbuf *lat;

            l_len = 0;
            for (lat = dmbuf; lat; lat = SCTP_BUF_NEXT(lat)) {
                l_len += SCTP_BUF_LEN(lat);
            }
        }
        if (l_len > the_len) {
            /* Trim the end round bytes off too */
            m_adj(dmbuf, -(l_len - the_len));
        }
    }
    if (dmbuf == NULL) {
        SCTP_STAT_INCR(sctps_nomem);
        return (0);
    }
    if ((chunk_flags & SCTP_DATA_NOT_FRAG) == SCTP_DATA_NOT_FRAG &&
        asoc->fragmented_delivery_inprogress == 0 &&
        TAILQ_EMPTY(&asoc->resetHead) &&
        ((ordered == 0) ||
        ((uint16_t) (asoc->strmin[strmno].last_sequence_delivered + 1) == strmseq &&
        TAILQ_EMPTY(&asoc->strmin[strmno].inqueue)))) {
        /* Candidate for express delivery */
        /*
         * Its not fragmented, No PD-API is up, Nothing in the
         * delivery queue, Its un-ordered OR ordered and the next to
         * deliver AND nothing else is stuck on the stream queue,
         * And there is room for it in the socket buffer. Lets just
         * stuff it up the buffer....
         */

        /* It would be nice to avoid this copy if we could :< */
        sctp_alloc_a_readq(stcb, control);
        sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
            protocol_id,
            strmno, strmseq,
            chunk_flags,
            dmbuf);
        if (control == NULL) {
            goto failed_express_del;
        }
        SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
        if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
            asoc->highest_tsn_inside_nr_map = tsn;
        }
        sctp_add_to_readq(stcb->sctp_ep, stcb,
            control, &stcb->sctp_socket->so_rcv,
            1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);

        if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
            /* for ordered, bump what we delivered */
            asoc->strmin[strmno].last_sequence_delivered++;
        }
        SCTP_STAT_INCR(sctps_recvexpress);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
            sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno,
                SCTP_STR_LOG_FROM_EXPRS_DEL);
        }
        control = NULL;

        goto finish_express_del;
    }
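    /*
     * Express-delivery recap (behavioural note, not new logic): a chunk
     * takes the fast path above only when it is unfragmented, no PD-API
     * is active, no stream reset is pending, and (for ordered data) it
     * is exactly the next SSN with nothing queued ahead of it;
     * everything else falls through to the slower queueing paths below.
     */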
1760 /* If we reach here this is a new chunk */
1763 /* Express for fragmented delivery? */
1764 if ((asoc->fragmented_delivery_inprogress) &&
1765 (stcb->asoc.control_pdapi) &&
1766 (asoc->str_of_pdapi == strmno) &&
1767 (asoc->ssn_of_pdapi == strmseq)
1769 control = stcb->asoc.control_pdapi;
1770 if ((chunk_flags & SCTP_DATA_FIRST_FRAG) == SCTP_DATA_FIRST_FRAG) {
1771 /* Can't be another first? */
1772 goto failed_pdapi_express_del;
1774 if (tsn == (control->sinfo_tsn + 1)) {
1775 /* Yep, we can add it on */
1778 if (chunk_flags & SCTP_DATA_LAST_FRAG) {
1781 if (sctp_append_to_readq(stcb->sctp_ep, stcb, control, dmbuf, end,
1783 &stcb->sctp_socket->so_rcv)) {
1784 SCTP_PRINTF("Append fails end:%d\n", end);
1785 goto failed_pdapi_express_del;
1787 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1788 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1789 asoc->highest_tsn_inside_nr_map = tsn;
1791 SCTP_STAT_INCR(sctps_recvexpressm);
1792 control->sinfo_tsn = tsn;
1793 asoc->tsn_last_delivered = tsn;
1794 asoc->fragment_flags = chunk_flags;
1795 asoc->tsn_of_pdapi_last_delivered = tsn;
1796 asoc->last_flags_delivered = chunk_flags;
1797 asoc->last_strm_seq_delivered = strmseq;
1798 asoc->last_strm_no_delivered = strmno;
1800 /* clean up the flags and such */
1801 asoc->fragmented_delivery_inprogress = 0;
1802 if ((chunk_flags & SCTP_DATA_UNORDERED) == 0) {
1803 asoc->strmin[strmno].last_sequence_delivered++;
1805 stcb->asoc.control_pdapi = NULL;
1806 if (TAILQ_EMPTY(&asoc->reasmqueue) == 0) {
1808 * There could be another message
1811 need_reasm_check = 1;
1815 goto finish_express_del;
1818 failed_pdapi_express_del:
1820 if (SCTP_BASE_SYSCTL(sctp_do_drain) == 0) {
1821 SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, gap);
1822 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_nr_map)) {
1823 asoc->highest_tsn_inside_nr_map = tsn;
1826 SCTP_SET_TSN_PRESENT(asoc->mapping_array, gap);
1827 if (SCTP_TSN_GT(tsn, asoc->highest_tsn_inside_map)) {
1828 asoc->highest_tsn_inside_map = tsn;
1831 if ((chunk_flags & SCTP_DATA_NOT_FRAG) != SCTP_DATA_NOT_FRAG) {
1832 sctp_alloc_a_chunk(stcb, chk);
1834 /* No memory so we drop the chunk */
1835 SCTP_STAT_INCR(sctps_nomem);
1836 if (last_chunk == 0) {
1837 /* we copied it, free the copy */
1838 sctp_m_freem(dmbuf);
1842 chk->rec.data.TSN_seq = tsn;
1843 chk->no_fr_allowed = 0;
1844 chk->rec.data.stream_seq = strmseq;
1845 chk->rec.data.stream_number = strmno;
1846 chk->rec.data.payloadtype = protocol_id;
1847 chk->rec.data.context = stcb->asoc.context;
1848 chk->rec.data.doing_fast_retransmit = 0;
1849 chk->rec.data.rcv_flags = chunk_flags;
1851 chk->send_size = the_len;
1853 atomic_add_int(&net->ref_count, 1);
1856 sctp_alloc_a_readq(stcb, control);
1857 sctp_build_readq_entry_mac(control, stcb, asoc->context, net, tsn,
1862 if (control == NULL) {
1863 /* No memory so we drop the chunk */
1864 SCTP_STAT_INCR(sctps_nomem);
1865 if (last_chunk == 0) {
1866 /* we copied it, free the copy */
1867 sctp_m_freem(dmbuf);
1871 control->length = the_len;
1874 /* Mark it as received */
1875 /* Now queue it where it belongs */
1876 if (control != NULL) {
1877 /* First a sanity check */
1878 if (asoc->fragmented_delivery_inprogress) {
1880 * Ok, we have a fragmented delivery in progress if
1881 * this chunk is next to deliver OR belongs in our
1882 * view to the reassembly, the peer is evil or
1885 uint32_t estimate_tsn;
1887 estimate_tsn = asoc->tsn_last_delivered + 1;
1888 if (TAILQ_EMPTY(&asoc->reasmqueue) &&
1889 (estimate_tsn == control->sinfo_tsn)) {
1890 /* Evil/Broke peer */
1891 sctp_m_freem(control->data);
1892 control->data = NULL;
1893 if (control->whoFrom) {
1894 sctp_free_remote_addr(control->whoFrom);
1895 control->whoFrom = NULL;
1897 sctp_free_a_readq(stcb, control);
1898 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1899 0, M_DONTWAIT, 1, MT_DATA);
1901 struct sctp_paramhdr *ph;
1904 SCTP_BUF_LEN(oper) =
1905 sizeof(struct sctp_paramhdr) +
1906 (3 * sizeof(uint32_t));
1907 ph = mtod(oper, struct sctp_paramhdr *);
1909 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1910 ph->param_length = htons(SCTP_BUF_LEN(oper));
1911 ippp = (uint32_t *) (ph + 1);
1912 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_15);
1916 *ippp = ((strmno << 16) | strmseq);
1918 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_15;
1919 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1923 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1924 sctp_m_freem(control->data);
1925 control->data = NULL;
1926 if (control->whoFrom) {
1927 sctp_free_remote_addr(control->whoFrom);
1928 control->whoFrom = NULL;
1930 sctp_free_a_readq(stcb, control);
1932 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1933 0, M_DONTWAIT, 1, MT_DATA);
1935 struct sctp_paramhdr *ph;
1938 SCTP_BUF_LEN(oper) =
1939 sizeof(struct sctp_paramhdr) +
1940 (3 * sizeof(uint32_t));
1942 struct sctp_paramhdr *);
1944 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1946 htons(SCTP_BUF_LEN(oper));
1947 ippp = (uint32_t *) (ph + 1);
1948 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_16);
1952 *ippp = ((strmno << 16) | strmseq);
1954 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_16;
1955 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
1961 /* No PDAPI running */
1962 if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
1963 /*
1964 * Reassembly queue is NOT empty; validate
1965 * that this TSN does not need to be in the
1966 * reassembly queue. If it does, then our peer
1967 * is broken or evil.
1968 */
1969 if (sctp_does_tsn_belong_to_reasm(asoc, control->sinfo_tsn)) {
1970 sctp_m_freem(control->data);
1971 control->data = NULL;
1972 if (control->whoFrom) {
1973 sctp_free_remote_addr(control->whoFrom);
1974 control->whoFrom = NULL;
1976 sctp_free_a_readq(stcb, control);
1977 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
1978 0, M_DONTWAIT, 1, MT_DATA);
1980 struct sctp_paramhdr *ph;
1983 SCTP_BUF_LEN(oper) =
1984 sizeof(struct sctp_paramhdr) +
1985 (3 * sizeof(uint32_t));
1986 ph = mtod(oper,
1987 struct sctp_paramhdr *);
1988 ph->param_type =
1989 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1990 ph->param_length =
1991 htons(SCTP_BUF_LEN(oper));
1992 ippp = (uint32_t *) (ph + 1);
1993 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_17);
1997 *ippp = ((strmno << 16) | strmseq);
1999 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_17;
2000 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
2006 /* ok, if we reach here we have passed the sanity checks */
2007 if (chunk_flags & SCTP_DATA_UNORDERED) {
2008 /* queue directly into socket buffer */
2009 sctp_mark_non_revokable(asoc, control->sinfo_tsn);
2010 sctp_add_to_readq(stcb->sctp_ep, stcb,
2012 &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_NOT_HELD, SCTP_SO_NOT_LOCKED);
2013 } else {
2014 /*
2015 * Special check for when streams are resetting. We
2016 * could be smarter about this and check the
2017 * actual stream to see if it is not being reset...
2018 * that way we would not create a HOLB when amongst
2019 * streams being reset and those not being reset.
2020 *
2021 * We take complete messages that have a stream reset
2022 * intervening (aka the TSN is after where our
2023 * cum-ack needs to be) off and put them on a
2024 * pending_reply_queue. The reassembly ones we do
2025 * not have to worry about since they are all sorted
2026 * and processed by TSN order. It is only the
2027 * singletons I must worry about.
2028 */
2029 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2030 SCTP_TSN_GT(tsn, liste->tsn)) {
2032 * yep its past where we need to reset... go
2033 * ahead and queue it.
2035 if (TAILQ_EMPTY(&asoc->pending_reply_queue)) {
2037 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2039 struct sctp_queued_to_read *ctlOn,
2040 *nctlOn;
2041 unsigned char inserted = 0;
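/*
 * (A sketch of the loop below.) pending_reply_queue is kept
 * ordered by TSN: walk from the head and insert the new
 * control before the first entry whose TSN is not smaller;
 * if every queued entry has a smaller TSN, the new one is
 * appended at the tail.
 */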
2043 TAILQ_FOREACH_SAFE(ctlOn, &asoc->pending_reply_queue, next, nctlOn) {
2044 if (SCTP_TSN_GT(control->sinfo_tsn, ctlOn->sinfo_tsn)) {
2048 TAILQ_INSERT_BEFORE(ctlOn, control, next);
2053 if (inserted == 0) {
2054 /*
2055 * must be put at the end; every
2056 * entry already queued has a
2057 * smaller TSN.
2058 */
2059 TAILQ_INSERT_TAIL(&asoc->pending_reply_queue, control, next);
2063 sctp_queue_data_to_stream(stcb, asoc, control, abort_flag);
2070 /* Into the re-assembly queue */
2071 sctp_queue_data_for_reasm(stcb, asoc, chk, abort_flag);
2074 * the assoc is now gone and chk was put onto the
2075 * reasm queue, which has all been freed.
2082 if (tsn == (asoc->cumulative_tsn + 1)) {
2083 /* Update cum-ack */
2084 asoc->cumulative_tsn = tsn;
2090 SCTP_STAT_INCR_COUNTER64(sctps_inorderchunks);
2092 SCTP_STAT_INCR_COUNTER64(sctps_inunorderchunks);
2094 SCTP_STAT_INCR(sctps_recvdata);
2095 /* Set it present please */
2096 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_STR_LOGGING_ENABLE) {
2097 sctp_log_strm_del_alt(stcb, tsn, strmseq, strmno, SCTP_STR_LOG_FROM_MARK_TSN);
2099 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2100 sctp_log_map(asoc->mapping_array_base_tsn, asoc->cumulative_tsn,
2101 asoc->highest_tsn_inside_map, SCTP_MAP_PREPARE_SLIDE);
2103 /* check the special flag for stream resets */
2104 if (((liste = TAILQ_FIRST(&asoc->resetHead)) != NULL) &&
2105 SCTP_TSN_GE(asoc->cumulative_tsn, liste->tsn)) {
2107 * we have finished working through the backlogged TSNs;
2108 * now it is time to reset streams. 1: call the reset function,
2109 * 2: free the pending_reply space, 3: distribute any chunks in
2110 * the pending_reply_queue.
2111 */
2112 struct sctp_queued_to_read *ctl, *nctl;
2114 sctp_reset_in_stream(stcb, liste->number_entries, liste->list_of_streams);
2115 TAILQ_REMOVE(&asoc->resetHead, liste, next_resp);
2116 SCTP_FREE(liste, SCTP_M_STRESET);
2117 /* sa_ignore FREED_MEMORY */
2118 liste = TAILQ_FIRST(&asoc->resetHead);
2119 if (TAILQ_EMPTY(&asoc->resetHead)) {
2120 /* All can be removed */
2121 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2122 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2123 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2129 TAILQ_FOREACH_SAFE(ctl, &asoc->pending_reply_queue, next, nctl) {
2130 if (SCTP_TSN_GT(ctl->sinfo_tsn, liste->tsn)) {
2131 break;
2132 }
2133 /*
2134 * if ctl->sinfo_tsn is <= liste->tsn we can
2135 * process it, which is the negation of
2136 * ctl->sinfo_tsn > liste->tsn
2137 */
2138 TAILQ_REMOVE(&asoc->pending_reply_queue, ctl, next);
2139 sctp_queue_data_to_stream(stcb, asoc, ctl, abort_flag);
2146 * Now service re-assembly to pick up anything that has been
2147 * held on the reassembly queue.
2148 */
2149 sctp_deliver_reasm_check(stcb, asoc);
2150 need_reasm_check = 0;
2152 if (need_reasm_check) {
2153 /* Another one waits ? */
2154 sctp_deliver_reasm_check(stcb, asoc);
2159 int8_t sctp_map_lookup_tab[256] = {
2160 0, 1, 0, 2, 0, 1, 0, 3,
2161 0, 1, 0, 2, 0, 1, 0, 4,
2162 0, 1, 0, 2, 0, 1, 0, 3,
2163 0, 1, 0, 2, 0, 1, 0, 5,
2164 0, 1, 0, 2, 0, 1, 0, 3,
2165 0, 1, 0, 2, 0, 1, 0, 4,
2166 0, 1, 0, 2, 0, 1, 0, 3,
2167 0, 1, 0, 2, 0, 1, 0, 6,
2168 0, 1, 0, 2, 0, 1, 0, 3,
2169 0, 1, 0, 2, 0, 1, 0, 4,
2170 0, 1, 0, 2, 0, 1, 0, 3,
2171 0, 1, 0, 2, 0, 1, 0, 5,
2172 0, 1, 0, 2, 0, 1, 0, 3,
2173 0, 1, 0, 2, 0, 1, 0, 4,
2174 0, 1, 0, 2, 0, 1, 0, 3,
2175 0, 1, 0, 2, 0, 1, 0, 7,
2176 0, 1, 0, 2, 0, 1, 0, 3,
2177 0, 1, 0, 2, 0, 1, 0, 4,
2178 0, 1, 0, 2, 0, 1, 0, 3,
2179 0, 1, 0, 2, 0, 1, 0, 5,
2180 0, 1, 0, 2, 0, 1, 0, 3,
2181 0, 1, 0, 2, 0, 1, 0, 4,
2182 0, 1, 0, 2, 0, 1, 0, 3,
2183 0, 1, 0, 2, 0, 1, 0, 6,
2184 0, 1, 0, 2, 0, 1, 0, 3,
2185 0, 1, 0, 2, 0, 1, 0, 4,
2186 0, 1, 0, 2, 0, 1, 0, 3,
2187 0, 1, 0, 2, 0, 1, 0, 5,
2188 0, 1, 0, 2, 0, 1, 0, 3,
2189 0, 1, 0, 2, 0, 1, 0, 4,
2190 0, 1, 0, 2, 0, 1, 0, 3,
2191 0, 1, 0, 2, 0, 1, 0, 8
2192 };
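/*
 * A note on the table above (illustrative, not from the original
 * sources): sctp_map_lookup_tab[v] is the number of consecutive
 * 1-bits at the bottom of the byte v, i.e. how many in-sequence
 * TSNs a mapping-array byte contributes before its first hole.
 */
#if 0
/* Equivalent computation, for illustration only (not compiled). */
static int
trailing_ones(uint8_t val)
{
	int n = 0;

	while ((n < 8) && (val & 0x01)) {
		n++;
		val >>= 1;
	}
	/* e.g. trailing_ones(0x07) == 3 == sctp_map_lookup_tab[0x07] */
	return (n);
}
#endif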
2195 void
2196 sctp_slide_mapping_arrays(struct sctp_tcb *stcb)
2197 {
2198 /*
2199 * Now we also need to check the mapping array in a couple of ways.
2200 * 1) Did we move the cum-ack point?
2201 *
2202 * When you first glance at this you might think that all entries that
2203 * make up the position of the cum-ack would be in the nr-mapping
2204 * array only... i.e. things up to the cum-ack are always
2205 * deliverable. That's true with one exception: when it's a fragmented
2206 * message we may not deliver the data until some threshold (or all
2207 * of it) is in place. So we must OR the nr_mapping_array and
2208 * mapping_array to get a true picture of the cum-ack.
2209 */
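/*
 * Worked example (illustrative numbers): with
 * mapping_array_base_tsn = 1000 and the OR of the two arrays
 * being { 0xff, 0x03, 0x00, ... }, the first byte contributes 8
 * in-sequence TSNs and the second byte 2 more, so at = 10 and
 * the cum-ack below becomes 1000 + 10 - 1 = 1009.
 */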
2210 struct sctp_association *asoc;
2211 int at;
2212 uint8_t val;
2213 int slide_from, slide_end, lgap, distance;
2214 uint32_t old_cumack, old_base, old_highest, highest_tsn;
2216 asoc = &stcb->asoc;
2217 at = 0;
2218 old_cumack = asoc->cumulative_tsn;
2219 old_base = asoc->mapping_array_base_tsn;
2220 old_highest = asoc->highest_tsn_inside_map;
2222 * We could probably improve this a small bit by calculating the
2223 * offset of the current cum-ack as the starting point.
2226 for (slide_from = 0; slide_from < stcb->asoc.mapping_array_size; slide_from++) {
2227 val = asoc->nr_mapping_array[slide_from] | asoc->mapping_array[slide_from];
2231 /* there is a 0 bit */
2232 at += sctp_map_lookup_tab[val];
2236 asoc->cumulative_tsn = asoc->mapping_array_base_tsn + (at - 1);
2238 if (SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_map) &&
2239 SCTP_TSN_GT(asoc->cumulative_tsn, asoc->highest_tsn_inside_nr_map)) {
2241 panic("huh, cumack 0x%x greater than high-tsn 0x%x in map",
2242 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2244 SCTP_PRINTF("huh, cumack 0x%x greater than high-tsn 0x%x in map - should panic?\n",
2245 asoc->cumulative_tsn, asoc->highest_tsn_inside_map);
2246 sctp_print_mapping_array(asoc);
2247 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2248 sctp_log_map(0, 6, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
2250 asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2251 asoc->highest_tsn_inside_nr_map = asoc->cumulative_tsn;
2254 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2255 highest_tsn = asoc->highest_tsn_inside_nr_map;
2257 highest_tsn = asoc->highest_tsn_inside_map;
2259 if ((asoc->cumulative_tsn == highest_tsn) && (at >= 8)) {
2260 /* The complete array was completed by a single FR */
2261 /* highest becomes the cum-ack */
2269 /* clear the array */
2270 clr = ((at + 7) >> 3);
2271 if (clr > asoc->mapping_array_size) {
2272 clr = asoc->mapping_array_size;
2274 memset(asoc->mapping_array, 0, clr);
2275 memset(asoc->nr_mapping_array, 0, clr);
2277 for (i = 0; i < asoc->mapping_array_size; i++) {
2278 if ((asoc->mapping_array[i]) || (asoc->nr_mapping_array[i])) {
2279 SCTP_PRINTF("Error Mapping array's not clean at clear\n");
2280 sctp_print_mapping_array(asoc);
2284 asoc->mapping_array_base_tsn = asoc->cumulative_tsn + 1;
2285 asoc->highest_tsn_inside_nr_map = asoc->highest_tsn_inside_map = asoc->cumulative_tsn;
2286 } else if (at >= 8) {
2287 /* we can slide the mapping array down */
2288 /* slide_from holds where we hit the first NON 0xff byte */
2291 * now calculate the ceiling of the move using our highest
2292 * TSN value.
2293 */
2294 SCTP_CALC_TSN_TO_GAP(lgap, highest_tsn, asoc->mapping_array_base_tsn);
2295 slide_end = (lgap >> 3);
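/*
 * Sketch (illustrative numbers): with mapping_array_base_tsn =
 * 1000 and highest_tsn = 1030, SCTP_CALC_TSN_TO_GAP() yields
 * lgap = 30, so the highest TSN lives in byte 30 >> 3 = 3 of
 * the map and nothing past slide_end = 3 must survive the slide.
 */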
2296 if (slide_end < slide_from) {
2297 sctp_print_mapping_array(asoc);
2299 panic("impossible slide");
2301 SCTP_PRINTF("impossible slide lgap:%x slide_end:%x slide_from:%x? at:%d\n",
2302 lgap, slide_end, slide_from, at);
2306 if (slide_end > asoc->mapping_array_size) {
2308 panic("would overrun buffer");
2310 SCTP_PRINTF("Gak, would have overrun map end:%d slide_end:%d\n",
2311 asoc->mapping_array_size, slide_end);
2312 slide_end = asoc->mapping_array_size;
2315 distance = (slide_end - slide_from) + 1;
2316 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2317 sctp_log_map(old_base, old_cumack, old_highest,
2318 SCTP_MAP_PREPARE_SLIDE);
2319 sctp_log_map((uint32_t) slide_from, (uint32_t) slide_end,
2320 (uint32_t) lgap, SCTP_MAP_SLIDE_FROM);
2322 if (distance + slide_from > asoc->mapping_array_size ||
2325 * Here we do NOT slide forward the array so that
2326 * hopefully when more data comes in to fill it up
2327 * we will be able to slide it forward. Really I
2328 * don't think this should happen :-0
2331 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2332 sctp_log_map((uint32_t) distance, (uint32_t) slide_from,
2333 (uint32_t) asoc->mapping_array_size,
2334 SCTP_MAP_SLIDE_NONE);
2339 for (ii = 0; ii < distance; ii++) {
2340 asoc->mapping_array[ii] = asoc->mapping_array[slide_from + ii];
2341 asoc->nr_mapping_array[ii] = asoc->nr_mapping_array[slide_from + ii];
2344 for (ii = distance; ii < asoc->mapping_array_size; ii++) {
2345 asoc->mapping_array[ii] = 0;
2346 asoc->nr_mapping_array[ii] = 0;
2348 if (asoc->highest_tsn_inside_map + 1 == asoc->mapping_array_base_tsn) {
2349 asoc->highest_tsn_inside_map += (slide_from << 3);
2351 if (asoc->highest_tsn_inside_nr_map + 1 == asoc->mapping_array_base_tsn) {
2352 asoc->highest_tsn_inside_nr_map += (slide_from << 3);
2354 asoc->mapping_array_base_tsn += (slide_from << 3);
2355 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
2356 sctp_log_map(asoc->mapping_array_base_tsn,
2357 asoc->cumulative_tsn, asoc->highest_tsn_inside_map,
2358 SCTP_MAP_SLIDE_RESULT);
2365 sctp_sack_check(struct sctp_tcb *stcb, int was_a_gap)
2367 struct sctp_association *asoc;
2368 uint32_t highest_tsn;
2369 int is_a_gap;
2370 asoc = &stcb->asoc;
2371 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2372 highest_tsn = asoc->highest_tsn_inside_nr_map;
2374 highest_tsn = asoc->highest_tsn_inside_map;
2378 * Now we need to see if we need to queue a sack or just start the
2379 * timer (if allowed).
2381 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2382 /*
2383 * Ok, special case: in the SHUTDOWN-SENT case, here we make
2384 * sure the SACK timer is off and instead send a SHUTDOWN and a
2385 * SACK.
2386 */
2387 if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2388 sctp_timer_stop(SCTP_TIMER_TYPE_RECV,
2389 stcb->sctp_ep, stcb, NULL, SCTP_FROM_SCTP_INDATA + SCTP_LOC_18);
2391 sctp_send_shutdown(stcb,
2392 ((stcb->asoc.alternate) ? stcb->asoc.alternate : stcb->asoc.primary_destination));
2393 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2397 /* is there a gap now ? */
2398 is_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2400 /*
2401 * CMT DAC algorithm: increase number of packets received
2402 * since last ack
2403 */
2404 stcb->asoc.cmt_dac_pkts_rcvd++;
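/*
 * Summary of the test below: a SACK is sent right away when it
 * was explicitly requested, a gap just closed, duplicate TSNs
 * were seen, a gap is still open, delayed SACK is disabled, or
 * sack_freq packets arrived since the last ack; otherwise only
 * the delayed-ack timer is started. The CMT DAC clause inside
 * may still delay an ack that gap reports would have forced.
 */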
2406 if ((stcb->asoc.send_sack == 1) || /* We need to send a
2407 * sack */
2408 ((was_a_gap) && (is_a_gap == 0)) || /* was a gap, but no
2409 * longer is one */
2410 (stcb->asoc.numduptsns) || /* we have dup's */
2411 (is_a_gap) || /* is still a gap */
2412 (stcb->asoc.delayed_ack == 0) || /* Delayed sack disabled */
2413 (stcb->asoc.data_pkts_seen >= stcb->asoc.sack_freq) /* hit limit of pkts */
2416 if ((stcb->asoc.sctp_cmt_on_off > 0) &&
2417 (SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) &&
2418 (stcb->asoc.send_sack == 0) &&
2419 (stcb->asoc.numduptsns == 0) &&
2420 (stcb->asoc.delayed_ack) &&
2421 (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer))) {
2424 * CMT DAC algorithm: with CMT, delay acks
2425 * even in the face of reordering.
2426 * Therefore, acks that do not have to be
2427 * sent because of the above reasons will
2428 * be delayed. That is, acks that would
2429 * have been sent due to gap reports will
2430 * be delayed with DAC. Start the delayed
2431 * ack timer.
2432 */
2434 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2435 stcb->sctp_ep, stcb, NULL);
2438 * Ok we must build a SACK since the timer
2439 * is pending, we got our first packet OR
2440 * there are gaps or duplicates.
2442 (void)SCTP_OS_TIMER_STOP(&stcb->asoc.dack_timer.timer);
2443 sctp_send_sack(stcb, SCTP_SO_NOT_LOCKED);
2446 if (!SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
2447 sctp_timer_start(SCTP_TIMER_TYPE_RECV,
2448 stcb->sctp_ep, stcb, NULL);
2455 sctp_service_queues(struct sctp_tcb *stcb, struct sctp_association *asoc)
2457 struct sctp_tmit_chunk *chk;
2458 uint32_t tsize, pd_point;
2459 uint16_t nxt_todel;
2461 if (asoc->fragmented_delivery_inprogress) {
2462 sctp_service_reassembly(stcb, asoc);
2464 /* Can we proceed further, i.e. the PD-API is complete */
2465 if (asoc->fragmented_delivery_inprogress) {
2470 * Now is there some other chunk I can deliver from the reassembly
2471 * queue?
2472 */
2474 chk = TAILQ_FIRST(&asoc->reasmqueue);
2475 if (chk == NULL) {
2476 asoc->size_on_reasm_queue = 0;
2477 asoc->cnt_on_reasm_queue = 0;
2478 return;
2479 }
2480 nxt_todel = asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered + 1;
2481 if ((chk->rec.data.rcv_flags & SCTP_DATA_FIRST_FRAG) &&
2482 ((nxt_todel == chk->rec.data.stream_seq) ||
2483 (chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED))) {
2484 /*
2485 * Yep, the first one is here. We set up to start reception
2486 * by backing down the TSN just in case we can't deliver.
2487 */
2489 /*
2490 * Before we start, though, either all of the message should
2491 * be here, or at least the partial-delivery point's worth, or
2492 * nothing on the delivery queue and something deliverable.
2493 */
2494 if (stcb->sctp_socket) {
2495 pd_point = min(SCTP_SB_LIMIT_RCV(stcb->sctp_socket),
2496 stcb->sctp_ep->partial_delivery_point);
2498 pd_point = stcb->sctp_ep->partial_delivery_point;
2499 }
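/*
 * Illustrative numbers (not from the original sources): with a
 * 64000-byte receive buffer and partial_delivery_point set to
 * 128000, pd_point clamps to 64000, so partial delivery may
 * begin once 64000 bytes of the fragmented message are queued.
 */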
2500 if (sctp_is_all_msg_on_reasm(asoc, &tsize) || (tsize >= pd_point)) {
2501 asoc->fragmented_delivery_inprogress = 1;
2502 asoc->tsn_last_delivered = chk->rec.data.TSN_seq - 1;
2503 asoc->str_of_pdapi = chk->rec.data.stream_number;
2504 asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
2505 asoc->pdapi_ppid = chk->rec.data.payloadtype;
2506 asoc->fragment_flags = chk->rec.data.rcv_flags;
2507 sctp_service_reassembly(stcb, asoc);
2508 if (asoc->fragmented_delivery_inprogress == 0) {
2516 sctp_process_data(struct mbuf **mm, int iphlen, int *offset, int length,
2517 struct sockaddr *src, struct sockaddr *dst,
2518 struct sctphdr *sh, struct sctp_inpcb *inp,
2519 struct sctp_tcb *stcb, struct sctp_nets *net, uint32_t * high_tsn,
2520 uint8_t use_mflowid, uint32_t mflowid,
2521 uint32_t vrf_id, uint16_t port)
2523 struct sctp_data_chunk *ch, chunk_buf;
2524 struct sctp_association *asoc;
2525 int num_chunks = 0; /* number of data chunks processed */
2526 int stop_proc = 0;
2527 int chk_length, break_flag, last_chunk;
2528 int abort_flag = 0, was_a_gap;
2529 struct mbuf *m;
2530 uint32_t highest_tsn;
2533 sctp_set_rwnd(stcb, &stcb->asoc);
2535 m = *mm;
2536 SCTP_TCB_LOCK_ASSERT(stcb);
2537 asoc = &stcb->asoc;
2538 if (SCTP_TSN_GT(asoc->highest_tsn_inside_nr_map, asoc->highest_tsn_inside_map)) {
2539 highest_tsn = asoc->highest_tsn_inside_nr_map;
2541 highest_tsn = asoc->highest_tsn_inside_map;
2543 was_a_gap = SCTP_TSN_GT(highest_tsn, stcb->asoc.cumulative_tsn);
2545 * setup where we got the last DATA packet from for any SACK that
2546 * may need to go out. Don't bump the net. This is done ONLY when a
2547 * chunk is assigned.
2549 asoc->last_data_chunk_from = net;
2552 * Now before we proceed we must figure out if this is a wasted
2553 * cluster... i.e. it is a small packet sent in and yet the driver
2554 * underneath allocated a full cluster for it. If so we must copy it
2555 * to a smaller mbuf and free up the cluster mbuf. This will help
2556 * with cluster starvation. Note for __Panda__ we don't do this
2557 * since it has clusters all the way down to 64 bytes.
2559 if (SCTP_BUF_LEN(m) < (long)MLEN && SCTP_BUF_NEXT(m) == NULL) {
2560 /* we only handle mbufs that are singletons.. not chains */
2561 m = sctp_get_mbuf_for_msg(SCTP_BUF_LEN(m), 0, M_DONTWAIT, 1, MT_DATA);
2562 if (m != NULL) {
2563 /* ok, let's see if we can copy the data up */
2564 caddr_t *from, *to;
2565
2566 /* get the pointers and copy */
2567 to = mtod(m, caddr_t *);
2568 from = mtod((*mm), caddr_t *);
2569 memcpy(to, from, SCTP_BUF_LEN((*mm)));
2570 /* copy the length and free up the old */
2571 SCTP_BUF_LEN(m) = SCTP_BUF_LEN((*mm));
2572 sctp_m_freem(*mm);
2573 /* success, back copy */
2574 *mm = m;
2575 } else {
2576 /* We are in trouble in the mbuf world .. yikes */
2580 /* get pointer to the first chunk header */
2581 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2582 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2587 * process all DATA chunks...
2589 *high_tsn = asoc->cumulative_tsn;
2591 asoc->data_pkts_seen++;
2592 while (stop_proc == 0) {
2593 /* validate chunk length */
2594 chk_length = ntohs(ch->ch.chunk_length);
2595 if (length - *offset < chk_length) {
2596 /* all done, mutilated chunk */
2600 if (ch->ch.chunk_type == SCTP_DATA) {
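/*
 * A DATA chunk must carry at least one byte of user data beyond
 * its header, hence the "+ 1" in the length check below; RFC
 * 4960 treats an empty DATA chunk as an error condition.
 */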
2601 if ((size_t)chk_length < sizeof(struct sctp_data_chunk) + 1) {
2602 /*
2603 * Need to send an abort since we had an
2604 * invalid data chunk.
2605 */
2606 struct mbuf *op_err;
2608 op_err = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 2 * sizeof(uint32_t)),
2609 0, M_DONTWAIT, 1, MT_DATA);
2612 struct sctp_paramhdr *ph;
2615 SCTP_BUF_LEN(op_err) = sizeof(struct sctp_paramhdr) +
2616 (2 * sizeof(uint32_t));
2617 ph = mtod(op_err, struct sctp_paramhdr *);
2618 ph->param_type =
2619 htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
2620 ph->param_length = htons(SCTP_BUF_LEN(op_err));
2621 ippp = (uint32_t *) (ph + 1);
2622 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_19);
2624 *ippp = asoc->cumulative_tsn;
2627 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_19;
2628 sctp_abort_association(inp, stcb, m, iphlen,
2629 src, dst, sh, op_err,
2630 use_mflowid, mflowid,
2634 #ifdef SCTP_AUDITING_ENABLED
2635 sctp_audit_log(0xB1, 0);
2637 if (SCTP_SIZE32(chk_length) == (length - *offset)) {
2638 last_chunk = 1;
2639 } else {
2640 last_chunk = 0;
2641 }
2642 if (sctp_process_a_data_chunk(stcb, asoc, mm, *offset, ch,
2643 chk_length, net, high_tsn, &abort_flag, &break_flag,
2652 * Set because we ran out of rwnd space and
2653 * have no drop-report space left.
2654 */
2659 /* not a data chunk in the data region */
2660 switch (ch->ch.chunk_type) {
2661 case SCTP_INITIATION:
2662 case SCTP_INITIATION_ACK:
2663 case SCTP_SELECTIVE_ACK:
2664 case SCTP_NR_SELECTIVE_ACK:
2665 case SCTP_HEARTBEAT_REQUEST:
2666 case SCTP_HEARTBEAT_ACK:
2667 case SCTP_ABORT_ASSOCIATION:
2669 case SCTP_SHUTDOWN_ACK:
2670 case SCTP_OPERATION_ERROR:
2671 case SCTP_COOKIE_ECHO:
2672 case SCTP_COOKIE_ACK:
2675 case SCTP_SHUTDOWN_COMPLETE:
2676 case SCTP_AUTHENTICATION:
2677 case SCTP_ASCONF_ACK:
2678 case SCTP_PACKET_DROPPED:
2679 case SCTP_STREAM_RESET:
2680 case SCTP_FORWARD_CUM_TSN:
2683 * Now, what do we do with KNOWN chunks that
2684 * are NOT in the right place?
2686 * For now, I do nothing but ignore them. We
2687 * may later want to add sysctl stuff to
2688 * switch out and do either an ABORT() or
2689 * possibly process them.
2691 if (SCTP_BASE_SYSCTL(sctp_strict_data_order)) {
2692 struct mbuf *op_err;
2694 op_err = sctp_generate_invmanparam(SCTP_CAUSE_PROTOCOL_VIOLATION);
2695 sctp_abort_association(inp, stcb,
2699 use_mflowid, mflowid,
2705 /* unknown chunk type, use bit rules */
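/*
 * Bit rules (RFC 4960, Section 3.2): if 0x40 is set in an
 * unrecognized chunk type, the chunk is reported back in an
 * ERROR; if 0x80 is set the chunk is skipped and processing
 * continues, otherwise processing of the packet stops.
 */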
2706 if (ch->ch.chunk_type & 0x40) {
2707 /* Add a error report to the queue */
2709 struct sctp_paramhdr *phd;
2711 merr = sctp_get_mbuf_for_msg(sizeof(*phd), 0, M_DONTWAIT, 1, MT_DATA);
2713 phd = mtod(merr, struct sctp_paramhdr *);
2715 * We cheat and use param
2716 * type since we did not
2717 * bother to define an error
2718 * cause struct. They are
2719 * the same basic format
2720 * with different names.
2721 */
2722 phd->param_type =
2723 htons(SCTP_CAUSE_UNRECOG_CHUNK);
2724 phd->param_length =
2725 htons(chk_length + sizeof(*phd));
2726 SCTP_BUF_LEN(merr) = sizeof(*phd);
2727 SCTP_BUF_NEXT(merr) = SCTP_M_COPYM(m, *offset, chk_length, M_DONTWAIT);
2728 if (SCTP_BUF_NEXT(merr)) {
2729 if (sctp_pad_lastmbuf(SCTP_BUF_NEXT(merr), SCTP_SIZE32(chk_length) - chk_length, NULL)) {
2732 sctp_queue_op_err(stcb, merr);
2739 if ((ch->ch.chunk_type & 0x80) == 0) {
2740 /* discard the rest of this packet */
2742 } /* else skip this bad chunk and
2743 * continue... */
2744 break;
2745 } /* switch of chunk type */
2747 *offset += SCTP_SIZE32(chk_length);
2748 if ((*offset >= length) || stop_proc) {
2749 /* no more data left in the mbuf chain */
2750 stop_proc = 1;
2751 continue;
2752 }
2753 ch = (struct sctp_data_chunk *)sctp_m_getptr(m, *offset,
2754 sizeof(struct sctp_data_chunk), (uint8_t *) & chunk_buf);
2763 * we need to report rwnd overrun drops.
2765 sctp_send_packet_dropped(stcb, net, *mm, length, iphlen, 0);
2768 /*
2769 * Did we get data? If so, update the time for auto-close and
2770 * give the peer credit for being alive.
2771 */
2772 SCTP_STAT_INCR(sctps_recvpktwithdata);
2773 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
2774 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
2775 stcb->asoc.overall_error_count,
2777 SCTP_FROM_SCTP_INDATA,
2780 stcb->asoc.overall_error_count = 0;
2781 (void)SCTP_GETTIME_TIMEVAL(&stcb->asoc.time_last_rcvd);
2783 /* now service all of the reassm queue if needed */
2784 if (!(TAILQ_EMPTY(&asoc->reasmqueue)))
2785 sctp_service_queues(stcb, asoc);
2787 if (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_SENT) {
2788 /* Assure that we ack right away */
2789 stcb->asoc.send_sack = 1;
2791 /* Start a sack timer or QUEUE a SACK for sending */
2792 sctp_sack_check(stcb, was_a_gap);
2793 return (0);
2794 }
2797 sctp_process_segment_range(struct sctp_tcb *stcb, struct sctp_tmit_chunk **p_tp1, uint32_t last_tsn,
2798 uint16_t frag_strt, uint16_t frag_end, int nr_sacking,
2800 uint32_t * biggest_newly_acked_tsn,
2801 uint32_t * this_sack_lowest_newack,
2804 struct sctp_tmit_chunk *tp1;
2805 unsigned int theTSN;
2806 int j, wake_him = 0, circled = 0;
2808 /* Recover the tp1 we last saw */
2811 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
2813 for (j = frag_strt; j <= frag_end; j++) {
2814 theTSN = j + last_tsn;
2815 while (tp1) {
2816 if (tp1->rec.data.doing_fast_retransmit)
2817 (*num_frs) += 1;
2820 * CMT: CUCv2 algorithm. For each TSN being
2821 * processed from the sent queue, track the
2822 * next expected pseudo-cumack, or
2823 * rtx_pseudo_cumack, if required. Separate
2824 * cumack trackers for first transmissions,
2825 * and retransmissions.
2827 if ((tp1->whoTo->find_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2828 (tp1->snd_count == 1)) {
2829 tp1->whoTo->pseudo_cumack = tp1->rec.data.TSN_seq;
2830 tp1->whoTo->find_pseudo_cumack = 0;
2832 if ((tp1->whoTo->find_rtx_pseudo_cumack == 1) && (tp1->sent < SCTP_DATAGRAM_RESEND) &&
2833 (tp1->snd_count > 1)) {
2834 tp1->whoTo->rtx_pseudo_cumack = tp1->rec.data.TSN_seq;
2835 tp1->whoTo->find_rtx_pseudo_cumack = 0;
2837 if (tp1->rec.data.TSN_seq == theTSN) {
2838 if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
2840 * must be held until
2841 * cum-ack passes
2842 */
2843 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
2844 /*
2845 * If it is less than RESEND, it is
2846 * now no longer in flight.
2847 * Higher values may already be set
2848 * via previous Gap Ack Blocks...
2849 * i.e. ACKED or RESEND.
2850 */
2851 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2852 *biggest_newly_acked_tsn)) {
2853 *biggest_newly_acked_tsn = tp1->rec.data.TSN_seq;
2856 * CMT: SFR algo (and HTNA) - set
2857 * saw_newack to 1 for dest being
2858 * newly acked. update
2859 * this_sack_highest_newack if
2860 * appropriate.
2861 */
2862 if (tp1->rec.data.chunk_was_revoked == 0)
2863 tp1->whoTo->saw_newack = 1;
2865 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2866 tp1->whoTo->this_sack_highest_newack)) {
2867 tp1->whoTo->this_sack_highest_newack =
2868 tp1->rec.data.TSN_seq;
2871 * CMT DAC algo: also update
2872 * this_sack_lowest_newack
2874 if (*this_sack_lowest_newack == 0) {
2875 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2876 sctp_log_sack(*this_sack_lowest_newack,
2878 tp1->rec.data.TSN_seq,
2881 SCTP_LOG_TSN_ACKED);
2883 *this_sack_lowest_newack = tp1->rec.data.TSN_seq;
2886 * CMT: CUCv2 algorithm. If (rtx-)pseudo-cumack for corresp
2887 * dest is being acked, then we have a new (rtx-)pseudo-cumack. Set
2888 * new_(rtx_)pseudo_cumack to TRUE so that the cwnd for this dest can be
2889 * updated. Also trigger search for the next expected (rtx-)pseudo-cumack.
2890 * Separate pseudo_cumack trackers for first transmissions and
2893 if (tp1->rec.data.TSN_seq == tp1->whoTo->pseudo_cumack) {
2894 if (tp1->rec.data.chunk_was_revoked == 0) {
2895 tp1->whoTo->new_pseudo_cumack = 1;
2897 tp1->whoTo->find_pseudo_cumack = 1;
2899 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2900 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
2902 if (tp1->rec.data.TSN_seq == tp1->whoTo->rtx_pseudo_cumack) {
2903 if (tp1->rec.data.chunk_was_revoked == 0) {
2904 tp1->whoTo->new_pseudo_cumack = 1;
2906 tp1->whoTo->find_rtx_pseudo_cumack = 1;
2908 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
2909 sctp_log_sack(*biggest_newly_acked_tsn,
2911 tp1->rec.data.TSN_seq,
2914 SCTP_LOG_TSN_ACKED);
2916 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
2917 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_GAP,
2918 tp1->whoTo->flight_size,
2920 (uintptr_t) tp1->whoTo,
2921 tp1->rec.data.TSN_seq);
2923 sctp_flight_size_decrease(tp1);
2924 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
2925 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
2928 sctp_total_flight_decrease(stcb, tp1);
2930 tp1->whoTo->net_ack += tp1->send_size;
2931 if (tp1->snd_count < 2) {
2933 * True non-retransmitted chunk
2934 */
2935 tp1->whoTo->net_ack2 += tp1->send_size;
2943 sctp_calculate_rto(stcb,
2946 &tp1->sent_rcv_time,
2947 sctp_align_safe_nocopy,
2948 SCTP_RTT_FROM_DATA);
2951 if (tp1->whoTo->rto_needed == 0) {
2952 tp1->whoTo->rto_needed = 1;
2958 if (tp1->sent <= SCTP_DATAGRAM_RESEND) {
2959 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
2960 stcb->asoc.this_sack_highest_gap)) {
2961 stcb->asoc.this_sack_highest_gap =
2962 tp1->rec.data.TSN_seq;
2964 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
2965 sctp_ucount_decr(stcb->asoc.sent_queue_retran_cnt);
2966 #ifdef SCTP_AUDITING_ENABLED
2967 sctp_audit_log(0xB2,
2968 (stcb->asoc.sent_queue_retran_cnt & 0x000000ff));
2973 * All chunks NOT UNSENT fall through here and are marked
2974 * (leave PR-SCTP ones that are to skip alone though)
2976 if ((tp1->sent != SCTP_FORWARD_TSN_SKIP) &&
2977 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2978 tp1->sent = SCTP_DATAGRAM_MARKED;
2980 if (tp1->rec.data.chunk_was_revoked) {
2981 /* deflate the cwnd */
2982 tp1->whoTo->cwnd -= tp1->book_size;
2983 tp1->rec.data.chunk_was_revoked = 0;
2985 /* NR Sack code here */
2987 (tp1->sent != SCTP_DATAGRAM_NR_ACKED)) {
2988 if (stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
2989 stcb->asoc.strmout[tp1->rec.data.stream_number].chunks_on_queues--;
2992 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
2995 tp1->sent = SCTP_DATAGRAM_NR_ACKED;
3001 sctp_free_bufspace(stcb, &stcb->asoc, tp1, 1);
3002 sctp_m_freem(tp1->data);
3009 } /* if (tp1->TSN_seq == theTSN) */
3010 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, theTSN)) {
3011 break;
3012 }
3013 tp1 = TAILQ_NEXT(tp1, sctp_next);
3014 if ((tp1 == NULL) && (circled == 0)) {
3015 circled++;
3016 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3017 }
3018 } /* end while (tp1) */
3019 if (tp1 == NULL) {
3020 circled = 0;
3021 tp1 = TAILQ_FIRST(&stcb->asoc.sent_queue);
3022 }
3023 /* In case the fragments were not in order we must reset */
3024 } /* end for (j = fragStart */
3026 return (wake_him); /* Return value only used for nr-sack */
3031 sctp_handle_segments(struct mbuf *m, int *offset, struct sctp_tcb *stcb, struct sctp_association *asoc,
3032 uint32_t last_tsn, uint32_t * biggest_tsn_acked,
3033 uint32_t * biggest_newly_acked_tsn, uint32_t * this_sack_lowest_newack,
3034 int num_seg, int num_nr_seg, int *rto_ok)
3036 struct sctp_gap_ack_block *frag, block;
3037 struct sctp_tmit_chunk *tp1;
3038 int i;
3039 int num_frs = 0;
3040 int chunk_freed;
3041 int non_revocable;
3042 uint16_t frag_strt, frag_end, prev_frag_end;
3044 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3045 prev_frag_end = 0;
3046 chunk_freed = 0;
3048 for (i = 0; i < (num_seg + num_nr_seg); i++) {
3049 if (i == num_seg) {
3050 prev_frag_end = 0;
3051 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3052 }
3053 frag = (struct sctp_gap_ack_block *)sctp_m_getptr(m, *offset,
3054 sizeof(struct sctp_gap_ack_block), (uint8_t *) & block);
3055 *offset += sizeof(block);
3056 if (frag == NULL) {
3057 return (chunk_freed);
3058 }
3059 frag_strt = ntohs(frag->start);
3060 frag_end = ntohs(frag->end);
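/*
 * Gap Ack Block offsets are relative to the SACK's Cumulative
 * TSN Ack: this block reports TSNs last_tsn + frag_strt through
 * last_tsn + frag_end, inclusive (RFC 4960, Section 3.3.4).
 */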
3062 if (frag_strt > frag_end) {
3063 /* This gap report is malformed, skip it. */
3064 continue;
3065 }
3066 if (frag_strt <= prev_frag_end) {
3067 /* This gap report is not in order, so restart. */
3068 tp1 = TAILQ_FIRST(&asoc->sent_queue);
3070 if (SCTP_TSN_GT((last_tsn + frag_end), *biggest_tsn_acked)) {
3071 *biggest_tsn_acked = last_tsn + frag_end;
3072 }
3073 if (i < num_seg) {
3074 non_revocable = 0;
3075 } else {
3076 non_revocable = 1;
3077 }
3078 if (sctp_process_segment_range(stcb, &tp1, last_tsn, frag_strt, frag_end,
3079 non_revocable, &num_frs, biggest_newly_acked_tsn,
3080 this_sack_lowest_newack, rto_ok)) {
3081 chunk_freed = 1;
3082 }
3083 prev_frag_end = frag_end;
3085 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3087 sctp_log_fr(*biggest_tsn_acked,
3088 *biggest_newly_acked_tsn,
3089 last_tsn, SCTP_FR_LOG_BIGGEST_TSNS);
3091 return (chunk_freed);
3095 sctp_check_for_revoked(struct sctp_tcb *stcb,
3096 struct sctp_association *asoc, uint32_t cumack,
3097 uint32_t biggest_tsn_acked)
3099 struct sctp_tmit_chunk *tp1;
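/*
 * Background: a TSN that an earlier SACK covered with a gap
 * report but that is missing from this SACK has been "revoked"
 * by a reneging peer; such chunks must be put back in flight so
 * timers and congestion accounting stay consistent.
 */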
3101 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3102 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cumack)) {
3104 * ok this guy is either ACKED or MARKED. If it is
3105 * ACKED it has been previously acked but not this
3106 * time, i.e. revoked. If it is MARKED it was ACK'ed
3107 * again.
3108 */
3109 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked)) {
3112 if (tp1->sent == SCTP_DATAGRAM_ACKED) {
3113 /* it has been revoked */
3114 tp1->sent = SCTP_DATAGRAM_SENT;
3115 tp1->rec.data.chunk_was_revoked = 1;
3117 * We must add this stuff back in to assure
3118 * timers and such get started.
3120 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3121 sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
3122 tp1->whoTo->flight_size,
3124 (uintptr_t) tp1->whoTo,
3125 tp1->rec.data.TSN_seq);
3127 sctp_flight_size_increase(tp1);
3128 sctp_total_flight_increase(stcb, tp1);
3130 * We inflate the cwnd to compensate for our
3131 * artificial inflation of the flight_size.
3133 tp1->whoTo->cwnd += tp1->book_size;
3134 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3135 sctp_log_sack(asoc->last_acked_seq,
3137 tp1->rec.data.TSN_seq,
3140 SCTP_LOG_TSN_REVOKED);
3142 } else if (tp1->sent == SCTP_DATAGRAM_MARKED) {
3143 /* it has been re-acked in this SACK */
3144 tp1->sent = SCTP_DATAGRAM_ACKED;
3147 if (tp1->sent == SCTP_DATAGRAM_UNSENT)
3154 sctp_strike_gap_ack_chunks(struct sctp_tcb *stcb, struct sctp_association *asoc,
3155 uint32_t biggest_tsn_acked, uint32_t biggest_tsn_newly_acked, uint32_t this_sack_lowest_newack, int accum_moved)
3157 struct sctp_tmit_chunk *tp1;
3158 int strike_flag = 0;
3160 int tot_retrans = 0;
3161 uint32_t sending_seq;
3162 struct sctp_nets *net;
3163 int num_dests_sacked = 0;
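/*
 * Overview (a sketch of the loop below): every chunk in the
 * sent queue that this SACK failed to cover collects a strike;
 * once a chunk takes enough strikes its state reaches
 * SCTP_DATAGRAM_RESEND and it becomes eligible for fast
 * retransmit, possibly to an alternate destination.
 */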
3165 /*
3166 * select the sending_seq; this is either the next thing ready to be
3167 * sent but not transmitted, OR, the next seq we assign.
3168 */
3169 tp1 = TAILQ_FIRST(&stcb->asoc.send_queue);
3171 sending_seq = asoc->sending_seq;
3173 sending_seq = tp1->rec.data.TSN_seq;
3176 /* CMT DAC algo: finding out if SACK is a mixed SACK */
3177 if ((asoc->sctp_cmt_on_off > 0) &&
3178 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3179 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3180 if (net->saw_newack)
3181 num_dests_sacked++;
3182 }
3183 }
3184 if (stcb->asoc.peer_supports_prsctp) {
3185 (void)SCTP_GETTIME_TIMEVAL(&now);
3187 TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
3189 if (tp1->no_fr_allowed) {
3190 /* this one had a timeout or something */
3193 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3194 if (tp1->sent < SCTP_DATAGRAM_RESEND)
3195 sctp_log_fr(biggest_tsn_newly_acked,
3196 tp1->rec.data.TSN_seq,
3198 SCTP_FR_LOG_CHECK_STRIKE);
3200 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, biggest_tsn_acked) ||
3201 tp1->sent == SCTP_DATAGRAM_UNSENT) {
3205 if (stcb->asoc.peer_supports_prsctp) {
3206 if ((PR_SCTP_TTL_ENABLED(tp1->flags)) && tp1->sent < SCTP_DATAGRAM_ACKED) {
3207 /* Is it expired? */
3208 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3209 /* Yes so drop it */
3210 if (tp1->data != NULL) {
3211 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3212 SCTP_SO_NOT_LOCKED);
3218 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->this_sack_highest_gap)) {
3219 /* we are beyond the tsn in the sack */
3222 if (tp1->sent >= SCTP_DATAGRAM_RESEND) {
3223 /* either a RESEND, ACKED, or MARKED */
3225 if (tp1->sent == SCTP_FORWARD_TSN_SKIP) {
3226 /* Continue striking FWD-TSN chunks */
3227 tp1->rec.data.fwd_tsn_cnt++;
3232 * CMT : SFR algo (covers part of DAC and HTNA as well)
3234 if (tp1->whoTo && tp1->whoTo->saw_newack == 0) {
3236 * No new acks were received for data sent to this
3237 * dest. Therefore, according to the SFR algo for
3238 * CMT, no data sent to this dest can be marked for
3239 * FR using this SACK.
3242 } else if (tp1->whoTo && SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3243 tp1->whoTo->this_sack_highest_newack)) {
3245 * CMT: New acks were received for data sent to
3246 * this dest. But no new acks were seen for data
3247 * sent after tp1. Therefore, according to the SFR
3248 * algo for CMT, tp1 cannot be marked for FR using
3249 * this SACK. This step covers part of the DAC algo
3250 * and the HTNA algo as well.
3255 * Here we check to see if we have already done a FR
3256 * and if so we see if the biggest TSN we saw in the sack is
3257 * smaller than the recovery point. If so we don't strike
3258 * the tsn... otherwise we CAN strike the TSN.
3261 * @@@ JRI: Check for CMT if (accum_moved &&
3262 * asoc->fast_retran_loss_recovery && (sctp_cmt_on_off ==
3265 if (accum_moved && asoc->fast_retran_loss_recovery) {
3266 /*
3267 * Strike the TSN if in fast-recovery and the cum-ack
3268 * moved.
3269 */
3270 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3271 sctp_log_fr(biggest_tsn_newly_acked,
3272 tp1->rec.data.TSN_seq,
3274 SCTP_FR_LOG_STRIKE_CHUNK);
3276 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3277 tp1->sent++;
3278 }
3279 if ((asoc->sctp_cmt_on_off > 0) &&
3280 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3282 * CMT DAC algorithm: If SACK flag is set to
3283 * 0, then lowest_newack test will not pass
3284 * because it would have been set to the
3285 * cumack earlier. If not already to be
3286 * rtx'd, If not a mixed sack and if tp1 is
3287 * not between two sacked TSNs, then mark by
3288 * one more. NOTE that we are marking by one
3289 * additional time since the SACK DAC flag
3290 * indicates that two packets have been
3291 * received after this missing TSN.
3293 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3294 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3295 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3296 sctp_log_fr(16 + num_dests_sacked,
3297 tp1->rec.data.TSN_seq,
3299 SCTP_FR_LOG_STRIKE_CHUNK);
3304 } else if ((tp1->rec.data.doing_fast_retransmit) &&
3305 (asoc->sctp_cmt_on_off == 0)) {
3307 * For those that have done a FR we must take
3308 * special consideration if we strike. I.e. the
3309 * biggest_newly_acked must be higher than the
3310 * sending_seq at the time we did the FR.
3313 #ifdef SCTP_FR_TO_ALTERNATE
3315 * If FR's go to new networks, then we must only do
3316 * this for singly homed asoc's. However if the FR's
3317 * go to the same network (Armando's work) then it's
3318 * ok to FR multiple times.
3326 if (SCTP_TSN_GE(biggest_tsn_newly_acked,
3327 tp1->rec.data.fast_retran_tsn)) {
3329 * Strike the TSN, since this ack is
3330 * beyond where things were when we
3331 * did a FR.
3332 */
3333 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3334 sctp_log_fr(biggest_tsn_newly_acked,
3335 tp1->rec.data.TSN_seq,
3337 SCTP_FR_LOG_STRIKE_CHUNK);
3339 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3340 tp1->sent++;
3341 }
3342 strike_flag = 1;
3343 if ((asoc->sctp_cmt_on_off > 0) &&
3344 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3346 * CMT DAC algorithm: If
3347 * SACK flag is set to 0,
3348 * then lowest_newack test
3349 * will not pass because it
3350 * would have been set to
3351 * the cumack earlier. If
3352 * not already to be rtx'd,
3353 * If not a mixed sack and
3354 * if tp1 is not between two
3355 * sacked TSNs, then mark by
3356 * one more. NOTE that we
3357 * are marking by one
3358 * additional time since the
3359 * SACK DAC flag indicates
3360 * that two packets have
3361 * been received after this
3364 if ((tp1->sent < SCTP_DATAGRAM_RESEND) &&
3365 (num_dests_sacked == 1) &&
3366 SCTP_TSN_GT(this_sack_lowest_newack,
3367 tp1->rec.data.TSN_seq)) {
3368 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3369 sctp_log_fr(32 + num_dests_sacked,
3370 tp1->rec.data.TSN_seq,
3372 SCTP_FR_LOG_STRIKE_CHUNK);
3374 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3382 * JRI: TODO: remove code for HTNA algo. CMT's SFR
3385 } else if (SCTP_TSN_GT(tp1->rec.data.TSN_seq,
3386 biggest_tsn_newly_acked)) {
3388 * We don't strike these: this is the HTNA
3389 * algorithm, i.e. we don't strike if our TSN is
3390 * larger than the Highest TSN Newly Acked.
3391 */
3394 /* Strike the TSN */
3395 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3396 sctp_log_fr(biggest_tsn_newly_acked,
3397 tp1->rec.data.TSN_seq,
3399 SCTP_FR_LOG_STRIKE_CHUNK);
3401 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3402 tp1->sent++;
3403 }
3404 if ((asoc->sctp_cmt_on_off > 0) &&
3405 SCTP_BASE_SYSCTL(sctp_cmt_use_dac)) {
3407 * CMT DAC algorithm: If SACK flag is set to
3408 * 0, then lowest_newack test will not pass
3409 * because it would have been set to the
3410 * cumack earlier. If not already to be
3411 * rtx'd, If not a mixed sack and if tp1 is
3412 * not between two sacked TSNs, then mark by
3413 * one more. NOTE that we are marking by one
3414 * additional time since the SACK DAC flag
3415 * indicates that two packets have been
3416 * received after this missing TSN.
3418 if ((tp1->sent < SCTP_DATAGRAM_RESEND) && (num_dests_sacked == 1) &&
3419 SCTP_TSN_GT(this_sack_lowest_newack, tp1->rec.data.TSN_seq)) {
3420 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3421 sctp_log_fr(48 + num_dests_sacked,
3422 tp1->rec.data.TSN_seq,
3424 SCTP_FR_LOG_STRIKE_CHUNK);
3430 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3431 struct sctp_nets *alt;
3433 /* fix counts and things */
3434 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3435 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND,
3436 (tp1->whoTo ? (tp1->whoTo->flight_size) : 0),
3438 (uintptr_t) tp1->whoTo,
3439 tp1->rec.data.TSN_seq);
3442 tp1->whoTo->net_ack++;
3443 sctp_flight_size_decrease(tp1);
3444 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3445 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3449 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
3450 sctp_log_rwnd(SCTP_INCREASE_PEER_RWND,
3451 asoc->peers_rwnd, tp1->send_size, SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3453 /* add back to the rwnd */
3454 asoc->peers_rwnd += (tp1->send_size + SCTP_BASE_SYSCTL(sctp_peer_chunk_oh));
3456 /* remove from the total flight */
3457 sctp_total_flight_decrease(stcb, tp1);
3459 if ((stcb->asoc.peer_supports_prsctp) &&
3460 (PR_SCTP_RTX_ENABLED(tp1->flags))) {
3462 * Has it been retransmitted tv_sec times? -
3463 * we store the retran count there.
3465 if (tp1->snd_count > tp1->rec.data.timetodrop.tv_sec) {
3466 /* Yes, so drop it */
3467 if (tp1->data != NULL) {
3468 (void)sctp_release_pr_sctp_chunk(stcb, tp1, 1,
3469 SCTP_SO_NOT_LOCKED);
3471 /* Make sure to flag we had a FR */
3472 tp1->whoTo->net_ack++;
3477 * SCTP_PRINTF("OK, we are now ready to FR this
3480 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
3481 sctp_log_fr(tp1->rec.data.TSN_seq, tp1->snd_count,
3485 /* This is a subsequent FR */
3486 SCTP_STAT_INCR(sctps_sendmultfastretrans);
3488 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
3489 if (asoc->sctp_cmt_on_off > 0) {
3491 * CMT: Using RTX_SSTHRESH policy for CMT.
3492 * If CMT is being used, then pick dest with
3493 * largest ssthresh for any retransmission.
3495 tp1->no_fr_allowed = 1;
3496 alt = tp1->whoTo;
3497 /* sa_ignore NO_NULL_CHK */
3498 if (asoc->sctp_cmt_pf > 0) {
3500 * JRS 5/18/07 - If CMT PF is on,
3501 * use the PF version of
3502 * find_alternate_net().
3503 */
3504 alt = sctp_find_alternate_net(stcb, alt, 2);
3507 * JRS 5/18/07 - If only CMT is on,
3508 * use the CMT version of
3509 * find_alternate_net().
3510 */
3511 /* sa_ignore NO_NULL_CHK */
3512 alt = sctp_find_alternate_net(stcb, alt, 1);
3518 * CUCv2: If a different dest is picked for
3519 * the retransmission, then new
3520 * (rtx-)pseudo_cumack needs to be tracked
3521 * for orig dest. Let CUCv2 track new (rtx-)
3522 * pseudo-cumack always.
3525 tp1->whoTo->find_pseudo_cumack = 1;
3526 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3528 } else {/* CMT is OFF */
3530 #ifdef SCTP_FR_TO_ALTERNATE
3531 /* Can we find an alternate? */
3532 alt = sctp_find_alternate_net(stcb, tp1->whoTo, 0);
3535 * default behavior is to NOT retransmit
3536 * FR's to an alternate. Armando Caro's
3537 * paper details why.
3543 tp1->rec.data.doing_fast_retransmit = 1;
3545 /* mark the sending seq for possible subsequent FR's */
3547 * SCTP_PRINTF("Marking TSN for FR new value %x\n",
3548 * (uint32_t)tpi->rec.data.TSN_seq);
3550 if (TAILQ_EMPTY(&asoc->send_queue)) {
3552 * If the queue of send is empty then its
3553 * the next sequence number that will be
3554 * assigned so we subtract one from this to
3555 * get the one we last sent.
3557 tp1->rec.data.fast_retran_tsn = sending_seq;
3560 * If there are chunks on the send queue
3561 * (unsent data that has made it from the
3562 * stream queues but not out the door), we
3563 * take the first one (which will have the
3564 * lowest TSN) and subtract one to get the
3565 * one we last sent.
3566 */
3567 struct sctp_tmit_chunk *ttt;
3569 ttt = TAILQ_FIRST(&asoc->send_queue);
3570 tp1->rec.data.fast_retran_tsn =
3571 ttt->rec.data.TSN_seq;
3576 * this guy had a RTO calculation pending on
3577 * it; cancel it.
3578 */
3579 if ((tp1->whoTo != NULL) &&
3580 (tp1->whoTo->rto_needed == 0)) {
3581 tp1->whoTo->rto_needed = 1;
3585 if (alt != tp1->whoTo) {
3586 /* yes, there is an alternate. */
3587 sctp_free_remote_addr(tp1->whoTo);
3588 /* sa_ignore FREED_MEMORY */
3590 atomic_add_int(&alt->ref_count, 1);
3596 struct sctp_tmit_chunk *
3597 sctp_try_advance_peer_ack_point(struct sctp_tcb *stcb,
3598 struct sctp_association *asoc)
3600 struct sctp_tmit_chunk *tp1, *tp2, *a_adv = NULL;
3601 struct timeval now;
3602 int now_filled = 0;
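/*
 * PR-SCTP (RFC 3758): walk the sent queue from the front and
 * move the Advanced.Peer.Ack.Point forward over abandoned
 * chunks (FORWARD_TSN_SKIP or NR_ACKED), stopping at the first
 * chunk the peer may still legitimately expect.
 */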
3604 if (asoc->peer_supports_prsctp == 0) {
3605 return (NULL);
3606 }
3607 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3608 if (tp1->sent != SCTP_FORWARD_TSN_SKIP &&
3609 tp1->sent != SCTP_DATAGRAM_RESEND &&
3610 tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3611 /* no chance to advance, out of here */
3614 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
3615 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3616 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3617 sctp_misc_ints(SCTP_FWD_TSN_CHECK,
3618 asoc->advanced_peer_ack_point,
3619 tp1->rec.data.TSN_seq, 0, 0);
3622 if (!PR_SCTP_ENABLED(tp1->flags)) {
3624 * We can't fwd-tsn past any that are reliable aka
3625 * retransmitted until the asoc fails.
3630 (void)SCTP_GETTIME_TIMEVAL(&now);
3634 * now we got a chunk which is marked for another
3635 * retransmission to a PR-stream but has run out its chances
3636 * already, maybe, OR has been marked to skip now. Can we skip
3637 * it if it's a resend?
3638 */
3639 if (tp1->sent == SCTP_DATAGRAM_RESEND &&
3640 (PR_SCTP_TTL_ENABLED(tp1->flags))) {
3642 * Now is this one marked for resend and its time is
3643 * now up?
3644 */
3645 if (timevalcmp(&now, &tp1->rec.data.timetodrop, >)) {
3646 /* Yes so drop it */
3648 (void)sctp_release_pr_sctp_chunk(stcb, tp1,
3649 1, SCTP_SO_NOT_LOCKED);
3653 * No, we are done when we hit one marked for resend
3654 * whose time has not expired.
3655 */
3659 /*
3660 * Ok now if this chunk is marked to drop it we can clean up
3661 * the chunk, advance our peer ack point and we can check
3662 * the next chunk.
3663 */
3664 if ((tp1->sent == SCTP_FORWARD_TSN_SKIP) ||
3665 (tp1->sent == SCTP_DATAGRAM_NR_ACKED)) {
3666 /* advance PeerAckPoint goes forward */
3667 if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, asoc->advanced_peer_ack_point)) {
3668 asoc->advanced_peer_ack_point = tp1->rec.data.TSN_seq;
3670 } else if (tp1->rec.data.TSN_seq == asoc->advanced_peer_ack_point) {
3671 /* No update but we do save the chk */
3676 * If it is still in RESEND we can advance no
3677 * further.
3678 */
3686 sctp_fs_audit(struct sctp_association *asoc)
3688 struct sctp_tmit_chunk *chk;
3689 int inflight = 0, resend = 0, inbetween = 0, acked = 0, above = 0;
3690 int entry_flight, entry_cnt, ret;
3692 entry_flight = asoc->total_flight;
3693 entry_cnt = asoc->total_flight_count;
3694 ret = 0;
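/*
 * Consistency audit (sketch): recount from the sent queue what
 * should still be in flight and compare with the cached
 * total_flight counters; a nonzero return tells the caller that
 * the counters drifted and must be rebuilt.
 */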
3696 if (asoc->pr_sctp_cnt >= asoc->sent_queue_cnt)
3697 return (0);
3699 TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
3700 if (chk->sent < SCTP_DATAGRAM_RESEND) {
3701 SCTP_PRINTF("Chk TSN:%u size:%d inflight cnt:%d\n",
3702 chk->rec.data.TSN_seq,
3706 } else if (chk->sent == SCTP_DATAGRAM_RESEND) {
3708 } else if (chk->sent < SCTP_DATAGRAM_ACKED) {
3710 } else if (chk->sent > SCTP_DATAGRAM_ACKED) {
3717 if ((inflight > 0) || (inbetween > 0)) {
3719 panic("Flight size-express incorrect? \n");
3721 SCTP_PRINTF("asoc->total_flight:%d cnt:%d\n",
3722 entry_flight, entry_cnt);
3724 SCTP_PRINTF("Flight size-express incorrect F:%d I:%d R:%d Ab:%d ACK:%d\n",
3725 inflight, inbetween, resend, above, acked);
3734 sctp_window_probe_recovery(struct sctp_tcb *stcb,
3735 struct sctp_association *asoc,
3736 struct sctp_tmit_chunk *tp1)
3738 tp1->window_probe = 0;
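/*
 * Background (sketch): when the peer's rwnd closes to zero a
 * single chunk is still sent as a window probe. Once the window
 * reopens, a probe chunk that was never acked is pulled out of
 * flight here and marked for retransmission.
 */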
3739 if ((tp1->sent >= SCTP_DATAGRAM_ACKED) || (tp1->data == NULL)) {
3740 /* TSNs skipped; we do NOT move back. */
3741 sctp_misc_ints(SCTP_FLIGHT_LOG_DWN_WP_FWD,
3742 tp1->whoTo->flight_size,
3744 (uintptr_t) tp1->whoTo,
3745 tp1->rec.data.TSN_seq);
3748 /* First setup this by shrinking flight */
3749 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3750 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3753 sctp_flight_size_decrease(tp1);
3754 sctp_total_flight_decrease(stcb, tp1);
3755 /* Now mark for resend */
3756 tp1->sent = SCTP_DATAGRAM_RESEND;
3757 sctp_ucount_incr(asoc->sent_queue_retran_cnt);
3759 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3760 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_WP,
3761 tp1->whoTo->flight_size,
3763 (uintptr_t) tp1->whoTo,
3764 tp1->rec.data.TSN_seq);
3769 sctp_express_handle_sack(struct sctp_tcb *stcb, uint32_t cumack,
3770 uint32_t rwnd, int *abort_now, int ecne_seen)
3772 struct sctp_nets *net;
3773 struct sctp_association *asoc;
3774 struct sctp_tmit_chunk *tp1, *tp2;
3775 uint32_t old_rwnd;
3776 int win_probe_recovery = 0;
3777 int win_probe_recovered = 0;
3778 int j, done_once = 0;
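/*
 * Fast path: this handler serves SACKs that move only the
 * cumulative ack (no gap-ack blocks and no duplicate TSNs), so
 * the heavier general SACK processing can be skipped.
 */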
3781 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
3782 sctp_misc_ints(SCTP_SACK_LOG_EXPRESS, cumack,
3783 rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
3785 SCTP_TCB_LOCK_ASSERT(stcb);
3786 #ifdef SCTP_ASOCLOG_OF_TSNS
3787 stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cumack;
3788 stcb->asoc.cumack_log_at++;
3789 if (stcb->asoc.cumack_log_at >= SCTP_TSN_LOG_SIZE) { /* wrap before indexing past the log */
3790 stcb->asoc.cumack_log_at = 0;
3791 }
3792 #endif
3793 asoc = &stcb->asoc;
3794 old_rwnd = asoc->peers_rwnd;
3795 if (SCTP_TSN_GT(asoc->last_acked_seq, cumack)) {
3796 /* old ack */
3797 return;
3798 } else if (asoc->last_acked_seq == cumack) {
3799 /* Window update sack */
3800 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
3801 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
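/*
 * Worked example (illustrative numbers): with an advertised
 * rwnd of 64000, 12000 bytes in flight across 10 chunks, and
 * sctp_peer_chunk_oh at 256, the usable peer rwnd becomes
 * 64000 - (12000 + 10 * 256) = 49440 bytes.
 */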
3802 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
3803 /* SWS sender side engages */
3804 asoc->peers_rwnd = 0;
3806 if (asoc->peers_rwnd > old_rwnd) {
3811 /* First setup for CC stuff */
3812 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
3813 if (SCTP_TSN_GT(cumack, net->cwr_window_tsn)) {
3814 /* Drag along the window_tsn for cwr's */
3815 net->cwr_window_tsn = cumack;
3817 net->prev_cwnd = net->cwnd;
3822 * CMT: Reset CUC and Fast recovery algo variables before
3823 * SACK processing.
3824 */
3825 net->new_pseudo_cumack = 0;
3826 net->will_exit_fast_recovery = 0;
3827 if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
3828 (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
3831 if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
3834 if (!TAILQ_EMPTY(&asoc->sent_queue)) {
3835 tp1 = TAILQ_LAST(&asoc->sent_queue,
3836 sctpchunk_listhead);
3837 send_s = tp1->rec.data.TSN_seq + 1;
3839 send_s = asoc->sending_seq;
3841 if (SCTP_TSN_GE(cumack, send_s)) {
3847 panic("Impossible sack 1");
3852 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
3853 0, M_DONTWAIT, 1, MT_DATA);
3855 struct sctp_paramhdr *ph;
3858 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
3859 sizeof(uint32_t);
3860 ph = mtod(oper, struct sctp_paramhdr *);
3861 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
3862 ph->param_length = htons(SCTP_BUF_LEN(oper));
3863 ippp = (uint32_t *) (ph + 1);
3864 *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
3866 stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
3867 sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
3872 asoc->this_sack_highest_gap = cumack;
3873 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
3874 sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
3875 stcb->asoc.overall_error_count,
3877 SCTP_FROM_SCTP_INDATA,
3880 stcb->asoc.overall_error_count = 0;
3881 if (SCTP_TSN_GT(cumack, asoc->last_acked_seq)) {
3882 /* process the new consecutive TSN first */
3883 TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
3884 if (SCTP_TSN_GE(cumack, tp1->rec.data.TSN_seq)) {
3885 if (tp1->sent == SCTP_DATAGRAM_UNSENT) {
3886 SCTP_PRINTF("Warning, an unsent is now acked?\n");
3888 if (tp1->sent < SCTP_DATAGRAM_ACKED) {
3889 /*
3890 * If it is less than ACKED, it is
3891 * now no longer in flight. Higher
3892 * values may occur during marking
3893 */
3894 if (tp1->sent < SCTP_DATAGRAM_RESEND) {
3895 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
3896 sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
3897 tp1->whoTo->flight_size,
3899 (uintptr_t) tp1->whoTo,
3900 tp1->rec.data.TSN_seq);
3902 sctp_flight_size_decrease(tp1);
3903 if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
3904 (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
3907 /* sa_ignore NO_NULL_CHK */
3908 sctp_total_flight_decrease(stcb, tp1);
3910 tp1->whoTo->net_ack += tp1->send_size;
3911 if (tp1->snd_count < 2) {
3913 * True non-retransmitted
3914 * chunk
3915 */
3916 tp1->whoTo->net_ack2 +=
3919 /* update RTO too? */
3928 sctp_calculate_rto(stcb,
3930 &tp1->sent_rcv_time,
3931 sctp_align_safe_nocopy,
3932 SCTP_RTT_FROM_DATA);
3935 if (tp1->whoTo->rto_needed == 0) {
3936 tp1->whoTo->rto_needed = 1;
3942 * CMT: CUCv2 algorithm. From the
3943 * cumack'd TSNs, for each TSN being
3944 * acked for the first time, set the
3945 * following variables for the
3946 * corresp destination.
3947 * new_pseudo_cumack will trigger a
3948 * cwnd update.
3949 * find_(rtx_)pseudo_cumack will
3950 * trigger search for the next
3951 * expected (rtx-)pseudo-cumack.
3953 tp1->whoTo->new_pseudo_cumack = 1;
3954 tp1->whoTo->find_pseudo_cumack = 1;
3955 tp1->whoTo->find_rtx_pseudo_cumack = 1;
3957 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
3958 /* sa_ignore NO_NULL_CHK */
3959 sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
3962 if (tp1->sent == SCTP_DATAGRAM_RESEND) {
3963 sctp_ucount_decr(asoc->sent_queue_retran_cnt);
3965 if (tp1->rec.data.chunk_was_revoked) {
3966 /* deflate the cwnd */
3967 tp1->whoTo->cwnd -= tp1->book_size;
3968 tp1->rec.data.chunk_was_revoked = 0;
3970 if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
3971 if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
3972 asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
3975 panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
3979 TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
3981 /* sa_ignore NO_NULL_CHK */
3982 sctp_free_bufspace(stcb, asoc, tp1, 1);
3983 sctp_m_freem(tp1->data);
3986 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
3987 sctp_log_sack(asoc->last_acked_seq,
3989 tp1->rec.data.TSN_seq,
3992 SCTP_LOG_FREE_SENT);
3994 asoc->sent_queue_cnt--;
3995 sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
4002 /* sa_ignore NO_NULL_CHK */
4003 if (stcb->sctp_socket) {
4004 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4005 struct socket *so;
4006
4007 #endif
4008 SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
4009 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4010 /* sa_ignore NO_NULL_CHK */
4011 sctp_wakeup_log(stcb, 1, SCTP_WAKESND_FROM_SACK);
4013 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4014 so = SCTP_INP_SO(stcb->sctp_ep);
4015 atomic_add_int(&stcb->asoc.refcnt, 1);
4016 SCTP_TCB_UNLOCK(stcb);
4017 SCTP_SOCKET_LOCK(so, 1);
4018 SCTP_TCB_LOCK(stcb);
4019 atomic_subtract_int(&stcb->asoc.refcnt, 1);
4020 if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
4021 /* assoc was freed while we were unlocked */
4022 SCTP_SOCKET_UNLOCK(so, 1);
4026 sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
4027 #if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
4028 SCTP_SOCKET_UNLOCK(so, 1);
4031 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
4032 sctp_wakeup_log(stcb, 1, SCTP_NOWAKE_FROM_SACK);
4036 /* JRS - Use the congestion control given in the CC module */
4037 if ((asoc->last_acked_seq != cumack) && (ecne_seen == 0)) {
4038 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4039 if (net->net_ack2 > 0) {
4041 * Karn's rule applies to clearing error
4042 * count, this is optional.
4044 net->error_count = 0;
4045 if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
4046 /* addr came good */
4047 net->dest_state |= SCTP_ADDR_REACHABLE;
4048 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
4049 0, (void *)net, SCTP_SO_NOT_LOCKED);
4051 if (net == stcb->asoc.primary_destination) {
4052 if (stcb->asoc.alternate) {
4054 * release the alternate;
4055 * the primary is good again
4056 */
4057 sctp_free_remote_addr(stcb->asoc.alternate);
4058 stcb->asoc.alternate = NULL;
4061 if (net->dest_state & SCTP_ADDR_PF) {
4062 net->dest_state &= ~SCTP_ADDR_PF;
4063 sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
4064 sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
4065 asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
4066 /* Done with this net */
4069 /* restore any doubled timers */
4070 net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
4071 if (net->RTO < stcb->asoc.minrto) {
4072 net->RTO = stcb->asoc.minrto;
4074 if (net->RTO > stcb->asoc.maxrto) {
4075 net->RTO = stcb->asoc.maxrto;
4079 asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, 1, 0, 0);
4081 asoc->last_acked_seq = cumack;
4083 if (TAILQ_EMPTY(&asoc->sent_queue)) {
4084 /* nothing left in-flight */
4085 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
4086 net->flight_size = 0;
4087 net->partial_bytes_acked = 0;
4089 asoc->total_flight = 0;
4090 asoc->total_flight_count = 0;
4093 asoc->peers_rwnd = sctp_sbspace_sub(rwnd,
4094 (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
4095 if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
4096 /* SWS sender side engages */
4097 asoc->peers_rwnd = 0;
4099 if (asoc->peers_rwnd > old_rwnd) {
4100 win_probe_recovery = 1;
4102 /* Now assure a timer is running wherever data is queued */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        int to_ticks;

        if (win_probe_recovery && (net->window_probe)) {
            win_probe_recovered = 1;
            /*
             * Find the first chunk that was used with a window
             * probe and clear its probe flag.
             */
            /* sa_ignore FREED_MEMORY */
            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
                if (tp1->window_probe) {
                    /* move back to data send queue */
                    sctp_window_probe_recovery(stcb, asoc, tp1);
                    break;
                }
            }
        }
        if (net->RTO == 0) {
            to_ticks = MSEC_TO_TICKS(stcb->asoc.initial_rto);
        } else {
            to_ticks = MSEC_TO_TICKS(net->RTO);
        }
        if (net->flight_size) {
            j++;
            (void)SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
                sctp_timeout_handler, &net->rxt_timer);
            if (net->window_probe) {
                net->window_probe = 0;
            }
        } else {
            if (net->window_probe) {
                /*
                 * In window probes we must assure that a
                 * timer is still running there.
                 */
                net->window_probe = 0;
                if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                    SCTP_OS_TIMER_START(&net->rxt_timer.timer, to_ticks,
                        sctp_timeout_handler, &net->rxt_timer);
                }
            } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                    stcb, net,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
            }
        }
    }
    if ((j == 0) &&
        (!TAILQ_EMPTY(&asoc->sent_queue)) &&
        (asoc->sent_queue_retran_cnt == 0) &&
        (win_probe_recovered == 0) &&
        (done_once == 0)) {
        /*
         * huh, this should not happen unless all packets are
         * PR-SCTP and marked to be skipped, of course.
         */
        if (sctp_fs_audit(asoc)) {
            TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
                net->flight_size = 0;
            }
            asoc->total_flight = 0;
            asoc->total_flight_count = 0;
            asoc->sent_queue_retran_cnt = 0;
            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
                if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                    sctp_flight_size_increase(tp1);
                    sctp_total_flight_increase(stcb, tp1);
                } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
                    sctp_ucount_incr(asoc->sent_queue_retran_cnt);
                }
            }
        }
        done_once = 1;
        goto again;
    }
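    /*
     * The audit pass above resets the per-net and association flight
     * counters and rebuilds them from the sent queue, so a counter that
     * drifted out of sync cannot leave the association stalled with no
     * T3 timer running; the goto retries the timer setup once.
     */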
    /**********************************/
    /* Now what about shutdown issues */
    /**********************************/
    if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
        /* nothing left on send queue... consider done */
        if ((asoc->stream_queue_cnt == 1) &&
            ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
            (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
            (asoc->locked_on_sending)) {
            struct sctp_stream_queue_pending *sp;

            /*
             * We may have gotten all the data across, but
             * cannot write more due to a shutdown. We abort,
             * since the user did not indicate EOR in this
             * case. The sp will be cleaned up during free of
             * the asoc.
             */
            sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
                sctp_streamhead);
            if ((sp) && (sp->length == 0)) {
                /* Let cleanup code purge it */
                if (sp->msg_is_complete) {
                    asoc->stream_queue_cnt--;
                } else {
                    asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
                    asoc->locked_on_sending = NULL;
                    asoc->stream_queue_cnt--;
                }
            }
        }
        if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
            (asoc->stream_queue_cnt == 0)) {
            if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
                /* Need to abort here */
                struct mbuf *oper;

        abort_out_now:
                *abort_now = 1;
                /* XXX */
                oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
                    0, M_DONTWAIT, 1, MT_DATA);
                if (oper) {
                    struct sctp_paramhdr *ph;

                    SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_24;
                sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
            } else {
                struct sctp_nets *netp;

                if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
                    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
                    SCTP_STAT_DECR_GAUGE32(sctps_currestab);
                }
                SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
                SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
                sctp_stop_timers_for_shutdown(stcb);
                if (asoc->alternate) {
                    netp = asoc->alternate;
                } else {
                    netp = asoc->primary_destination;
                }
                sctp_send_shutdown(stcb, netp);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
                    stcb->sctp_ep, stcb, netp);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
                    stcb->sctp_ep, stcb, netp);
            }
        } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
            (asoc->stream_queue_cnt == 0)) {
            struct sctp_nets *netp;

            if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
                goto abort_out_now;
            }
            SCTP_STAT_DECR_GAUGE32(sctps_currestab);
            SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
            SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
            sctp_stop_timers_for_shutdown(stcb);
            if (asoc->alternate) {
                netp = asoc->alternate;
            } else {
                netp = asoc->primary_destination;
            }
            sctp_send_shutdown_ack(stcb, netp);
            sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
                stcb->sctp_ep, stcb, netp);
        }
    }
    /*********************************************/
    /* Here we perform PR-SCTP procedures        */
    /* (section 4.2)                             */
    /*********************************************/
    /* C1. update advancedPeerAckPoint */
    if (SCTP_TSN_GT(cumack, asoc->advanced_peer_ack_point)) {
        asoc->advanced_peer_ack_point = cumack;
    }
    /* PR-SCTP issues need to be addressed too */
    if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
        struct sctp_tmit_chunk *lchk;
        uint32_t old_adv_peer_ack_point;

        old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
        lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
        /* C3. See if we need to send a Fwd-TSN */
        if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cumack)) {
            /*
             * ISSUE with ECN, see FWD-TSN processing.
             */
            if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
                send_forward_tsn(stcb, asoc);
            } else if (lchk) {
                /* try to FR fwd-tsn's that get lost too */
                if (lchk->rec.data.fwd_tsn_cnt >= 3) {
                    send_forward_tsn(stcb, asoc);
                }
            }
        }
        if (lchk) {
            /* Assure a timer is up */
            sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                stcb->sctp_ep, stcb, lchk->whoTo);
        }
    }
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
        sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
            rwnd,
            stcb->asoc.peers_rwnd,
            stcb->asoc.total_flight,
            stcb->asoc.total_output_queue_size);
    }
}
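/*
 * Full SACK processing. Unlike the express (cum-ack only) path above, this
 * handler must also walk the gap-ack and NR gap-ack blocks, track the
 * biggest TSN acked, detect revoked chunks, and drive the fast-retransmit
 * marking machinery.
 */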
void
sctp_handle_sack(struct mbuf *m, int offset_seg, int offset_dup,
    struct sctp_tcb *stcb,
    uint16_t num_seg, uint16_t num_nr_seg, uint16_t num_dup,
    int *abort_now, uint8_t flags,
    uint32_t cum_ack, uint32_t rwnd, int ecne_seen)
{
    struct sctp_association *asoc;
    struct sctp_tmit_chunk *tp1, *tp2;
    uint32_t last_tsn, biggest_tsn_acked, biggest_tsn_newly_acked, this_sack_lowest_newack;
    uint16_t wake_him = 0;
    uint32_t send_s = 0;
    int j;
    int accum_moved = 0;
    int will_exit_fast_recovery = 0;
    uint32_t a_rwnd, old_rwnd;
    int win_probe_recovery = 0;
    int win_probe_recovered = 0;
    struct sctp_nets *net = NULL;
    int done_once;
    int rto_ok = 1;
    uint8_t reneged_all = 0;
    uint8_t cmt_dac_flag;

    /*
     * We take any chance we can to service our queues, since we cannot
     * get awoken when the socket is read from :<
     */
    /*
     * Now perform the actual SACK handling:
     *
     * 1) Verify that it is not an old sack; if so, discard.
     * 2) If there is nothing left in the send queue (cum-ack is equal to
     *    last acked), then you have a duplicate too; update any rwnd
     *    change and verify no timers are running, then return.
     * 3) Process any new consecutive data, i.e. cum-ack moved; process
     *    these first and note that it moved.
     * 4) Process any sack blocks.
     * 5) Drop any acked chunks from the queue.
     * 6) Check for any revoked blocks and mark them.
     * 7) Update the cwnd.
     * 8) Nothing left: sync up flight sizes and things, stop all timers
     *    and also check for shutdown_pending state. If so, go ahead and
     *    send off the shutdown. If in shutdown recv, send off the
     *    shutdown-ack and start that timer, then return.
     * 9) Strike any non-acked things and do the FR procedure if needed,
     *    being sure to set the FR flag.
     * 10) Do PR-SCTP procedures.
     * 11) Apply any FR penalties.
     * 12) Assure we will SACK if in shutdown_recv state.
     */
    SCTP_TCB_LOCK_ASSERT(stcb);
    /* CMT DAC algo */
    this_sack_lowest_newack = 0;
    SCTP_STAT_INCR(sctps_slowpath_sack);
    last_tsn = cum_ack;
    cmt_dac_flag = flags & SCTP_SACK_CMT_DAC;
#ifdef SCTP_ASOCLOG_OF_TSNS
    stcb->asoc.cumack_log[stcb->asoc.cumack_log_at] = cum_ack;
    stcb->asoc.cumack_log_at++;
    if (stcb->asoc.cumack_log_at > SCTP_TSN_LOG_SIZE) {
        stcb->asoc.cumack_log_at = 0;
    }
#endif
    a_rwnd = rwnd;

    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_SACK_ARRIVALS_ENABLE) {
        sctp_misc_ints(SCTP_SACK_LOG_NORMAL, cum_ack,
            rwnd, stcb->asoc.last_acked_seq, stcb->asoc.peers_rwnd);
    }
    old_rwnd = stcb->asoc.peers_rwnd;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
        sctp_misc_ints(SCTP_THRESHOLD_CLEAR,
            stcb->asoc.overall_error_count,
            0,
            SCTP_FROM_SCTP_INDATA,
            __LINE__);
    }
    stcb->asoc.overall_error_count = 0;
    asoc = &stcb->asoc;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
        sctp_log_sack(asoc->last_acked_seq,
            cum_ack,
            0,
            num_seg,
            num_dup,
            SCTP_LOG_NEW_SACK);
    }
    if ((num_dup) && (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE)) {
        uint16_t i;
        uint32_t *dupdata, dblock;

        for (i = 0; i < num_dup; i++) {
            dupdata = (uint32_t *) sctp_m_getptr(m, offset_dup + i * sizeof(uint32_t),
                sizeof(uint32_t), (uint8_t *) & dblock);
            if (dupdata == NULL) {
                break;
            }
            sctp_log_fr(*dupdata, 0, 0, SCTP_FR_DUPED);
        }
    }
    if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
        /* reality check */
        if (!TAILQ_EMPTY(&asoc->sent_queue)) {
            tp1 = TAILQ_LAST(&asoc->sent_queue,
                sctpchunk_listhead);
            send_s = tp1->rec.data.TSN_seq + 1;
        } else {
            tp1 = NULL;
            send_s = asoc->sending_seq;
        }
        if (SCTP_TSN_GE(cum_ack, send_s)) {
            struct mbuf *oper;

            /*
             * no way, we have not even sent this TSN out yet.
             * Peer is hopelessly messed up with us.
             */
            SCTP_PRINTF("NEW cum_ack:%x send_s:%x is smaller or equal\n",
                cum_ack, send_s);
            if (tp1) {
                SCTP_PRINTF("Got send_s from tsn:%x + 1 of tp1:%p\n",
                    tp1->rec.data.TSN_seq, (void *)tp1);
            }
    hopeless_peer:
            *abort_now = 1;
            /* XXX */
            oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
                0, M_DONTWAIT, 1, MT_DATA);
            if (oper) {
                struct sctp_paramhdr *ph;
                uint32_t *ippp;

                SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                    sizeof(uint32_t);
                ph = mtod(oper, struct sctp_paramhdr *);
                ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                ph->param_length = htons(SCTP_BUF_LEN(oper));
                ippp = (uint32_t *) (ph + 1);
                *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_25);
            }
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_25;
            sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
            return;
        }
    }
    /**********************/
    /* 1) check the range */
    /**********************/
    if (SCTP_TSN_GT(asoc->last_acked_seq, last_tsn)) {
        /* acking something behind */
        return;
    }
    /* update the Rwnd of the peer */
    if (TAILQ_EMPTY(&asoc->sent_queue) &&
        TAILQ_EMPTY(&asoc->send_queue) &&
        (asoc->stream_queue_cnt == 0)) {
        /* nothing left on send/sent and strmq */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
            sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
                asoc->peers_rwnd, 0, 0, a_rwnd);
        }
        asoc->peers_rwnd = a_rwnd;
        if (asoc->sent_queue_retran_cnt) {
            asoc->sent_queue_retran_cnt = 0;
        }
        if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
            /* SWS sender side engages */
            asoc->peers_rwnd = 0;
        }
        /* stop any timers */
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_26);
            net->partial_bytes_acked = 0;
            net->flight_size = 0;
        }
        asoc->total_flight = 0;
        asoc->total_flight_count = 0;
        return;
    }
    /*
     * We init netAckSz and netAckSz2 to 0. These are used to track two
     * things. The total byte count acked is tracked in netAckSz AND
     * netAck2 is used to track the total bytes acked that are
     * unambiguous and were never retransmitted. We track these on a
     * per destination address basis.
     */
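    /*
     * For example, a chunk acked on its first transmission counts toward
     * both net_ack and net_ack2, while a chunk that was ever
     * retransmitted counts only toward net_ack: its ack is ambiguous, so
     * it is excluded from RTT sampling and error-count clearing.
     */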
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if (SCTP_TSN_GT(cum_ack, net->cwr_window_tsn)) {
            /* Drag along the window_tsn for cwr's */
            net->cwr_window_tsn = cum_ack;
        }
        net->prev_cwnd = net->cwnd;
        net->net_ack = 0;
        net->net_ack2 = 0;

        /*
         * CMT: Reset CUC and Fast recovery algo variables before
         * SACK processing.
         */
        net->new_pseudo_cumack = 0;
        net->will_exit_fast_recovery = 0;
        if (stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) {
            (*stcb->asoc.cc_functions.sctp_cwnd_prepare_net_for_sack) (stcb, net);
        }
    }
    /* process the new consecutive TSN first */
    TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
        if (SCTP_TSN_GE(last_tsn, tp1->rec.data.TSN_seq)) {
            if (tp1->sent != SCTP_DATAGRAM_UNSENT) {
                accum_moved = 1;
                if (tp1->sent < SCTP_DATAGRAM_ACKED) {
                    /*
                     * If it is less than ACKED, it is
                     * now no longer in flight. Higher
                     * values may occur during marking.
                     */
                    if ((tp1->whoTo->dest_state &
                        SCTP_ADDR_UNCONFIRMED) &&
                        (tp1->snd_count < 2)) {
                        /*
                         * If there was no
                         * retransmission, the address
                         * is unconfirmed, and we sent
                         * there and are now sacked:
                         * it is confirmed, mark it so.
                         */
                        tp1->whoTo->dest_state &=
                            ~SCTP_ADDR_UNCONFIRMED;
                    }
                    if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
                            sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_CA,
                                tp1->whoTo->flight_size,
                                tp1->book_size,
                                (uintptr_t) tp1->whoTo,
                                tp1->rec.data.TSN_seq);
                        }
                        sctp_flight_size_decrease(tp1);
                        sctp_total_flight_decrease(stcb, tp1);
                        if (stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) {
                            (*stcb->asoc.cc_functions.sctp_cwnd_update_tsn_acknowledged) (tp1->whoTo,
                                tp1);
                        }
                    }
                    tp1->whoTo->net_ack += tp1->send_size;

                    /* CMT SFR and DAC algos */
                    this_sack_lowest_newack = tp1->rec.data.TSN_seq;
                    tp1->whoTo->saw_newack = 1;

                    if (tp1->snd_count < 2) {
                        /*
                         * True non-retransmitted
                         * chunk
                         */
                        tp1->whoTo->net_ack2 +=
                            tp1->send_size;

                        /* update RTO too? */
                        if (tp1->do_rtt) {
                            if (rto_ok) {
                                tp1->whoTo->RTO =
                                /* sa_ignore NO_NULL_CHK */
                                    sctp_calculate_rto(stcb,
                                    asoc, tp1->whoTo,
                                    &tp1->sent_rcv_time,
                                    sctp_align_safe_nocopy,
                                    SCTP_RTT_FROM_DATA);
                                rto_ok = 0;
                            }
                            if (tp1->whoTo->rto_needed == 0) {
                                tp1->whoTo->rto_needed = 1;
                            }
                            tp1->do_rtt = 0;
                        }
                    }
                    /*
                     * CMT: CUCv2 algorithm. From the
                     * cumack'd TSNs, for each TSN being
                     * acked for the first time, set the
                     * following variables for the
                     * corresponding destination.
                     * new_pseudo_cumack will trigger a
                     * cwnd update.
                     * find_(rtx_)pseudo_cumack will
                     * trigger search for the next
                     * expected (rtx-)pseudo-cumack.
                     */
                    tp1->whoTo->new_pseudo_cumack = 1;
                    tp1->whoTo->find_pseudo_cumack = 1;
                    tp1->whoTo->find_rtx_pseudo_cumack = 1;

                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
                        sctp_log_sack(asoc->last_acked_seq,
                            cum_ack,
                            tp1->rec.data.TSN_seq,
                            0,
                            0,
                            SCTP_LOG_TSN_ACKED);
                    }
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, tp1->whoTo, tp1->rec.data.TSN_seq, SCTP_CWND_LOG_FROM_SACK);
                    }
                }
                if (tp1->sent == SCTP_DATAGRAM_RESEND) {
                    sctp_ucount_decr(asoc->sent_queue_retran_cnt);
#ifdef SCTP_AUDITING_ENABLED
                    sctp_audit_log(0xB3,
                        (asoc->sent_queue_retran_cnt & 0x000000ff));
#endif
                }
                if (tp1->rec.data.chunk_was_revoked) {
                    /* deflate the cwnd */
                    tp1->whoTo->cwnd -= tp1->book_size;
                    tp1->rec.data.chunk_was_revoked = 0;
                }
                if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
                    tp1->sent = SCTP_DATAGRAM_ACKED;
                }
            }
        } else {
            break;
        }
    }
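    /*
     * Note: chunks are only marked ACKED (or left NR_ACKED) here; they
     * stay on the sent queue until the "drop the acked chunks" pass
     * below actually removes and frees them.
     */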
    biggest_tsn_newly_acked = biggest_tsn_acked = last_tsn;
    /* always set this up to cum-ack */
    asoc->this_sack_highest_gap = last_tsn;

    if ((num_seg > 0) || (num_nr_seg > 0)) {
        /*
         * CMT: SFR algo (and HTNA) - this_sack_highest_newack has
         * to be greater than the cumack. Also reset saw_newack to 0
         * for all dests.
         */
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            net->saw_newack = 0;
            net->this_sack_highest_newack = last_tsn;
        }

        /*
         * thisSackHighestGap will increase while handling NEW
         * segments. this_sack_highest_newack will increase while
         * handling NEWLY ACKED chunks. this_sack_lowest_newack is
         * used for the CMT DAC algo. saw_newack will also change.
         */
        if (sctp_handle_segments(m, &offset_seg, stcb, asoc, last_tsn, &biggest_tsn_acked,
            &biggest_tsn_newly_acked, &this_sack_lowest_newack,
            num_seg, num_nr_seg, &rto_ok)) {
            wake_him++;
        }
        if (SCTP_BASE_SYSCTL(sctp_strict_sacks)) {
            /*
             * validate the biggest_tsn_acked in the gap acks if
             * strict adherence is wanted.
             */
            if (SCTP_TSN_GE(biggest_tsn_acked, send_s)) {
                /*
                 * peer is either confused or we are under
                 * attack. We must abort.
                 */
                SCTP_PRINTF("Hopeless peer! biggest_tsn_acked:%x largest seq:%x\n",
                    biggest_tsn_acked, send_s);
                goto hopeless_peer;
            }
        }
    }
    /*******************************************/
    /* cancel ALL T3-send timer if accum moved */
    /*******************************************/
    if (asoc->sctp_cmt_on_off > 0) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            if (net->new_pseudo_cumack)
                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                    stcb, net,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_27);
        }
    } else {
        if (accum_moved) {
            TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                    stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_28);
            }
        }
    }
    /********************************************/
    /* drop the acked chunks from the sentqueue */
    /********************************************/
    asoc->last_acked_seq = cum_ack;

    TAILQ_FOREACH_SAFE(tp1, &asoc->sent_queue, sctp_next, tp2) {
        if (SCTP_TSN_GT(tp1->rec.data.TSN_seq, cum_ack)) {
            break;
        }
        if (tp1->sent != SCTP_DATAGRAM_NR_ACKED) {
            if (asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues > 0) {
                asoc->strmout[tp1->rec.data.stream_number].chunks_on_queues--;
#ifdef INVARIANTS
            } else {
                panic("No chunks on the queues for sid %u.", tp1->rec.data.stream_number);
#endif
            }
        }
        TAILQ_REMOVE(&asoc->sent_queue, tp1, sctp_next);
        if (tp1->pr_sctp_on) {
            if (asoc->pr_sctp_cnt != 0)
                asoc->pr_sctp_cnt--;
        }
        asoc->sent_queue_cnt--;
        if (tp1->data) {
            /* sa_ignore NO_NULL_CHK */
            sctp_free_bufspace(stcb, asoc, tp1, 1);
            sctp_m_freem(tp1->data);
            tp1->data = NULL;
            if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(tp1->flags)) {
                asoc->sent_queue_cnt_removeable--;
            }
        }
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_LOGGING_ENABLE) {
            sctp_log_sack(asoc->last_acked_seq,
                cum_ack,
                tp1->rec.data.TSN_seq,
                0,
                0,
                SCTP_LOG_FREE_SENT);
        }
        sctp_free_a_chunk(stcb, tp1, SCTP_SO_NOT_LOCKED);
        wake_him++;
    }
    if (TAILQ_EMPTY(&asoc->sent_queue) && (asoc->total_flight > 0)) {
#ifdef INVARIANTS
        panic("Warning flight size is positive and should be 0");
#else
        SCTP_PRINTF("Warning flight size incorrect should be 0 is %d\n",
            asoc->total_flight);
#endif
        asoc->total_flight = 0;
    }
    /* sa_ignore NO_NULL_CHK */
    if ((wake_him) && (stcb->sctp_socket)) {
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        struct socket *so;

#endif
        SOCKBUF_LOCK(&stcb->sctp_socket->so_snd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
            sctp_wakeup_log(stcb, wake_him, SCTP_WAKESND_FROM_SACK);
        }
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        so = SCTP_INP_SO(stcb->sctp_ep);
        atomic_add_int(&stcb->asoc.refcnt, 1);
        SCTP_TCB_UNLOCK(stcb);
        SCTP_SOCKET_LOCK(so, 1);
        SCTP_TCB_LOCK(stcb);
        atomic_subtract_int(&stcb->asoc.refcnt, 1);
        if (stcb->asoc.state & SCTP_STATE_CLOSED_SOCKET) {
            /* assoc was freed while we were unlocked */
            SCTP_SOCKET_UNLOCK(so, 1);
            return;
        }
#endif
        sctp_sowwakeup_locked(stcb->sctp_ep, stcb->sctp_socket);
#if defined(__APPLE__) || defined(SCTP_SO_LOCK_TESTING)
        SCTP_SOCKET_UNLOCK(so, 1);
#endif
    } else {
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_WAKE_LOGGING_ENABLE) {
            sctp_wakeup_log(stcb, wake_him, SCTP_NOWAKE_FROM_SACK);
        }
    }
    if (asoc->fast_retran_loss_recovery && accum_moved) {
        if (SCTP_TSN_GE(asoc->last_acked_seq, asoc->fast_recovery_tsn)) {
            /* Setup so we will exit RFC2582 fast recovery */
            will_exit_fast_recovery = 1;
        }
    }
    /*
     * Check for revoked fragments:
     *
     * - If the previous SACK had no frags, then we can't have any
     *   revoked ones.
     * - If the previous SACK had frags, then: if we now have frags (aka
     *   num_seg > 0), call sctp_check_for_revoked() to tell if the peer
     *   revoked some of them; else the peer revoked all ACKED fragments,
     *   since we had some before and now we have NONE.
     */
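    /*
     * Illustrative case: a previous SACK gap-acked TSN 15 while the
     * cum-ack sat at 10. If this SACK advances the cum-ack to 12 but no
     * longer reports 15, the peer has revoked (reneged on) TSN 15 and we
     * must treat it as in flight again.
     */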
    if (num_seg) {
        sctp_check_for_revoked(stcb, asoc, cum_ack, biggest_tsn_acked);
        asoc->saw_sack_with_frags = 1;
    } else if (asoc->saw_sack_with_frags) {
        int cnt_revoked = 0;

        /* Peer revoked all dg's marked or acked */
        TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
            if (tp1->sent == SCTP_DATAGRAM_ACKED) {
                tp1->sent = SCTP_DATAGRAM_SENT;
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
                    sctp_misc_ints(SCTP_FLIGHT_LOG_UP_REVOKE,
                        tp1->whoTo->flight_size,
                        tp1->book_size,
                        (uintptr_t) tp1->whoTo,
                        tp1->rec.data.TSN_seq);
                }
                sctp_flight_size_increase(tp1);
                sctp_total_flight_increase(stcb, tp1);
                tp1->rec.data.chunk_was_revoked = 1;
                /*
                 * To ensure that this increase in
                 * flightsize, which is artificial, does not
                 * throttle the sender, we also increase the
                 * cwnd artificially.
                 */
                tp1->whoTo->cwnd += tp1->book_size;
                cnt_revoked++;
            }
        }
        if (cnt_revoked) {
            reneged_all = 1;
        }
        asoc->saw_sack_with_frags = 0;
    }
    if (num_nr_seg > 0)
        asoc->saw_sack_with_nr_frags = 1;
    else
        asoc->saw_sack_with_nr_frags = 0;
    /* JRS - Use the congestion control given in the CC module */
    if (ecne_seen == 0) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            if (net->net_ack2 > 0) {
                /*
                 * Karn's rule applies to clearing the error
                 * count; this is optional.
                 */
                net->error_count = 0;
                if (!(net->dest_state & SCTP_ADDR_REACHABLE)) {
                    /* addr came good */
                    net->dest_state |= SCTP_ADDR_REACHABLE;
                    sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_UP, stcb,
                        0, (void *)net, SCTP_SO_NOT_LOCKED);
                }
                if (net == stcb->asoc.primary_destination) {
                    if (stcb->asoc.alternate) {
                        /* release the alternate, the primary is good */
                        sctp_free_remote_addr(stcb->asoc.alternate);
                        stcb->asoc.alternate = NULL;
                    }
                }
                if (net->dest_state & SCTP_ADDR_PF) {
                    net->dest_state &= ~SCTP_ADDR_PF;
                    sctp_timer_stop(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net, SCTP_FROM_SCTP_INPUT + SCTP_LOC_3);
                    sctp_timer_start(SCTP_TIMER_TYPE_HEARTBEAT, stcb->sctp_ep, stcb, net);
                    asoc->cc_functions.sctp_cwnd_update_exit_pf(stcb, net);
                    /* Done with this net */
                    continue;
                }
                /* restore any doubled timers */
                net->RTO = (net->lastsa >> SCTP_RTT_SHIFT) + net->lastsv;
                if (net->RTO < stcb->asoc.minrto) {
                    net->RTO = stcb->asoc.minrto;
                }
                if (net->RTO > stcb->asoc.maxrto) {
                    net->RTO = stcb->asoc.maxrto;
                }
            }
        }
        asoc->cc_functions.sctp_cwnd_update_after_sack(stcb, asoc, accum_moved, reneged_all, will_exit_fast_recovery);
    }
    if (TAILQ_EMPTY(&asoc->sent_queue)) {
        /* nothing left in-flight */
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            /* stop all timers */
            sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                stcb, net, SCTP_FROM_SCTP_INDATA + SCTP_LOC_30);
            net->flight_size = 0;
            net->partial_bytes_acked = 0;
        }
        asoc->total_flight = 0;
        asoc->total_flight_count = 0;
    }
    /**********************************/
    /* Now what about shutdown issues */
    /**********************************/
    if (TAILQ_EMPTY(&asoc->send_queue) && TAILQ_EMPTY(&asoc->sent_queue)) {
        /* nothing left on send queue... consider done */
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
            sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
                asoc->peers_rwnd, 0, 0, a_rwnd);
        }
        asoc->peers_rwnd = a_rwnd;
        if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
            /* SWS sender side engages */
            asoc->peers_rwnd = 0;
        }
        /* clean up */
        if ((asoc->stream_queue_cnt == 1) &&
            ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) ||
            (asoc->state & SCTP_STATE_SHUTDOWN_RECEIVED)) &&
            (asoc->locked_on_sending)) {
            struct sctp_stream_queue_pending *sp;

            /*
             * We may have gotten all the data across, but
             * cannot write more due to a shutdown. We abort,
             * since the user did not indicate EOR in this case.
             */
            sp = TAILQ_LAST(&((asoc->locked_on_sending)->outqueue),
                sctp_streamhead);
            if ((sp) && (sp->length == 0)) {
                asoc->locked_on_sending = NULL;
                if (sp->msg_is_complete) {
                    asoc->stream_queue_cnt--;
                } else {
                    asoc->state |= SCTP_STATE_PARTIAL_MSG_LEFT;
                    asoc->stream_queue_cnt--;
                }
            }
        }
        if ((asoc->state & SCTP_STATE_SHUTDOWN_PENDING) &&
            (asoc->stream_queue_cnt == 0)) {
            if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
                /* Need to abort here */
                struct mbuf *oper;

        abort_out_now:
                *abort_now = 1;
                /* XXX */
                oper = sctp_get_mbuf_for_msg(sizeof(struct sctp_paramhdr),
                    0, M_DONTWAIT, 1, MT_DATA);
                if (oper) {
                    struct sctp_paramhdr *ph;

                    SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr);
                    ph = mtod(oper, struct sctp_paramhdr *);
                    ph->param_type = htons(SCTP_CAUSE_USER_INITIATED_ABT);
                    ph->param_length = htons(SCTP_BUF_LEN(oper));
                }
                stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_31;
                sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
                return;
            } else {
                struct sctp_nets *netp;

                if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
                    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
                    SCTP_STAT_DECR_GAUGE32(sctps_currestab);
                }
                SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
                SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
                sctp_stop_timers_for_shutdown(stcb);
                if (asoc->alternate) {
                    netp = asoc->alternate;
                } else {
                    netp = asoc->primary_destination;
                }
                sctp_send_shutdown(stcb, netp);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
                    stcb->sctp_ep, stcb, netp);
                sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
                    stcb->sctp_ep, stcb, netp);
            }
            return;
        } else if ((SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED) &&
            (asoc->stream_queue_cnt == 0)) {
            struct sctp_nets *netp;

            if (asoc->state & SCTP_STATE_PARTIAL_MSG_LEFT) {
                goto abort_out_now;
            }
            SCTP_STAT_DECR_GAUGE32(sctps_currestab);
            SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_ACK_SENT);
            SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
            sctp_stop_timers_for_shutdown(stcb);
            if (asoc->alternate) {
                netp = asoc->alternate;
            } else {
                netp = asoc->primary_destination;
            }
            sctp_send_shutdown_ack(stcb, netp);
            sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK,
                stcb->sctp_ep, stcb, netp);
            return;
        }
    }
    /*
     * Now here we are going to recycle net_ack for a different use...
     * HEADS UP.
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        net->net_ack = 0;
    }

    /*
     * CMT DAC algorithm: If the SACK DAC flag was 0, then no extra
     * marking is to be done. Setting this_sack_lowest_newack to the
     * cum_ack will automatically ensure that.
     */
    if ((asoc->sctp_cmt_on_off > 0) &&
        SCTP_BASE_SYSCTL(sctp_cmt_use_dac) &&
        (cmt_dac_flag == 0)) {
        this_sack_lowest_newack = cum_ack;
    }
    if ((num_seg > 0) || (num_nr_seg > 0)) {
        sctp_strike_gap_ack_chunks(stcb, asoc, biggest_tsn_acked,
            biggest_tsn_newly_acked, this_sack_lowest_newack, accum_moved);
    }
    /* JRS - Use the congestion control given in the CC module */
    asoc->cc_functions.sctp_cwnd_update_after_fr(stcb, asoc);

    /* Now are we exiting loss recovery ? */
    if (will_exit_fast_recovery) {
        /* Ok, we must exit fast recovery */
        asoc->fast_retran_loss_recovery = 0;
    }
    if ((asoc->sat_t3_loss_recovery) &&
        SCTP_TSN_GE(asoc->last_acked_seq, asoc->sat_t3_recovery_tsn)) {
        /* end satellite t3 loss recovery */
        asoc->sat_t3_loss_recovery = 0;
    }
    /*
     * CMT Fast recovery
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if (net->will_exit_fast_recovery) {
            /* Ok, we must exit fast recovery */
            net->fast_retran_loss_recovery = 0;
        }
    }

    /* Adjust and set the new rwnd value */
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_RWND_ENABLE) {
        sctp_log_rwnd_set(SCTP_SET_PEER_RWND_VIA_SACK,
            asoc->peers_rwnd, asoc->total_flight, (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh)), a_rwnd);
    }
    asoc->peers_rwnd = sctp_sbspace_sub(a_rwnd,
        (uint32_t) (asoc->total_flight + (asoc->total_flight_count * SCTP_BASE_SYSCTL(sctp_peer_chunk_oh))));
    if (asoc->peers_rwnd < stcb->sctp_ep->sctp_ep.sctp_sws_sender) {
        /* SWS sender side engages */
        asoc->peers_rwnd = 0;
    }
    if (asoc->peers_rwnd > old_rwnd) {
        win_probe_recovery = 1;
    }
    /*
     * Now we must setup so we have a timer up for anyone with
     * outstanding data.
     */
    done_once = 0;
again:
    j = 0;
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if (win_probe_recovery && (net->window_probe)) {
            win_probe_recovered = 1;
            /*
             * Find the first chunk that was used with a window
             * probe and clear the event. Put it back into the
             * send queue as if it has not been sacked.
             */
            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
                if (tp1->window_probe) {
                    sctp_window_probe_recovery(stcb, asoc, tp1);
                    break;
                }
            }
        }
        if (net->flight_size) {
            j++;
            if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
            if (net->window_probe) {
                net->window_probe = 0;
            }
        } else {
            if (net->window_probe) {
                /*
                 * In window probes we must assure that a
                 * timer is still running there.
                 */
                if (!SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                    sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                        stcb->sctp_ep, stcb, net);
                }
            } else if (SCTP_OS_TIMER_PENDING(&net->rxt_timer.timer)) {
                sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
                    stcb, net,
                    SCTP_FROM_SCTP_INDATA + SCTP_LOC_22);
            }
        }
    }
    if ((j == 0) &&
        (!TAILQ_EMPTY(&asoc->sent_queue)) &&
        (asoc->sent_queue_retran_cnt == 0) &&
        (win_probe_recovered == 0) &&
        (done_once == 0)) {
        /*
         * huh, this should not happen unless all packets are
         * PR-SCTP and marked to be skipped, of course.
         */
        if (sctp_fs_audit(asoc)) {
            TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
                net->flight_size = 0;
            }
            asoc->total_flight = 0;
            asoc->total_flight_count = 0;
            asoc->sent_queue_retran_cnt = 0;
            TAILQ_FOREACH(tp1, &asoc->sent_queue, sctp_next) {
                if (tp1->sent < SCTP_DATAGRAM_RESEND) {
                    sctp_flight_size_increase(tp1);
                    sctp_total_flight_increase(stcb, tp1);
                } else if (tp1->sent == SCTP_DATAGRAM_RESEND) {
                    sctp_ucount_incr(asoc->sent_queue_retran_cnt);
                }
            }
        }
        done_once = 1;
        goto again;
    }
    /*********************************************/
    /* Here we perform PR-SCTP procedures        */
    /* (section 4.2)                             */
    /*********************************************/
    /* C1. update advancedPeerAckPoint */
    if (SCTP_TSN_GT(cum_ack, asoc->advanced_peer_ack_point)) {
        asoc->advanced_peer_ack_point = cum_ack;
    }
    /* C2. try to further move advancedPeerAckPoint ahead */
    if ((asoc->peer_supports_prsctp) && (asoc->pr_sctp_cnt > 0)) {
        struct sctp_tmit_chunk *lchk;
        uint32_t old_adv_peer_ack_point;

        old_adv_peer_ack_point = asoc->advanced_peer_ack_point;
        lchk = sctp_try_advance_peer_ack_point(stcb, asoc);
        /* C3. See if we need to send a Fwd-TSN */
        if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, cum_ack)) {
            /*
             * ISSUE with ECN, see FWD-TSN processing.
             */
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_LOG_TRY_ADVANCE) {
                sctp_misc_ints(SCTP_FWD_TSN_CHECK,
                    0xee, cum_ack, asoc->advanced_peer_ack_point,
                    old_adv_peer_ack_point);
            }
            if (SCTP_TSN_GT(asoc->advanced_peer_ack_point, old_adv_peer_ack_point)) {
                send_forward_tsn(stcb, asoc);
            } else if (lchk) {
                /* try to FR fwd-tsn's that get lost too */
                if (lchk->rec.data.fwd_tsn_cnt >= 3) {
                    send_forward_tsn(stcb, asoc);
                }
            }
        }
        if (lchk) {
            /* Assure a timer is up */
            sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                stcb->sctp_ep, stcb, lchk->whoTo);
        }
    }
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_SACK_RWND_LOGGING_ENABLE) {
        sctp_misc_ints(SCTP_SACK_RWND_UPDATE,
            a_rwnd,
            stcb->asoc.peers_rwnd,
            stcb->asoc.total_flight,
            stcb->asoc.total_output_queue_size);
    }
}
void
sctp_update_acked(struct sctp_tcb *stcb, struct sctp_shutdown_chunk *cp, int *abort_flag)
{
    uint32_t cum_ack, a_rwnd;

    cum_ack = ntohl(cp->cumulative_tsn_ack);
    /* Arrange so a_rwnd does NOT change */
    a_rwnd = stcb->asoc.peers_rwnd + stcb->asoc.total_flight;
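    /*
     * Why this keeps the window steady: the express handler recomputes
     * peers_rwnd as roughly (a_rwnd - total_flight), so passing in
     * peers_rwnd + total_flight yields approximately the old peers_rwnd
     * again (modulo the per-chunk overhead term).
     */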
    /* Now call the express sack handling */
    sctp_express_handle_sack(stcb, cum_ack, a_rwnd, abort_flag, 0);
}
static void
sctp_kick_prsctp_reorder_queue(struct sctp_tcb *stcb,
    struct sctp_stream_in *strmin)
{
    struct sctp_queued_to_read *ctl, *nctl;
    struct sctp_association *asoc;
    uint16_t tt;

    asoc = &stcb->asoc;
    tt = strmin->last_sequence_delivered;
    /*
     * First deliver anything prior to and including the stream sequence
     * number that came in.
     */
    TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
        if (SCTP_SSN_GE(tt, ctl->sinfo_ssn)) {
            /* this is deliverable now */
            TAILQ_REMOVE(&strmin->inqueue, ctl, next);
            /* subtract pending on streams */
            asoc->size_on_all_streams -= ctl->length;
            sctp_ucount_decr(asoc->cnt_on_all_streams);
            /* deliver it to at least the delivery-q */
            if (stcb->sctp_socket) {
                sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    ctl,
                    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
            }
        } else {
            /* no more delivery now. */
            break;
        }
    }
    /*
     * Now we must deliver things in queue the normal way if any are
     * now ready.
     */
    tt = strmin->last_sequence_delivered + 1;
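    /*
     * Each in-order delivery below advances last_sequence_delivered,
     * which may make the next queued SSN deliverable as well, so tt is
     * recomputed at the bottom of every iteration.
     */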
    TAILQ_FOREACH_SAFE(ctl, &strmin->inqueue, next, nctl) {
        if (tt == ctl->sinfo_ssn) {
            /* this is deliverable now */
            TAILQ_REMOVE(&strmin->inqueue, ctl, next);
            /* subtract pending on streams */
            asoc->size_on_all_streams -= ctl->length;
            sctp_ucount_decr(asoc->cnt_on_all_streams);
            /* deliver it to at least the delivery-q */
            strmin->last_sequence_delivered = ctl->sinfo_ssn;
            if (stcb->sctp_socket) {
                sctp_mark_non_revokable(asoc, ctl->sinfo_tsn);
                sctp_add_to_readq(stcb->sctp_ep, stcb,
                    ctl,
                    &stcb->sctp_socket->so_rcv, 1, SCTP_READ_LOCK_HELD, SCTP_SO_NOT_LOCKED);
            }
            tt = strmin->last_sequence_delivered + 1;
        } else {
            break;
        }
    }
}
static void
sctp_flush_reassm_for_str_seq(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    uint16_t stream, uint16_t seq)
{
    struct sctp_tmit_chunk *chk, *nchk;

    /* For each one on here see if we need to toss it */
    /*
     * For now, large messages held on the reasmqueue that are complete
     * will be tossed too. We could in theory do more work to spin
     * through and stop after dumping one msg, aka seeing the start of a
     * new msg at the head, and call the delivery function... to see if
     * it can be delivered... But for now we just dump everything on the
     * queue.
     */
    TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
        /*
         * Do not toss it if it is on a different stream or marked
         * for unordered delivery, in which case the stream sequence
         * number has no meaning.
         */
        if ((chk->rec.data.stream_number != stream) ||
            ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) == SCTP_DATA_UNORDERED)) {
            continue;
        }
        if (chk->rec.data.stream_seq == seq) {
            /* It needs to be tossed */
            TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
            if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
                asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
                asoc->str_of_pdapi = chk->rec.data.stream_number;
                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
                asoc->fragment_flags = chk->rec.data.rcv_flags;
            }
            asoc->size_on_reasm_queue -= chk->send_size;
            sctp_ucount_decr(asoc->cnt_on_reasm_queue);

            /* Clear up any stream problem */
            if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
                SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
                /*
                 * We must dump forward this stream's
                 * sequence number if the chunk being
                 * skipped is not unordered. There is a
                 * chance that if the peer does not include
                 * the last fragment in its FWD-TSN we WILL
                 * have a problem here, since you would have
                 * a partial chunk in queue that may not be
                 * deliverable. Also, if a partial delivery
                 * API has started, the user may get a
                 * partial chunk with the next read
                 * returning a new chunk... really ugly, but
                 * I see no way around it! Maybe a notify??
                 */
                asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
            }
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        } else if (SCTP_SSN_GT(chk->rec.data.stream_seq, seq)) {
            /*
             * If the stream_seq is greater than the purging
             * one, we are done.
             */
            break;
        }
    }
}
void
sctp_handle_forward_tsn(struct sctp_tcb *stcb,
    struct sctp_forward_tsn_chunk *fwd,
    int *abort_flag, struct mbuf *m, int offset)
{
    /* The pr-sctp fwd tsn */
    /*
     * Here we will perform all the data receiver side steps for
     * processing FwdTSN, as required by the PR-SCTP draft.
     *
     * Assume we get FwdTSN(x):
     *
     * 1) update local cumTSN to x
     * 2) try to further advance cumTSN to x + others we have
     * 3) examine and update re-ordering queue on pr-in-streams
     * 4) clean up re-assembly queue
     * 5) Send a sack to report where we are.
     */
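    /*
     * Illustrative run (numbers are examples only): with local
     * cumTSN = 100 and FwdTSN(x = 105), step 1 sets cumTSN to 105; if
     * TSNs 106 and 107 are already present in the mapping array, step 2
     * advances it to 107; the reassembly and re-ordering queues are then
     * purged up to that point and a SACK reports the result.
     */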
    struct sctp_association *asoc;
    uint32_t new_cum_tsn, gap;
    unsigned int i, fwd_sz, m_size;
    uint32_t str_seq;
    struct sctp_stream_in *strm;
    struct sctp_tmit_chunk *chk, *nchk;
    struct sctp_queued_to_read *ctl, *sv;

    asoc = &stcb->asoc;
    if ((fwd_sz = ntohs(fwd->ch.chunk_length)) < sizeof(struct sctp_forward_tsn_chunk)) {
        SCTPDBG(SCTP_DEBUG_INDATA1,
            "Bad size too small/big fwd-tsn\n");
        return;
    }
    m_size = (stcb->asoc.mapping_array_size << 3);
    /*************************************************************/
    /* 1. Here we update local cumTSN and shift the bitmap array */
    /*************************************************************/
    new_cum_tsn = ntohl(fwd->new_cumulative_tsn);

    if (SCTP_TSN_GE(asoc->cumulative_tsn, new_cum_tsn)) {
        /* Already got there ... */
        return;
    }
    /*
     * Now we know the new TSN is more advanced, let's find the actual
     * gap.
     */
    SCTP_CALC_TSN_TO_GAP(gap, new_cum_tsn, asoc->mapping_array_base_tsn);
    asoc->cumulative_tsn = new_cum_tsn;
    if (gap >= m_size) {
        if ((long)gap > sctp_sbspace(&stcb->asoc, &stcb->sctp_socket->so_rcv)) {
            struct mbuf *oper;

            /*
             * out of range (of single byte chunks in the rwnd I
             * give out). This must be an attacker.
             */
            *abort_flag = 1;
            oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + 3 * sizeof(uint32_t)),
                0, M_DONTWAIT, 1, MT_DATA);
            if (oper) {
                struct sctp_paramhdr *ph;
                uint32_t *ippp;

                SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
                    (sizeof(uint32_t) * 3);
                ph = mtod(oper, struct sctp_paramhdr *);
                ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
                ph->param_length = htons(SCTP_BUF_LEN(oper));
                ippp = (uint32_t *) (ph + 1);
                *ippp = htonl(SCTP_FROM_SCTP_INDATA + SCTP_LOC_33);
                ippp++;
                *ippp = asoc->highest_tsn_inside_map;
                ippp++;
                *ippp = new_cum_tsn;
            }
            stcb->sctp_ep->last_abort_code = SCTP_FROM_SCTP_INDATA + SCTP_LOC_33;
            sctp_abort_an_association(stcb->sctp_ep, stcb, oper, SCTP_SO_NOT_LOCKED);
            return;
        }
        SCTP_STAT_INCR(sctps_fwdtsn_map_over);

        memset(stcb->asoc.mapping_array, 0, stcb->asoc.mapping_array_size);
        asoc->mapping_array_base_tsn = new_cum_tsn + 1;
        asoc->highest_tsn_inside_map = new_cum_tsn;

        memset(stcb->asoc.nr_mapping_array, 0, stcb->asoc.mapping_array_size);
        asoc->highest_tsn_inside_nr_map = new_cum_tsn;

        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_MAP_LOGGING_ENABLE) {
            sctp_log_map(0, 3, asoc->highest_tsn_inside_map, SCTP_MAP_SLIDE_RESULT);
        }
    } else {
        SCTP_TCB_LOCK_ASSERT(stcb);
        for (i = 0; i <= gap; i++) {
            if (!SCTP_IS_TSN_PRESENT(asoc->mapping_array, i) &&
                !SCTP_IS_TSN_PRESENT(asoc->nr_mapping_array, i)) {
                SCTP_SET_TSN_PRESENT(asoc->nr_mapping_array, i);
                if (SCTP_TSN_GT(asoc->mapping_array_base_tsn + i, asoc->highest_tsn_inside_nr_map)) {
                    asoc->highest_tsn_inside_nr_map = asoc->mapping_array_base_tsn + i;
                }
            }
        }
    }
    /*************************************************************/
    /* 2. Clear up re-assembly queue                             */
    /*************************************************************/
    /*
     * First service it if pd-api is up, just in case we can progress
     * it forward.
     */
    if (asoc->fragmented_delivery_inprogress) {
        sctp_service_reassembly(stcb, asoc);
    }
    /* For each one on here see if we need to toss it */
    /*
     * For now, large messages held on the reasmqueue that are complete
     * will be tossed too. We could in theory do more work to spin
     * through and stop after dumping one msg, aka seeing the start of a
     * new msg at the head, and call the delivery function... to see if
     * it can be delivered... But for now we just dump everything on the
     * queue.
     */
    TAILQ_FOREACH_SAFE(chk, &asoc->reasmqueue, sctp_next, nchk) {
        if (SCTP_TSN_GE(new_cum_tsn, chk->rec.data.TSN_seq)) {
            /* It needs to be tossed */
            TAILQ_REMOVE(&asoc->reasmqueue, chk, sctp_next);
            if (SCTP_TSN_GT(chk->rec.data.TSN_seq, asoc->tsn_last_delivered)) {
                asoc->tsn_last_delivered = chk->rec.data.TSN_seq;
                asoc->str_of_pdapi = chk->rec.data.stream_number;
                asoc->ssn_of_pdapi = chk->rec.data.stream_seq;
                asoc->fragment_flags = chk->rec.data.rcv_flags;
            }
            asoc->size_on_reasm_queue -= chk->send_size;
            sctp_ucount_decr(asoc->cnt_on_reasm_queue);

            /* Clear up any stream problem */
            if ((chk->rec.data.rcv_flags & SCTP_DATA_UNORDERED) != SCTP_DATA_UNORDERED &&
                SCTP_SSN_GT(chk->rec.data.stream_seq, asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered)) {
                /*
                 * We must dump forward this stream's
                 * sequence number if the chunk being
                 * skipped is not unordered. There is a
                 * chance that if the peer does not include
                 * the last fragment in its FWD-TSN we WILL
                 * have a problem here, since you would have
                 * a partial chunk in queue that may not be
                 * deliverable. Also, if a partial delivery
                 * API has started, the user may get a
                 * partial chunk with the next read
                 * returning a new chunk... really ugly, but
                 * I see no way around it! Maybe a notify??
                 */
                asoc->strmin[chk->rec.data.stream_number].last_sequence_delivered = chk->rec.data.stream_seq;
            }
            if (chk->data) {
                sctp_m_freem(chk->data);
                chk->data = NULL;
            }
            sctp_free_a_chunk(stcb, chk, SCTP_SO_NOT_LOCKED);
        } else {
            /*
             * Ok, we have gone beyond the end of the fwd-tsn's
             * mark.
             */
            break;
        }
    }
    /*******************************************************/
    /* 3. Update the PR-stream re-ordering queues and fix  */
    /*    delivery issues as needed.                       */
    /*******************************************************/
    fwd_sz -= sizeof(*fwd);
    if (m && fwd_sz) {
        /* New method. */
        unsigned int num_str;
        struct sctp_strseq *stseq, strseqbuf;

        offset += sizeof(*fwd);

        SCTP_INP_READ_LOCK(stcb->sctp_ep);
        num_str = fwd_sz / sizeof(struct sctp_strseq);
        for (i = 0; i < num_str; i++) {
            uint16_t st;

            stseq = (struct sctp_strseq *)sctp_m_getptr(m, offset,
                sizeof(struct sctp_strseq),
                (uint8_t *) & strseqbuf);
            offset += sizeof(struct sctp_strseq);
            if (stseq == NULL) {
                break;
            }
            /* Convert */
            st = ntohs(stseq->stream);
            stseq->stream = st;
            st = ntohs(stseq->sequence);
            stseq->sequence = st;
            /* now process */

            /*
             * Ok, we now look for the stream/seq on the read
             * queue where it is not all delivered. If we find
             * it, we transmute the read entry into a PDI_ABORTED.
             */
            if (stseq->stream >= asoc->streamincnt) {
                /* screwed up streams, stop! */
                break;
            }
            if ((asoc->str_of_pdapi == stseq->stream) &&
                (asoc->ssn_of_pdapi == stseq->sequence)) {
                /*
                 * If this is the one we were partially
                 * delivering now, then we no longer are.
                 * Note this will change with the reassembly
                 * re-write.
                 */
                asoc->fragmented_delivery_inprogress = 0;
            }
            sctp_flush_reassm_for_str_seq(stcb, asoc, stseq->stream, stseq->sequence);
            TAILQ_FOREACH(ctl, &stcb->sctp_ep->read_queue, next) {
                if ((ctl->sinfo_stream == stseq->stream) &&
                    (ctl->sinfo_ssn == stseq->sequence)) {
                    str_seq = (stseq->stream << 16) | stseq->sequence;
                    ctl->end_added = 1;
                    ctl->pdapi_aborted = 1;
                    sv = stcb->asoc.control_pdapi;
                    stcb->asoc.control_pdapi = ctl;
                    sctp_ulp_notify(SCTP_NOTIFY_PARTIAL_DELVIERY_INDICATION,
                        stcb,
                        SCTP_PARTIAL_DELIVERY_ABORTED,
                        (void *)&str_seq,
                        SCTP_SO_NOT_LOCKED);
                    stcb->asoc.control_pdapi = sv;
                    break;
                } else if ((ctl->sinfo_stream == stseq->stream) &&
                    SCTP_SSN_GT(ctl->sinfo_ssn, stseq->sequence)) {
                    /* We are past our victim SSN */
                    break;
                }
            }
            strm = &asoc->strmin[stseq->stream];
            if (SCTP_SSN_GT(stseq->sequence, strm->last_sequence_delivered)) {
                /* Update the sequence number */
                strm->last_sequence_delivered = stseq->sequence;
            }
            /* now kick the stream the new way */
            /* sa_ignore NO_NULL_CHK */
            sctp_kick_prsctp_reorder_queue(stcb, strm);
        }
        SCTP_INP_READ_UNLOCK(stcb->sctp_ep);
    }
    /*
     * Now slide things forward.
     */
    sctp_slide_mapping_arrays(stcb);

    if (!TAILQ_EMPTY(&asoc->reasmqueue)) {
        /* now lets kick out and check for more fragmented delivery */
        /* sa_ignore NO_NULL_CHK */
        sctp_deliver_reasm_check(stcb, &stcb->asoc);
    }
}