/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */
/* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $ */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <netinet/sctp_os.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_header.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp.h>
#include <netinet/sctp_uio.h>
#include <netinet/udp.h>
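
/*
 * Early fast-retransmit timer. When it fires we walk the sent queue from
 * the newest chunk backwards and mark every chunk to this net that has
 * been outstanding longer than max(RTT estimate, sctp_early_fr_msec) for
 * retransmission, stopping once the marked bytes would overflow the cwnd.
 */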
void
sctp_early_fr_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_tmit_chunk *chk, *tp2;
	struct timeval now, min_wait, tv;
	unsigned int cur_rtt, cnt = 0, cnt_resend = 0;

	/* an early FR is occurring. */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	if (net->lastsa == 0) {
		/* Hmm no rtt estimate yet? */
		cur_rtt = stcb->asoc.initial_rto >> 2;
	} else {
		cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
	}
	if (cur_rtt < SCTP_BASE_SYSCTL(sctp_early_fr_msec)) {
		cur_rtt = SCTP_BASE_SYSCTL(sctp_early_fr_msec);
	}
	cur_rtt *= 1000;
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
		if (chk->whoTo != net) {
			continue;
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND)
			cnt_resend++;
		else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
		    (chk->sent < SCTP_DATAGRAM_RESEND)) {
			/* pending, may need retran */
			if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min.. forget it we
				 * will find no more to send.
				 */
				continue;
			} else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
				/*
				 * we must look at the micro seconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok it was sent after our boundary
					 * time.
					 */
					continue;
				}
			}
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_EARLYFR_LOGGING_ENABLE) {
				sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
				    4, SCTP_FR_MARKED_EARLY);
			}
			SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
			chk->sent = SCTP_DATAGRAM_RESEND;
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			/* double book size since we are doing an early FR */
			chk->book_size_scale++;
			cnt += chk->send_size;
			if ((cnt + net->flight_size) > net->cwnd) {
				/* Mark all we could possibly resend */
				break;
			}
		}
	}
	if (cnt) {
		/*
		 * JRS - Use the congestion control given in the congestion
		 * control module
		 */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_fr_timer(inp, stcb, net);
	} else if (cnt_resend) {
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR, SCTP_SO_NOT_LOCKED);
	}
	/* Restart it? */
	if (net->flight_size < net->cwnd) {
		SCTP_STAT_INCR(sctps_earlyfrstrtmr);
		sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
	}
}
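
/*
 * Recount sent_queue_cnt and sent_queue_retran_cnt from scratch by
 * walking the sent, control and ASCONF queues; used to re-sync the
 * cached counters with reality.
 */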
void
sctp_audit_retranmission_queue(struct sctp_association *asoc)
{
	struct sctp_tmit_chunk *chk;

	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
	asoc->sent_queue_retran_cnt = 0;
	asoc->sent_queue_cnt = 0;
	TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
		asoc->sent_queue_cnt++;
	}
	TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	TAILQ_FOREACH(chk, &asoc->asconf_send_queue, sctp_next) {
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			sctp_ucount_incr(asoc->sent_queue_retran_cnt);
		}
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
	    asoc->sent_queue_retran_cnt,
	    asoc->sent_queue_cnt);
}
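
/*
 * Bump the error counters for a destination (and the association) and
 * take the consequences: a net whose error_count exceeds its
 * failure_threshold is marked unreachable, and if the association's
 * overall_error_count exceeds the given threshold the association is
 * aborted. Returns 1 if the association was destroyed, 0 otherwise.
 */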
static int
sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, uint16_t threshold)
{
	if (net) {
		net->error_count++;
		SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
		    net, net->error_count,
		    net->failure_threshold);
		if (net->error_count > net->failure_threshold) {
			/* We had a threshold failure */
			if (net->dest_state & SCTP_ADDR_REACHABLE) {
				net->dest_state &= ~SCTP_ADDR_REACHABLE;
				net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
				net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
				if (net == stcb->asoc.primary_destination) {
					net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
				}
				/*
				 * JRS 5/14/07 - If a destination is
				 * unreachable, the PF bit is turned off.
				 * This allows an unambiguous use of the PF
				 * bit for destinations that are reachable
				 * but potentially failed. If the
				 * destination is set to the unreachable
				 * state, also set the destination to the PF
				 * state.
				 */
				/*
				 * Add debug message here if destination is
				 * not in PF state.
				 */
				/* Stop any running T3 timers here? */
				if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
					net->dest_state &= ~SCTP_ADDR_PF;
					SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to unreachable.\n",
					    net);
				}
				sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
				    stcb,
				    SCTP_FAILED_THRESHOLD,
				    (void *)net, SCTP_SO_NOT_LOCKED);
			}
		}
		/*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
		/*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
		 *********ROUTING CODE
		 */
	}
	if (stcb == NULL)
		return (0);

	if (net) {
		if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
				sctp_misc_ints(SCTP_THRESHOLD_INCR,
				    stcb->asoc.overall_error_count,
				    (stcb->asoc.overall_error_count + 1),
				    SCTP_FROM_SCTP_TIMER,
				    __LINE__);
			}
			stcb->asoc.overall_error_count++;
		}
	} else {
		if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_THRESHOLD_LOGGING) {
			sctp_misc_ints(SCTP_THRESHOLD_INCR,
			    stcb->asoc.overall_error_count,
			    (stcb->asoc.overall_error_count + 1),
			    SCTP_FROM_SCTP_TIMER,
			    __LINE__);
		}
		stcb->asoc.overall_error_count++;
	}
	SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
	    &stcb->asoc, stcb->asoc.overall_error_count,
	    (uint32_t) threshold,
	    ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
	/*
	 * We specifically do not do >= to give the assoc one more chance
	 * before we fail it.
	 */
	if (stcb->asoc.overall_error_count > threshold) {
		/* Abort notification sends a ULP notify */
		struct mbuf *oper;

		oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
		    0, M_DONTWAIT, 1, MT_DATA);
		if (oper) {
			struct sctp_paramhdr *ph;
			uint32_t *ippp;

			SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
			    sizeof(uint32_t);
			ph = mtod(oper, struct sctp_paramhdr *);
			ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
			ph->param_length = htons(SCTP_BUF_LEN(oper));
			ippp = (uint32_t *) (ph + 1);
			*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
		}
		inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1;
		sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper, SCTP_SO_NOT_LOCKED);
		return (1);
	}
	return (0);
}
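
/*
 * Select an alternate destination. mode 0 is the plain round-robin
 * search for a reachable, confirmed address; mode 1 is the CMT policy
 * (largest cwnd wins); mode 2 is the CMT-PF policy described in the
 * block comments below.
 */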
struct sctp_nets *
sctp_find_alternate_net(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int mode)
{
	/* Find and return an alternate network if possible */
	struct sctp_nets *alt, *mnet, *min_errors_net = NULL, *max_cwnd_net = NULL;
	int once;

	/* JRS 5/14/07 - Initialize min_errors to an impossible value. */
	int min_errors = -1;
	uint32_t max_cwnd = 0;

	if (stcb->asoc.numnets == 1) {
		/* No others but net */
		return (TAILQ_FIRST(&stcb->asoc.nets));
	}
	/*
	 * JRS 5/14/07 - If mode is set to 2, use the CMT PF find alternate
	 * net algorithm. This algorithm chooses the active destination (not
	 * in PF state) with the largest cwnd value. If all destinations are
	 * in PF state, unreachable, or unconfirmed, choose the destination
	 * that is in PF state with the lowest error count. In case of a
	 * tie, choose the destination that was most recently active.
	 */
	if (mode == 2) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			/*
			 * JRS 5/14/07 - If the destination is unreachable
			 * or unconfirmed, skip it.
			 */
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)) {
				continue;
			}
			/*
			 * JRS 5/14/07 - If the destination is reachable
			 * but in PF state, compare the error count of the
			 * destination to the minimum error count seen thus
			 * far. Store the destination with the lower error
			 * count. If the error counts are equal, store the
			 * destination that was most recently active.
			 */
			if (mnet->dest_state & SCTP_ADDR_PF) {
				/*
				 * JRS 5/14/07 - If the destination under
				 * consideration is the current destination,
				 * work as if the error count is one higher.
				 * The actual error count will not be
				 * incremented until later in the t3
				 * handler.
				 */
				if (mnet == net) {
					if (min_errors == -1) {
						min_errors = mnet->error_count + 1;
						min_errors_net = mnet;
					} else if (mnet->error_count + 1 < min_errors) {
						min_errors = mnet->error_count + 1;
						min_errors_net = mnet;
					} else if (mnet->error_count + 1 == min_errors
					    && mnet->last_active > min_errors_net->last_active) {
						min_errors_net = mnet;
						min_errors = mnet->error_count + 1;
					}
					continue;
				} else {
					if (min_errors == -1) {
						min_errors = mnet->error_count;
						min_errors_net = mnet;
					} else if (mnet->error_count < min_errors) {
						min_errors = mnet->error_count;
						min_errors_net = mnet;
					} else if (mnet->error_count == min_errors
					    && mnet->last_active > min_errors_net->last_active) {
						min_errors_net = mnet;
						min_errors = mnet->error_count;
					}
					continue;
				}
			}
			/*
			 * JRS 5/14/07 - If the destination is reachable and
			 * not in PF state, compare the cwnd of the
			 * destination to the highest cwnd seen thus far.
			 * Store the destination with the higher cwnd value.
			 * If the cwnd values are equal, randomly choose one
			 * of the two destinations.
			 */
			if (max_cwnd < mnet->cwnd) {
				max_cwnd_net = mnet;
				max_cwnd = mnet->cwnd;
			} else if (max_cwnd == mnet->cwnd) {
				uint32_t rndval;
				uint8_t this_random;

				if (stcb->asoc.hb_random_idx > 3) {
					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
					memcpy(stcb->asoc.hb_random_values, &rndval, sizeof(stcb->asoc.hb_random_values));
					this_random = stcb->asoc.hb_random_values[0];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				} else {
					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				}
				if (this_random % 2 == 1) {
					max_cwnd_net = mnet;
					max_cwnd = mnet->cwnd;	/* Useless? */
				}
			}
		}
		/*
		 * JRS 5/14/07 - After all destinations have been considered
		 * as alternates, check to see if there was some active
		 * destination (not in PF state). If not, check to see if
		 * there was some PF destination with the minimum number of
		 * errors. If not, return the original destination. If
		 * there is a min_errors_net, remove the PF flag from that
		 * destination, set the cwnd to one or two MTUs, and return
		 * the destination as an alt. If there was some active
		 * destination with a highest cwnd, return the destination
		 * as an alt.
		 */
		if (max_cwnd_net == NULL) {
			if (min_errors_net == NULL) {
				return (net);
			}
			min_errors_net->dest_state &= ~SCTP_ADDR_PF;
			min_errors_net->cwnd = min_errors_net->mtu * SCTP_BASE_SYSCTL(sctp_cmt_pf);
			if (SCTP_OS_TIMER_PENDING(&min_errors_net->rxt_timer.timer)) {
				sctp_timer_stop(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep,
				    stcb, min_errors_net,
				    SCTP_FROM_SCTP_TIMER + SCTP_LOC_2);
			}
			SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from PF to active with %d errors.\n",
			    min_errors_net, min_errors_net->error_count);
			return (min_errors_net);
		}
		return (max_cwnd_net);
	}
	/*
	 * JRS 5/14/07 - If mode is set to 1, use the CMT policy for
	 * choosing an alternate net.
	 */
	else if (mode == 1) {
		TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
			if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
			    (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)
			    ) {
				/*
				 * will skip ones that are not-reachable or
				 * unconfirmed
				 */
				continue;
			}
			if (max_cwnd < mnet->cwnd) {
				max_cwnd_net = mnet;
				max_cwnd = mnet->cwnd;
			} else if (max_cwnd == mnet->cwnd) {
				uint32_t rndval;
				uint8_t this_random;

				if (stcb->asoc.hb_random_idx > 3) {
					rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
					memcpy(stcb->asoc.hb_random_values, &rndval,
					    sizeof(stcb->asoc.hb_random_values));
					this_random = stcb->asoc.hb_random_values[0];
					stcb->asoc.hb_random_idx = 0;
					stcb->asoc.hb_ect_randombit = 0;
				} else {
					this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
					stcb->asoc.hb_random_idx++;
					stcb->asoc.hb_ect_randombit = 0;
				}
				if (this_random % 2) {
					max_cwnd_net = mnet;
					max_cwnd = mnet->cwnd;
				}
			}
		}
		if (max_cwnd_net) {
			return (max_cwnd_net);
		}
	}
	mnet = net;
	once = 0;

	if (mnet == NULL) {
		mnet = TAILQ_FIRST(&stcb->asoc.nets);
	}
	do {
		alt = TAILQ_NEXT(mnet, sctp_next);
		if (alt == NULL) {
			once++;
			if (once > 1) {
				break;
			}
			alt = TAILQ_FIRST(&stcb->asoc.nets);
		}
		if (alt->ro.ro_rt == NULL) {
			if (alt->ro._s_addr) {
				sctp_free_ifa(alt->ro._s_addr);
				alt->ro._s_addr = NULL;
			}
			alt->src_addr_selected = 0;
		}
		if (
		    ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
		    (alt->ro.ro_rt != NULL) &&
		/* sa_ignore NO_NULL_CHK */
		    (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))
		    ) {
			/* Found a reachable address */
			break;
		}
		mnet = alt;
	} while (alt != NULL);

	if (alt == NULL) {
		/* Case where NO insv network exists (dormant state) */
		/* we rotate destinations */
		once = 0;
		mnet = net;
		do {
			alt = TAILQ_NEXT(mnet, sctp_next);
			if (alt == NULL) {
				once++;
				if (once > 1) {
					break;
				}
				alt = TAILQ_FIRST(&stcb->asoc.nets);
			}
			/* sa_ignore NO_NULL_CHK */
			if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
			    (alt != net)) {
				/* Found an alternate address */
				break;
			}
			mnet = alt;
		} while (alt != NULL);
	}
	if (alt == NULL) {
		return (net);
	}
	return (alt);
}
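
/*
 * Exponential timer backoff on timeout (cf. RFC 4960, section 6.3.3):
 * double the RTO, clamped to [minrto, maxrto], and let the congestion
 * control module shrink the cwnd unless this was only a window probe.
 */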
static void
sctp_backoff_on_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    int win_probe,
    int num_marked)
{
	if (net->RTO == 0) {
		net->RTO = stcb->asoc.minrto;
	}
	net->RTO <<= 1;
	if (net->RTO > stcb->asoc.maxrto) {
		net->RTO = stcb->asoc.maxrto;
	}
	if ((win_probe == 0) && num_marked) {
		/* We don't apply penalty to window probe scenarios */
		/* JRS - Use the congestion control given in the CC module */
		stcb->asoc.cc_functions.sctp_cwnd_update_after_timeout(stcb, net);
	}
}
static void
sctp_recover_sent_list(struct sctp_tcb *stcb)
{
	struct sctp_tmit_chunk *chk, *tp2;
	struct sctp_association *asoc;

	asoc = &stcb->asoc;
	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_NEXT(chk, sctp_next);
		if ((compare_with_wrap(stcb->asoc.last_acked_seq,
		    chk->rec.data.TSN_seq,
		    MAX_TSN)) ||
		    (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
			/* remove it */
			SCTP_PRINTF("Found chk:%p tsn:%x <= last_acked_seq:%x\n",
			    chk, chk->rec.data.TSN_seq, stcb->asoc.last_acked_seq);
			TAILQ_REMOVE(&asoc->sent_queue, chk, sctp_next);
			if (chk->pr_sctp_on) {
				if (asoc->pr_sctp_cnt != 0)
					asoc->pr_sctp_cnt--;
			}
			if (chk->data) {
				/* sa_ignore NO_NULL_CHK */
				sctp_free_bufspace(stcb, asoc, chk, 1);
				sctp_m_freem(chk->data);
				if (asoc->peer_supports_prsctp && PR_SCTP_BUF_ENABLED(chk->flags)) {
					asoc->sent_queue_cnt_removeable--;
				}
			}
			chk->data = NULL;
			asoc->sent_queue_cnt--;
			sctp_free_a_chunk(stcb, chk);
		}
	}
	SCTP_PRINTF("after recover order is as follows\n");
	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_NEXT(chk, sctp_next);
		SCTP_PRINTF("chk:%p TSN:%x\n", chk, chk->rec.data.TSN_seq);
	}
}
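
/*
 * T3 marking pass: every qualifying chunk outstanding on *net is flagged
 * SCTP_DATAGRAM_RESEND and (when alt differs) re-homed to the alternate
 * destination; flight size and the peer's rwnd are adjusted to match.
 */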
static int
sctp_mark_all_for_resend(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt,
    int window_probe,
    int *num_marked)
{

	/*
	 * Mark all chunks (well not all) that were sent to *net for
	 * retransmission. Move them to alt for their destination as well...
	 * We only mark chunks that have been outstanding long enough to
	 * have received feed-back.
	 */
	struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL;
	struct sctp_nets *lnets;
	struct timeval now, min_wait, tv;
	int cur_rtt;
	int audit_tf, num_mk, fir;
	unsigned int cnt_mk;
	uint32_t orig_flight, orig_tf;
	uint32_t tsnlast, tsnfirst;
	int recovery_cnt = 0;

	/* none in flight now */
	audit_tf = 0;
	fir = 0;
	/*
	 * figure out how long a data chunk must be pending before we can
	 * mark it ..
	 */
	(void)SCTP_GETTIME_TIMEVAL(&now);
	/* get cur rto in micro-seconds */
	cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
	cur_rtt *= 1000;
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(cur_rtt,
		    stcb->asoc.peers_rwnd,
		    window_probe,
		    SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(net->flight_size,
		    SCTP_OS_TIMER_PENDING(&net->fr_timer.timer),
		    SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer),
		    SCTP_FR_CWND_REPORT);
		sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
	}
	tv.tv_sec = cur_rtt / 1000000;
	tv.tv_usec = cur_rtt % 1000000;
	min_wait = now;
	timevalsub(&min_wait, &tv);
	if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
		/*
		 * if we hit here, we don't have enough seconds on the clock
		 * to account for the RTO. We just let the lower seconds be
		 * the bounds and don't worry about it. This may mean we
		 * will mark a lot more than we should.
		 */
		min_wait.tv_sec = min_wait.tv_usec = 0;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
		sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
	}
	/*
	 * Our rwnd will be incorrect here since we are not adding back the
	 * cnt * mbuf but we will fix that down below.
	 */
	orig_flight = net->flight_size;
	orig_tf = stcb->asoc.total_flight;

	net->fast_retran_ip = 0;
	/* Now on to each chunk */
	num_mk = cnt_mk = 0;
	tsnfirst = tsnlast = 0;
#ifndef INVARIANTS
start_again:
#endif
	chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
	for (; chk != NULL; chk = tp2) {
		tp2 = TAILQ_NEXT(chk, sctp_next);
		if ((compare_with_wrap(stcb->asoc.last_acked_seq,
		    chk->rec.data.TSN_seq,
		    MAX_TSN)) ||
		    (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
			/* Strange case our list got out of order? */
			SCTP_PRINTF("Our list is out of order? last_acked:%x chk:%x",
			    (unsigned int)stcb->asoc.last_acked_seq, (unsigned int)chk->rec.data.TSN_seq);
			recovery_cnt++;
#ifdef INVARIANTS
			panic("last acked >= chk on sent-Q");
#else
			SCTP_PRINTF("Recover attempts a restart cnt:%d\n", recovery_cnt);
			sctp_recover_sent_list(stcb);
			if (recovery_cnt < 10) {
				goto start_again;
			} else {
				SCTP_PRINTF("Recovery fails %d times??\n", recovery_cnt);
			}
#endif
		}
		if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
			/*
			 * found one to mark: If it is less than
			 * DATAGRAM_ACKED it MUST not be a skipped or marked
			 * TSN but instead one that is either already set
			 * for retransmission OR one that needs
			 * retransmission.
			 */

			/* validate its been outstanding long enough */
			if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
				sctp_log_fr(chk->rec.data.TSN_seq,
				    chk->sent_rcv_time.tv_sec,
				    chk->sent_rcv_time.tv_usec,
				    SCTP_FR_T3_MARK_TIME);
			}
			if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
				/*
				 * we have reached a chunk that was sent
				 * some seconds past our min.. forget it we
				 * will find no more to send.
				 */
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
					sctp_log_fr(0,
					    chk->sent_rcv_time.tv_sec,
					    chk->sent_rcv_time.tv_usec,
					    SCTP_FR_T3_STOPPED);
				}
				continue;
			} else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
			    (window_probe == 0)) {
				/*
				 * we must look at the micro seconds to
				 * know.
				 */
				if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
					/*
					 * ok it was sent after our boundary
					 * time.
					 */
					if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
						sctp_log_fr(0,
						    chk->sent_rcv_time.tv_sec,
						    chk->sent_rcv_time.tv_usec,
						    SCTP_FR_T3_STOPPED);
					}
					continue;
				}
			}
			if (stcb->asoc.peer_supports_prsctp && PR_SCTP_TTL_ENABLED(chk->flags)) {
				/* Is it expired? */
				if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
				    ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
				    (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
					/* Yes so drop it */
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    SCTP_SO_NOT_LOCKED);
					}
					continue;
				}
			}
			if (stcb->asoc.peer_supports_prsctp && PR_SCTP_RTX_ENABLED(chk->flags)) {
				/* Has it been retransmitted tv_sec times? */
				if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
					if (chk->data) {
						(void)sctp_release_pr_sctp_chunk(stcb,
						    chk,
						    (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
						    SCTP_SO_NOT_LOCKED);
					}
					continue;
				}
			}
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				num_mk++;
				if (fir == 0) {
					fir = 1;
					tsnfirst = chk->rec.data.TSN_seq;
				}
				tsnlast = chk->rec.data.TSN_seq;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
					sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
					    0, SCTP_FR_T3_MARKED);
				}
				if (chk->rec.data.chunk_was_revoked) {
					/* deflate the cwnd */
					chk->whoTo->cwnd -= chk->book_size;
					chk->rec.data.chunk_was_revoked = 0;
				}
				net->marked_retrans++;
				stcb->asoc.marked_retrans++;
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uintptr_t) chk->whoTo,
					    chk->rec.data.TSN_seq);
				}
				sctp_flight_size_decrease(chk);
				sctp_total_flight_decrease(stcb, chk);
				stcb->asoc.peers_rwnd += chk->send_size;
				stcb->asoc.peers_rwnd += SCTP_BASE_SYSCTL(sctp_peer_chunk_oh);
			}
			chk->sent = SCTP_DATAGRAM_RESEND;
			SCTP_STAT_INCR(sctps_markedretrans);

			/* reset the TSN for striking and other FR stuff */
			chk->rec.data.doing_fast_retransmit = 0;
			/* Clear any time so NO RTT is being done */
			chk->do_rtt = 0;
			if (alt != net) {
				sctp_free_remote_addr(chk->whoTo);
				chk->no_fr_allowed = 1;
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			} else {
				chk->no_fr_allowed = 0;
				if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
					chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
				} else {
					chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
				}
			}
			/*
			 * CMT: Do not allow FRs on retransmitted TSNs.
			 */
			if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) == 1) {
				chk->no_fr_allowed = 1;
			}
		} else if (chk->sent == SCTP_DATAGRAM_ACKED) {
			/* remember highest acked one */
			could_be_sent = chk;
		}
		if (chk->sent == SCTP_DATAGRAM_RESEND) {
			cnt_mk++;
		}
	}
	if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
		/* we did not subtract the same things? */
		audit_tf = 1;
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
		sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
	}
#ifdef SCTP_DEBUG
	if (num_mk) {
		SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
		    tsnlast);
		SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n",
		    num_mk, (u_long)stcb->asoc.peers_rwnd);
	}
#endif
	*num_marked = num_mk;
	if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
		/* fix it so we retransmit the highest acked anyway */
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		cnt_mk++;
		could_be_sent->sent = SCTP_DATAGRAM_RESEND;
	}
	if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
#ifdef INVARIANTS
		SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d we marked:%d this time\n",
		    cnt_mk, stcb->asoc.sent_queue_retran_cnt, num_mk);
#endif
#ifndef SCTP_AUDITING_ENABLED
		stcb->asoc.sent_queue_retran_cnt = cnt_mk;
#endif
	}
	/* Now check for a ECN Echo that may be stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (audit_tf) {
		SCTPDBG(SCTP_DEBUG_TIMER4,
		    "Audit total flight due to negative value net:%p\n",
		    net);
		stcb->asoc.total_flight = 0;
		stcb->asoc.total_flight_count = 0;
		/* Clear all networks flight size */
		TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
			lnets->flight_size = 0;
			SCTPDBG(SCTP_DEBUG_TIMER4,
			    "Net:%p c-f cwnd:%d ssthresh:%d\n",
			    lnets, lnets->cwnd, lnets->ssthresh);
		}
		TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
			if (chk->sent < SCTP_DATAGRAM_RESEND) {
				if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FLIGHT_LOGGING_ENABLE) {
					sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
					    chk->whoTo->flight_size,
					    chk->book_size,
					    (uintptr_t) chk->whoTo,
					    chk->rec.data.TSN_seq);
				}
				sctp_flight_size_increase(chk);
				sctp_total_flight_increase(stcb, chk);
			}
		}
	}
	/*
	 * Setup the ecn nonce re-sync point. We do this since
	 * retransmissions are NOT setup for ECN. This means that due to
	 * Karn's rule, we don't know the total of the peers ecn bits.
	 */
	chk = TAILQ_FIRST(&stcb->asoc.send_queue);
	if (chk == NULL) {
		stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
	} else {
		stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
	}
	stcb->asoc.nonce_wait_for_ecne = 0;
	stcb->asoc.nonce_sum_check = 0;
	/* We return 1 if we only have a window probe outstanding */
	return (0);
}
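
/*
 * Re-home everything still waiting to be sent: the stream queues and the
 * send queue are walked, and any entry bound to the failed net is moved
 * to the alternate, with the reference counts updated accordingly.
 */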
static void
sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
    struct sctp_nets *net,
    struct sctp_nets *alt)
{
	struct sctp_association *asoc;
	struct sctp_stream_out *outs;
	struct sctp_tmit_chunk *chk;
	struct sctp_stream_queue_pending *sp;

	if (net == alt)
		/* nothing to do */
		return;

	asoc = &stcb->asoc;

	/*
	 * now through all the streams checking for chunks sent to our bad
	 * network.
	 */
	TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
		/* now clean up any chunks here */
		TAILQ_FOREACH(sp, &outs->outqueue, next) {
			if (sp->net == net) {
				sctp_free_remote_addr(sp->net);
				sp->net = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
		}
	}
	/* Now check the pending queue */
	TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
		if (chk->whoTo == net) {
			sctp_free_remote_addr(chk->whoTo);
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}
}
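
/*
 * T3-rxt expiration: pick an alternate (PF-aware when CMT-PF is on),
 * mark outstanding chunks for retransmission, back off the RTO, do
 * threshold management, and handle primary-path failover. Returns 1
 * if the association was destroyed in the process.
 */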
int
sctp_t3rxt_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	int win_probe, num_mk;

	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_FR_LOGGING_ENABLE) {
		sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
		struct sctp_nets *lnet;

		TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
			if (net == lnet) {
				sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
			} else {
				sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
			}
		}
	}
	/* Find an alternate and mark those for retransmission */
	if ((stcb->asoc.peers_rwnd == 0) &&
	    (stcb->asoc.total_flight < net->mtu)) {
		SCTP_STAT_INCR(sctps_timowindowprobe);
		win_probe = 1;
	} else {
		win_probe = 0;
	}

	/*
	 * JRS 5/14/07 - If CMT PF is on and the destination is not already
	 * in PF state, set the destination to PF state and store the
	 * current time as the time that the destination was last active. In
	 * addition, find an alternate destination with PF-based
	 * find_alt_net.
	 */
	if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf)) {
		if ((net->dest_state & SCTP_ADDR_PF) != SCTP_ADDR_PF) {
			net->dest_state |= SCTP_ADDR_PF;
			net->last_active = sctp_get_tick_count();
			SCTPDBG(SCTP_DEBUG_TIMER4, "Destination %p moved from active to PF.\n",
			    net);
		}
		alt = sctp_find_alternate_net(stcb, net, 2);
	} else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off)) {
		/*
		 * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being
		 * used, then pick dest with largest ssthresh for any
		 * retransmission.
		 */
		alt = net;
		alt = sctp_find_alternate_net(stcb, alt, 1);
		/*
		 * CUCv2: If a different dest is picked for the
		 * retransmission, then new (rtx-)pseudo_cumack needs to be
		 * tracked for orig dest. Let CUCv2 track new (rtx-)
		 * pseudo-cumack always.
		 */
		net->find_pseudo_cumack = 1;
		net->find_rtx_pseudo_cumack = 1;
	} else {		/* CMT is OFF */
		alt = sctp_find_alternate_net(stcb, net, 0);
	}
	(void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk);
	/* FR Loss recovery just ended with the T3. */
	stcb->asoc.fast_retran_loss_recovery = 0;

	/* CMT FR loss recovery ended with the T3 */
	net->fast_retran_loss_recovery = 0;

	/*
	 * setup the sat loss recovery that prevents satellite cwnd advance.
	 */
	stcb->asoc.sat_t3_loss_recovery = 1;
	stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;

	/* Backoff the timer and cwnd */
	sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
	if (win_probe == 0) {
		/* We don't do normal threshold management on window probes */
		if (sctp_threshold_management(inp, stcb, net,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		} else {
			if (net != stcb->asoc.primary_destination) {
				/* send a immediate HB if our RTO is stale */
				struct timeval now;
				unsigned int ms_goneby;

				(void)SCTP_GETTIME_TIMEVAL(&now);
				if (net->last_sent_time.tv_sec) {
					ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
				} else {
					ms_goneby = 0;
				}
				if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
					/*
					 * no recent feed back in an RTO or
					 * more, request a RTT update
					 */
					if (sctp_send_hb(stcb, 1, net) < 0)
						/*
						 * Less than 0 means we lost
						 * the association
						 */
						return (1);
				}
			}
		}
	} else {
		/*
		 * For a window probe we don't penalize the net's but only
		 * the association. This may fail it if SACKs are not coming
		 * back. If sack's are coming with rwnd locked at 0, we will
		 * continue to hold things waiting for rwnd to raise
		 */
		if (sctp_threshold_management(inp, stcb, NULL,
		    stcb->asoc.max_send_times)) {
			/* Association was destroyed */
			return (1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/* Move all pending over too */
		sctp_move_all_chunks_to_alt(stcb, net, alt);

		/*
		 * Get the address that failed, to force a new src address
		 * selecton and a route allocation.
		 */
		if (net->ro._s_addr) {
			sctp_free_ifa(net->ro._s_addr);
			net->ro._s_addr = NULL;
		}
		net->src_addr_selected = 0;

		/* Force a route allocation too */
		if (net->ro.ro_rt) {
			RTFREE(net->ro.ro_rt);
			net->ro.ro_rt = NULL;
		}
		/* Was it our primary? */
		if ((stcb->asoc.primary_destination == net) && (alt != net)) {
			/*
			 * Yes, note it as such and find an alternate note:
			 * this means HB code must use this to resent the
			 * primary if it goes active AND if someone does a
			 * change-primary then this flag must be cleared
			 * from any net structures.
			 */
			if (sctp_set_primary_addr(stcb,
			    (struct sockaddr *)NULL,
			    alt) == 0) {
				net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
			}
		}
	} else if (SCTP_BASE_SYSCTL(sctp_cmt_on_off) && SCTP_BASE_SYSCTL(sctp_cmt_pf) && (net->dest_state & SCTP_ADDR_PF) == SCTP_ADDR_PF) {
		/*
		 * JRS 5/14/07 - If the destination hasn't failed completely
		 * but is in PF state, a PF-heartbeat needs to be sent
		 * manually.
		 */
		if (sctp_send_hb(stcb, 1, net) < 0)
			/* Return less than 0 means we lost the association */
			return (1);
	}
	/*
	 * Special case for cookie-echo'ed case, we don't do output but must
	 * await the COOKIE-ACK before retransmission
	 */
	if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
		/*
		 * Here we just reset the timer and start again since we
		 * have not established the asoc
		 */
		sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
		return (0);
	}
	if (stcb->asoc.peer_supports_prsctp) {
		struct sctp_tmit_chunk *lchk;

		lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
		/* C3. See if we need to send a Fwd-TSN */
		if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
		    stcb->asoc.last_acked_seq, MAX_TSN)) {
			/*
			 * ISSUE with ECN, see FWD-TSN processing for notes
			 * on issues that will occur when the ECN NONCE
			 * stuff is put into SCTP for cross checking.
			 */
			send_forward_tsn(stcb, &stcb->asoc);
			if (lchk) {
				/* Assure a timer is up */
				sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
			}
		}
	}
	if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
		sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
	}
	return (0);
}
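
/*
 * T1-init expiration: threshold-manage, back off, optionally rotate the
 * primary destination to an alternate, and retransmit the INIT.
 */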
int
sctp_t1init_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	/* bump the thresholds */
	if (stcb->asoc.delayed_connection) {
		/*
		 * special hook for delayed connection. The library did NOT
		 * complete the rest of its sends.
		 */
		stcb->asoc.delayed_connection = 0;
		sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
		return (0);
	}
	if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
		return (0);
	}
	if (sctp_threshold_management(inp, stcb, net,
	    stcb->asoc.max_init_times)) {
		/* Association was destroyed */
		return (1);
	}
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
	if (stcb->asoc.initial_init_rto_max < net->RTO) {
		net->RTO = stcb->asoc.initial_init_rto_max;
	}
	if (stcb->asoc.numnets > 1) {
		/* If we have more than one addr use it */
		struct sctp_nets *alt;

		alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
		if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
			sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
			stcb->asoc.primary_destination = alt;
		}
	}
	/* Send out a new init */
	sctp_send_initiate(inp, stcb, SCTP_SO_NOT_LOCKED);
	return (0);
}
/*
 * For cookie and asconf we actually need to find and mark for resend, then
 * increment the resend counter (after all the threshold management stuff of
 * course).
 */
int
sctp_cookie_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *cookie;

	/* first before all else we must find the cookie */
	TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
		if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
			break;
		}
	}
	if (cookie == NULL) {
		if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
			/* FOOBAR! */
			struct mbuf *oper;

			oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
			    0, M_DONTWAIT, 1, MT_DATA);
			if (oper) {
				struct sctp_paramhdr *ph;
				uint32_t *ippp;

				SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
				    sizeof(uint32_t);
				ph = mtod(oper, struct sctp_paramhdr *);
				ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
				ph->param_length = htons(SCTP_BUF_LEN(oper));
				ippp = (uint32_t *) (ph + 1);
				*ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_3);
			}
			inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_4;
			sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
			    oper, SCTP_SO_NOT_LOCKED);
		} else {
#ifdef INVARIANTS
			panic("Cookie timer expires in wrong state?");
#else
			SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
			return (0);
#endif
		}
		return (0);
	}
	/* Ok we found the cookie, threshold management next */
	if (sctp_threshold_management(inp, stcb, cookie->whoTo,
	    stcb->asoc.max_init_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * cleared threshold management now lets backoff the address & select
	 * an alternate
	 */
	stcb->asoc.dropped_special_cnt = 0;
	sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
	if (alt != cookie->whoTo) {
		sctp_free_remote_addr(cookie->whoTo);
		cookie->whoTo = alt;
		atomic_add_int(&alt->ref_count, 1);
	}
	/* Now mark the retran info */
	if (cookie->sent != SCTP_DATAGRAM_RESEND) {
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	}
	cookie->sent = SCTP_DATAGRAM_RESEND;
	/*
	 * Now call the output routine to kick out the cookie again, Note we
	 * don't mark any chunks for retran so that FR will need to kick in
	 * to move these (or a send timer).
	 */
	return (0);
}
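
/*
 * Stream-reset timer: same pattern as the cookie case above, applied to
 * the outstanding STREAM-RESET request chunk.
 */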
int
sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;

	if (stcb->asoc.stream_reset_outstanding == 0) {
		return (0);
	}
	/* find the existing STRRESET, we use the seq number we sent out on */
	(void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
	if (strrst == NULL) {
		return (0);
	}
	/* do threshold management */
	if (sctp_threshold_management(inp, stcb, strrst->whoTo,
	    stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/*
	 * cleared threshold management now lets backoff the address & select
	 * an alternate
	 */
	sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
	alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
	sctp_free_remote_addr(strrst->whoTo);
	strrst->whoTo = alt;
	atomic_add_int(&alt->ref_count, 1);

	/* See if a ECN Echo is also stranded */
	TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
		if ((chk->whoTo == net) &&
		    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
			sctp_free_remote_addr(chk->whoTo);
			if (chk->sent != SCTP_DATAGRAM_RESEND) {
				chk->sent = SCTP_DATAGRAM_RESEND;
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			}
			chk->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
	}
	if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
		/*
		 * If the address went un-reachable, we need to move to
		 * alternates for ALL chk's in queue
		 */
		sctp_move_all_chunks_to_alt(stcb, net, alt);
	}
	/* mark the retran info */
	if (strrst->sent != SCTP_DATAGRAM_RESEND)
		sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
	strrst->sent = SCTP_DATAGRAM_RESEND;

	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
	return (0);
}
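
/*
 * ASCONF timer: either compose the first ASCONF or retransmit the
 * outstanding chain on an alternate; a peer that never answers repeated
 * ASCONFs is treated as ASCONF-incapable and cleaned up.
 */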
int
sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;
	struct sctp_tmit_chunk *asconf, *chk, *nchk;

	/* is this a first send, or a retransmission? */
	if (TAILQ_EMPTY(&stcb->asoc.asconf_send_queue)) {
		/* compose a new ASCONF chunk and send it */
		sctp_send_asconf(stcb, net, SCTP_ADDR_NOT_LOCKED);
	} else {
		/*
		 * Retransmission of the existing ASCONF is needed
		 */

		/* find the existing ASCONF */
		asconf = TAILQ_FIRST(&stcb->asoc.asconf_send_queue);
		if (asconf == NULL) {
			return (0);
		}
		/* do threshold management */
		if (sctp_threshold_management(inp, stcb, asconf->whoTo,
		    stcb->asoc.max_send_times)) {
			/* Assoc is over */
			return (1);
		}
		if (asconf->snd_count > stcb->asoc.max_send_times) {
			/*
			 * Something is rotten: our peer is not responding
			 * to ASCONFs but apparently is to other chunks.
			 * i.e. it is not properly handling the chunk type
			 * upper bits. Mark this peer as ASCONF incapable
			 * and cleanup.
			 */
			SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
			sctp_asconf_cleanup(stcb, net);
			return (0);
		}
		/*
		 * cleared threshold management, so now backoff the net and
		 * select an alternate
		 */
		sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
		alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
		if (asconf->whoTo != alt) {
			sctp_free_remote_addr(asconf->whoTo);
			asconf->whoTo = alt;
			atomic_add_int(&alt->ref_count, 1);
		}
		/* See if an ECN Echo is also stranded */
		TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
			if ((chk->whoTo == net) &&
			    (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				if (chk->sent != SCTP_DATAGRAM_RESEND) {
					chk->sent = SCTP_DATAGRAM_RESEND;
					sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
				}
				atomic_add_int(&alt->ref_count, 1);
			}
		}
		for (chk = asconf; chk; chk = nchk) {
			nchk = TAILQ_NEXT(chk, sctp_next);
			if (chk->whoTo != alt) {
				sctp_free_remote_addr(chk->whoTo);
				chk->whoTo = alt;
				atomic_add_int(&alt->ref_count, 1);
			}
			if (asconf->sent != SCTP_DATAGRAM_RESEND && chk->sent != SCTP_DATAGRAM_UNSENT)
				sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
			chk->sent = SCTP_DATAGRAM_RESEND;
		}
		if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
			/*
			 * If the address went un-reachable, we need to move
			 * to the alternate for ALL chunks in queue
			 */
			sctp_move_all_chunks_to_alt(stcb, net, alt);
		}
		/* mark the retran info */
		if (asconf->sent != SCTP_DATAGRAM_RESEND)
			sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
		asconf->sent = SCTP_DATAGRAM_RESEND;

		/* send another ASCONF if any and we can do */
		sctp_send_asconf(stcb, alt, SCTP_ADDR_NOT_LOCKED);
	}
	return (0);
}
/* Mobility adaptation */
void
sctp_delete_prim_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	if (stcb->asoc.deleted_primary == NULL) {
		SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: deleted_primary is not stored...\n");
		sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
		return;
	}
	SCTPDBG(SCTP_DEBUG_ASCONF1, "delete_prim_timer: finished to keep deleted primary ");
	SCTPDBG_ADDR(SCTP_DEBUG_ASCONF1, &stcb->asoc.deleted_primary->ro._l_addr.sa);
	sctp_free_remote_addr(stcb->asoc.deleted_primary);
	stcb->asoc.deleted_primary = NULL;
	sctp_mobility_feature_off(inp, SCTP_MOBILITY_PRIM_DELETED);
}
/*
 * For the shutdown and shutdown-ack, we do not keep one around on the
 * control queue. This means we must generate a new one and call the general
 * chunk output routine, AFTER having done threshold management.
 */
int
sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown into the queue for our net */
	if (alt) {
		sctp_send_shutdown(stcb, alt);
	} else {
		/*
		 * if alt is NULL, there is no dest to send to??
		 */
	}
	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
	return (0);
}
int
sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct sctp_nets *alt;

	/* first threshold management */
	if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
		/* Assoc is over */
		return (1);
	}
	/* second select an alternative */
	alt = sctp_find_alternate_net(stcb, net, 0);

	/* third generate a shutdown-ack into the queue for our net */
	sctp_send_shutdown_ack(stcb, alt);

	/* fourth restart timer */
	sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
	return (0);
}
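
/*
 * Consistency audit for the stream output wheel, called only when the
 * send/sent queues are empty but total_output_queue_size claims there is
 * still data: re-insert lost spokes and kick the output path.
 */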
static void
sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb)
{
	struct sctp_stream_out *outs;
	struct sctp_stream_queue_pending *sp;
	unsigned int chks_in_queue = 0;
	int being_filled = 0;

	/*
	 * This function is ONLY called when the send/sent queues are empty.
	 */
	if ((stcb == NULL) || (inp == NULL))
		return;

	if (stcb->asoc.sent_queue_retran_cnt) {
		SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
		    stcb->asoc.sent_queue_retran_cnt);
		stcb->asoc.sent_queue_retran_cnt = 0;
	}
	SCTP_TCB_SEND_LOCK(stcb);
	if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
		int i, cnt = 0;

		/* Check to see if a spoke fell off the wheel */
		for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
			if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
				sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
				cnt++;
			}
		}
		if (cnt) {
			/* yep, we lost a spoke or two */
			SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
		} else {
			/* no spokes lost, */
			stcb->asoc.total_output_queue_size = 0;
		}
		SCTP_TCB_SEND_UNLOCK(stcb);
		return;
	}
	SCTP_TCB_SEND_UNLOCK(stcb);
	/* Check to see if some data queued, if so report it */
	TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
		if (!TAILQ_EMPTY(&outs->outqueue)) {
			TAILQ_FOREACH(sp, &outs->outqueue, next) {
				if (sp->msg_is_complete)
					being_filled++;
				chks_in_queue++;
			}
		}
	}
	if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
		SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
		    stcb->asoc.stream_queue_cnt, chks_in_queue);
	}
	if (chks_in_queue) {
		/* call the output queue function */
		sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);
		if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
		    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
			/*
			 * Probably should go in and make it go back through
			 * and add fragments allowed
			 */
			if (being_filled == 0) {
				SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
				    chks_in_queue);
			}
		}
	} else {
		SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
		    (u_long)stcb->asoc.total_output_queue_size);
		stcb->asoc.total_output_queue_size = 0;
	}
}
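
/*
 * Heartbeat timer: back off a non-responding net, then send either a
 * normal HB or, when unconfirmed addresses exist, a burst of HBs (up to
 * sctp_hb_maxburst) to confirm them. Returns 1 if sending the HB killed
 * the association.
 */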
int
sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
    struct sctp_nets *net, int cnt_of_unconf)
{
	int ret;

	if (net) {
		if (net->hb_responded == 0) {
			if (net->ro._s_addr) {
				/*
				 * Invalidate the src address if we did not
				 * get a response last time.
				 */
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			}
			sctp_backoff_on_timeout(stcb, net, 1, 0);
		}
		/* Zero PBA, if it needs it */
		if (net->partial_bytes_acked) {
			net->partial_bytes_acked = 0;
		}
	}
	if ((stcb->asoc.total_output_queue_size > 0) &&
	    (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
	    (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
		sctp_audit_stream_queues_for_size(inp, stcb);
	}
	/* Send a new HB, this will do threshold management, pick a new dest */
	if (cnt_of_unconf == 0) {
		if (sctp_send_hb(stcb, 0, NULL) < 0) {
			return (1);
		}
	} else {
		/*
		 * this will send out extra hb's up to maxburst if there are
		 * any unconfirmed addresses.
		 */
		uint32_t cnt_sent = 0;

		TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
			if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
			    (net->dest_state & SCTP_ADDR_REACHABLE)) {
				cnt_sent++;
				if (net->hb_responded == 0) {
					/* Did we respond last time? */
					if (net->ro._s_addr) {
						sctp_free_ifa(net->ro._s_addr);
						net->ro._s_addr = NULL;
						net->src_addr_selected = 0;
					}
				}
				ret = sctp_send_hb(stcb, 1, net);
				if (ret < 0)
					return (1);
				else if (ret == 0) {
					break;
				}
				if (cnt_sent >= SCTP_BASE_SYSCTL(sctp_hb_maxburst))
					break;
			}
		}
	}
	return (0);
}
int
sctp_is_hb_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) {
		/* its running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}

int
sctp_is_sack_timer_running(struct sctp_tcb *stcb)
{
	if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
		/* its running */
		return (1);
	} else {
		/* nope */
		return (0);
	}
}
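
/*
 * Table of likely path-MTU plateau values, in ascending order, used by
 * the path-MTU raise timer (cf. the plateau tables of RFC 1191).
 * sctp_getnext_mtu() returns the next plateau strictly above the
 * current MTU, or the current MTU if none is larger.
 */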
#define SCTP_NUMBER_OF_MTU_SIZES 18
static uint32_t mtu_sizes[] = {
	68,
	296,
	508,
	512,
	544,
	576,
	1006,
	1492,
	1500,
	1536,
	2002,
	2048,
	4352,
	4464,
	8166,
	17914,
	32000,
	65535
};

static uint32_t
sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
{
	/* select another MTU that is just bigger than this one */
	int i;

	for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
		if (cur_mtu < mtu_sizes[i]) {
			/* no max_mtu is bigger than this one */
			return (mtu_sizes[i]);
		}
	}
	/* here return the highest allowable */
	return (cur_mtu);
}
void
sctp_pathmtu_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	uint32_t next_mtu, mtu;

	next_mtu = sctp_getnext_mtu(inp, net->mtu);

	if ((next_mtu > net->mtu) && (net->port == 0)) {
		if ((net->src_addr_selected == 0) ||
		    (net->ro._s_addr == NULL) ||
		    (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
			if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
				sctp_free_ifa(net->ro._s_addr);
				net->ro._s_addr = NULL;
				net->src_addr_selected = 0;
			} else if (net->ro._s_addr == NULL) {
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					/* KAME hack: embed scopeid */
					(void)sa6_embedscope(sin6, MODULE_GLOBAL(ip6_use_defzone));
				}
#endif
				net->ro._s_addr = sctp_source_address_selection(inp,
				    stcb,
				    (sctp_route_t *) & net->ro,
				    net, 0, stcb->asoc.vrf_id);
#if defined(INET6) && defined(SCTP_EMBEDDED_V6_SCOPE)
				if (net->ro._l_addr.sa.sa_family == AF_INET6) {
					struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)&net->ro._l_addr;

					(void)sa6_recoverscope(sin6);
				}
#endif				/* INET6 */
			}
			if (net->ro._s_addr)
				net->src_addr_selected = 1;
		}
		if (net->ro._s_addr) {
			mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr.sa, net->ro.ro_rt);
			if (net->port) {
				mtu -= sizeof(struct udphdr);
			}
			if (mtu > next_mtu) {
				net->mtu = next_mtu;
			}
		}
	}
	/* restart the timer */
	sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
}
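
/*
 * Autoclose timer: when the association has been idle (no send or
 * receive) for the configured number of ticks, initiate SHUTDOWN;
 * otherwise re-arm the timer for the remaining time.
 */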
void
sctp_autoclose_timer(struct sctp_inpcb *inp,
    struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
	struct timeval tn, *tim_touse;
	struct sctp_association *asoc;
	int ticks_gone_by;

	(void)SCTP_GETTIME_TIMEVAL(&tn);
	if (stcb->asoc.sctp_autoclose_ticks &&
	    sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
		/* Auto close is on */
		asoc = &stcb->asoc;
		/* pick the time to use */
		if (asoc->time_last_rcvd.tv_sec >
		    asoc->time_last_sent.tv_sec) {
			tim_touse = &asoc->time_last_rcvd;
		} else {
			tim_touse = &asoc->time_last_sent;
		}
		/* Now has long enough transpired to autoclose? */
		ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
		if ((ticks_gone_by > 0) &&
		    (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
			/*
			 * autoclose time has hit, call the output routine,
			 * which should do nothing just to be SURE we don't
			 * have hanging data. We can then safely check the
			 * queues and know that we are clear to send
			 * shutdown
			 */
			sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR, SCTP_SO_NOT_LOCKED);
			/* Are we clean? */
			if (TAILQ_EMPTY(&asoc->send_queue) &&
			    TAILQ_EMPTY(&asoc->sent_queue)) {
				/*
				 * there is nothing queued to send, so I'm
				 * done...
				 */
				if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
					/* only send SHUTDOWN 1st time thru */
					sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
					if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
					    (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
						SCTP_STAT_DECR_GAUGE32(sctps_currestab);
					}
					SCTP_SET_STATE(asoc, SCTP_STATE_SHUTDOWN_SENT);
					SCTP_CLEAR_SUBSTATE(asoc, SCTP_STATE_SHUTDOWN_PENDING);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
					sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
					    stcb->sctp_ep, stcb,
					    asoc->primary_destination);
				}
			}
		} else {
			/*
			 * No auto close at this time, reset t-o to check
			 * later
			 */
			int tmp;

			/* fool the timer startup to use the time left */
			tmp = asoc->sctp_autoclose_ticks;
			asoc->sctp_autoclose_ticks -= ticks_gone_by;
			sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
			    net);
			/* restore the real tick value */
			asoc->sctp_autoclose_ticks = tmp;
		}
	}
}
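
/*
 * Iterator timer: resumes an endpoint/association walk, running the
 * supplied callbacks on each matching inp/stcb and yielding (via a
 * restart timer) after SCTP_ITERATOR_MAX_AT_ONCE associations.
 */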
void
sctp_iterator_timer(struct sctp_iterator *it)
{
	int iteration_count = 0;
	int inp_skip = 0;

	/*
	 * only one iterator can run at a time. This is the only way we can
	 * cleanly pull ep's from underneath all the running iterators when
	 * an ep is freed.
	 */
	SCTP_ITERATOR_LOCK();
	if (it->inp == NULL) {
		/* iterator is complete */
done_with_iterator:
		SCTP_ITERATOR_UNLOCK();
		SCTP_INP_INFO_WLOCK();
		TAILQ_REMOVE(&SCTP_BASE_INFO(iteratorhead), it, sctp_nxt_itr);
		/* stopping the callout is not needed, in theory */
		SCTP_INP_INFO_WUNLOCK();
		(void)SCTP_OS_TIMER_STOP(&it->tmr.timer);
		if (it->function_atend != NULL) {
			(*it->function_atend) (it->pointer, it->val);
		}
		SCTP_FREE(it, SCTP_M_ITER);
		return;
	}
select_a_new_ep:
	SCTP_INP_WLOCK(it->inp);
	while (((it->pcb_flags) &&
	    ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
	    ((it->pcb_features) &&
	    ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
		/* endpoint flags or features don't match, so keep looking */
		if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
			SCTP_INP_WUNLOCK(it->inp);
			goto done_with_iterator;
		}
		SCTP_INP_WUNLOCK(it->inp);
		it->inp = LIST_NEXT(it->inp, sctp_list);
		if (it->inp == NULL) {
			goto done_with_iterator;
		}
		SCTP_INP_WLOCK(it->inp);
	}
	if ((it->inp->inp_starting_point_for_iterator != NULL) &&
	    (it->inp->inp_starting_point_for_iterator != it)) {
		SCTP_PRINTF("Iterator collision, waiting for one at %p\n",
		    it->inp);
		SCTP_INP_WUNLOCK(it->inp);
		goto start_timer_return;
	}
	/* mark the current iterator on the endpoint */
	it->inp->inp_starting_point_for_iterator = it;
	SCTP_INP_WUNLOCK(it->inp);
	SCTP_INP_RLOCK(it->inp);
	/* now go through each assoc which is in the desired state */
	if (it->done_current_ep == 0) {
		if (it->function_inp != NULL)
			inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
		it->done_current_ep = 1;
	}
	if (it->stcb == NULL) {
		/* run the per instance function */
		it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
	}
	SCTP_INP_RUNLOCK(it->inp);
	if ((inp_skip) || it->stcb == NULL) {
		if (it->function_inp_end != NULL) {
			inp_skip = (*it->function_inp_end) (it->inp,
			    it->pointer,
			    it->val);
		}
		goto next_inp;
	}
	if ((it->stcb) &&
	    (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
		it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
	}
	while (it->stcb) {
		SCTP_TCB_LOCK(it->stcb);
		if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
			/* not in the right state... keep looking */
			SCTP_TCB_UNLOCK(it->stcb);
			goto next_assoc;
		}
		/* mark the current iterator on the assoc */
		it->stcb->asoc.stcb_starting_point_for_iterator = it;
		/* see if we have limited out the iterator loop */
		iteration_count++;
		if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
	start_timer_return:
			/* set a timer to continue this later */
			if (it->stcb)
				SCTP_TCB_UNLOCK(it->stcb);
			sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR,
			    (struct sctp_inpcb *)it, NULL, NULL);
			SCTP_ITERATOR_UNLOCK();
			return;
		}
		/* run function on this one */
		(*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);

		/*
		 * we lie here, it really needs to have its own type but
		 * first I must verify that this won't affect things :-0
		 */
		if (it->no_chunk_output == 0)
			sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3, SCTP_SO_NOT_LOCKED);

		SCTP_TCB_UNLOCK(it->stcb);
next_assoc:
		it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
		if (it->stcb == NULL) {
			if (it->function_inp_end != NULL) {
				inp_skip = (*it->function_inp_end) (it->inp,
				    it->pointer,
				    it->val);
			}
		}
	}
next_inp:
	/* done with all assocs on this endpoint, move on to next endpoint */
	it->done_current_ep = 0;
	SCTP_INP_WLOCK(it->inp);
	it->inp->inp_starting_point_for_iterator = NULL;
	SCTP_INP_WUNLOCK(it->inp);
	if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
		it->inp = NULL;
	} else {
		SCTP_INP_INFO_RLOCK();
		it->inp = LIST_NEXT(it->inp, sctp_list);
		SCTP_INP_INFO_RUNLOCK();
	}
	if (it->inp == NULL) {
		goto done_with_iterator;
	}
	goto select_a_new_ep;
}