1 /*-
2  * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
3  *
4  * Redistribution and use in source and binary forms, with or without
5  * modification, are permitted provided that the following conditions are met:
6  *
7  * a) Redistributions of source code must retain the above copyright notice,
8  *   this list of conditions and the following disclaimer.
9  *
10  * b) Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *   the documentation and/or other materials provided with the distribution.
13  *
14  * c) Neither the name of Cisco Systems, Inc. nor the names of its
15  *    contributors may be used to endorse or promote products derived
16  *    from this software without specific prior written permission.
17  *
18  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
20  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21  * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
22  * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23  * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26  * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28  * THE POSSIBILITY OF SUCH DAMAGE.
29  */
30
31 /* $KAME: sctp_timer.c,v 1.29 2005/03/06 16:04:18 itojun Exp $   */
32
33 #include <sys/cdefs.h>
34 __FBSDID("$FreeBSD$");
35
36 #define _IP_VHL
37 #include <netinet/sctp_os.h>
38 #include <netinet/sctp_pcb.h>
39 #ifdef INET6
40 #include <netinet6/sctp6_var.h>
41 #endif
42 #include <netinet/sctp_var.h>
43 #include <netinet/sctp_sysctl.h>
44 #include <netinet/sctp_timer.h>
45 #include <netinet/sctputil.h>
46 #include <netinet/sctp_output.h>
47 #include <netinet/sctp_header.h>
48 #include <netinet/sctp_indata.h>
49 #include <netinet/sctp_asconf.h>
50 #include <netinet/sctp_input.h>
51 #include <netinet/sctp.h>
52 #include <netinet/sctp_uio.h>
53
54
55
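/*
 * Early fast-retransmit timer: scan the sent queue for this destination,
 * newest entry first, and mark chunks that have been outstanding for at
 * least max(current RTT estimate, sctp_early_fr_msec) milliseconds as
 * SCTP_DATAGRAM_RESEND.  If anything was marked, push output, pull cwnd
 * back by one MTU and force congestion avoidance; finally re-arm the timer
 * while the flight size is still below cwnd.
 */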
56 void
57 sctp_early_fr_timer(struct sctp_inpcb *inp,
58     struct sctp_tcb *stcb,
59     struct sctp_nets *net)
60 {
61         struct sctp_tmit_chunk *chk, *tp2;
62         struct timeval now, min_wait, tv;
63         unsigned int cur_rtt, cnt = 0, cnt_resend = 0;
64
65         /* an early FR is occurring. */
66         (void)SCTP_GETTIME_TIMEVAL(&now);
67         /* get cur rto in micro-seconds */
68         if (net->lastsa == 0) {
69                 /* Hmm no rtt estimate yet? */
70                 cur_rtt = stcb->asoc.initial_rto >> 2;
71         } else {
72
73                 cur_rtt = ((net->lastsa >> 2) + net->lastsv) >> 1;
74         }
75         if (cur_rtt < sctp_early_fr_msec) {
76                 cur_rtt = sctp_early_fr_msec;
77         }
78         cur_rtt *= 1000;
79         tv.tv_sec = cur_rtt / 1000000;
80         tv.tv_usec = cur_rtt % 1000000;
81         min_wait = now;
82         timevalsub(&min_wait, &tv);
83         if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
84                 /*
85                  * if we hit here, we don't have enough seconds on the clock
86                  * to account for the RTO. We just let the lower seconds be
87                  * the bounds and don't worry about it. This may mean we
88                  * will mark a lot more than we should.
89                  */
90                 min_wait.tv_sec = min_wait.tv_usec = 0;
91         }
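        /*
         * min_wait is "now" pulled back by the wait interval, expressed in
         * microseconds.  Illustrative example (numbers assumed, not taken
         * from a real trace): with an RTT estimate of 30 ms and
         * sctp_early_fr_msec set to 250, cur_rtt is raised to 250, scaled
         * to 250000 usec, and min_wait lands 250 ms behind "now"; only
         * chunks time-stamped before min_wait are old enough to mark.
         */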
92         chk = TAILQ_LAST(&stcb->asoc.sent_queue, sctpchunk_listhead);
93         for (; chk != NULL; chk = tp2) {
94                 tp2 = TAILQ_PREV(chk, sctpchunk_listhead, sctp_next);
95                 if (chk->whoTo != net) {
96                         continue;
97                 }
98                 if (chk->sent == SCTP_DATAGRAM_RESEND)
99                         cnt_resend++;
100                 else if ((chk->sent > SCTP_DATAGRAM_UNSENT) &&
101                     (chk->sent < SCTP_DATAGRAM_RESEND)) {
102                         /* pending, may need retran */
103                         if (chk->sent_rcv_time.tv_sec > min_wait.tv_sec) {
104                                 /*
105                                  * this chunk was sent more recently than
106                                  * min_wait, so it has not been outstanding
107                                  * long enough to consider; skip it.
108                                  */
109                                 continue;
110                         } else if (chk->sent_rcv_time.tv_sec == min_wait.tv_sec) {
111                                 /*
112                                  * we must look at the micro seconds to
113                                  * know.
114                                  */
115                                 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
116                                         /*
117                                          * ok it was sent after our boundary
118                                          * time.
119                                          */
120                                         continue;
121                                 }
122                         }
123                         if (sctp_logging_level & SCTP_EARLYFR_LOGGING_ENABLE) {
124                                 sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
125                                     4, SCTP_FR_MARKED_EARLY);
126                         }
127                         SCTP_STAT_INCR(sctps_earlyfrmrkretrans);
128                         chk->sent = SCTP_DATAGRAM_RESEND;
129                         sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
130                         /* double book size since we are doing an early FR */
131                         chk->book_size_scale++;
132                         cnt += chk->send_size;
133                         if ((cnt + net->flight_size) > net->cwnd) {
134                                 /* Mark all we could possibly resend */
135                                 break;
136                         }
137                 }
138         }
139         if (cnt) {
140                 int old_cwnd;
141
142                 old_cwnd = net->cwnd;
143                 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
144                 /*
145                  * make a small adjustment to cwnd and force to CA.
146                  */
147
148                 if (net->cwnd > net->mtu)
149                         /* drop down one MTU after sending */
150                         net->cwnd -= net->mtu;
151                 if (net->cwnd < net->ssthresh)
152                         /* still in SS move to CA */
153                         net->ssthresh = net->cwnd - 1;
154                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
155                         sctp_log_cwnd(stcb, net, (old_cwnd - net->cwnd), SCTP_CWND_LOG_FROM_FR);
156                 }
157         } else if (cnt_resend) {
158                 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_EARLY_FR_TMR);
159         }
160         /* Restart it? */
161         if (net->flight_size < net->cwnd) {
162                 SCTP_STAT_INCR(sctps_earlyfrstrtmr);
163                 sctp_timer_start(SCTP_TIMER_TYPE_EARLYFR, stcb->sctp_ep, stcb, net);
164         }
165 }
166
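/*
 * Rebuild the retransmission bookkeeping from scratch: walk the sent queue
 * and the control send queue and recount sent_queue_retran_cnt (and
 * sent_queue_cnt for data) from the chunks actually marked
 * SCTP_DATAGRAM_RESEND, rather than trusting the incremental counters.
 */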
167 void
168 sctp_audit_retranmission_queue(struct sctp_association *asoc)
169 {
170         struct sctp_tmit_chunk *chk;
171
172         SCTPDBG(SCTP_DEBUG_TIMER4, "Audit invoked on send queue cnt:%d onqueue:%d\n",
173             asoc->sent_queue_retran_cnt,
174             asoc->sent_queue_cnt);
175         asoc->sent_queue_retran_cnt = 0;
176         asoc->sent_queue_cnt = 0;
177         TAILQ_FOREACH(chk, &asoc->sent_queue, sctp_next) {
178                 if (chk->sent == SCTP_DATAGRAM_RESEND) {
179                         sctp_ucount_incr(asoc->sent_queue_retran_cnt);
180                 }
181                 asoc->sent_queue_cnt++;
182         }
183         TAILQ_FOREACH(chk, &asoc->control_send_queue, sctp_next) {
184                 if (chk->sent == SCTP_DATAGRAM_RESEND) {
185                         sctp_ucount_incr(asoc->sent_queue_retran_cnt);
186                 }
187         }
188         SCTPDBG(SCTP_DEBUG_TIMER4, "Audit completes retran:%d onqueue:%d\n",
189             asoc->sent_queue_retran_cnt,
190             asoc->sent_queue_cnt);
191 }
192
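/*
 * Threshold management: bump the per-destination error count, marking the
 * address unreachable and notifying the ULP once the count exceeds its
 * failure threshold, and bump the association-wide error count (skipped
 * for unconfirmed addresses).  If the overall count exceeds "threshold"
 * the association is aborted with an operational error built around
 * SCTP_CAUSE_PROTOCOL_VIOLATION and 1 is returned; otherwise 0.
 */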
193 int
194 sctp_threshold_management(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
195     struct sctp_nets *net, uint16_t threshold)
196 {
197         if (net) {
198                 net->error_count++;
199                 SCTPDBG(SCTP_DEBUG_TIMER4, "Error count for %p now %d thresh:%d\n",
200                     net, net->error_count,
201                     net->failure_threshold);
202                 if (net->error_count > net->failure_threshold) {
203                         /* We had a threshold failure */
204                         if (net->dest_state & SCTP_ADDR_REACHABLE) {
205                                 net->dest_state &= ~SCTP_ADDR_REACHABLE;
206                                 net->dest_state |= SCTP_ADDR_NOT_REACHABLE;
207                                 net->dest_state &= ~SCTP_ADDR_REQ_PRIMARY;
208                                 if (net == stcb->asoc.primary_destination) {
209                                         net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
210                                 }
211                                 sctp_ulp_notify(SCTP_NOTIFY_INTERFACE_DOWN,
212                                     stcb,
213                                     SCTP_FAILED_THRESHOLD,
214                                     (void *)net);
215                         }
216                 }
217                 /*********HOLD THIS COMMENT FOR PATCH OF ALTERNATE
218                  *********ROUTING CODE
219                  */
220                 /*********HOLD THIS COMMENT FOR END OF PATCH OF ALTERNATE
221                  *********ROUTING CODE
222                  */
223         }
224         if (stcb == NULL)
225                 return (0);
226
227         if (net) {
228                 if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) == 0) {
229                         stcb->asoc.overall_error_count++;
230                 }
231         } else {
232                 stcb->asoc.overall_error_count++;
233         }
234         SCTPDBG(SCTP_DEBUG_TIMER4, "Overall error count for %p now %d thresh:%u state:%x\n",
235             &stcb->asoc, stcb->asoc.overall_error_count,
236             (uint32_t) threshold,
237             ((net == NULL) ? (uint32_t) 0 : (uint32_t) net->dest_state));
238         /*
239          * We specifically do not do >= to give the assoc one more chance
240          * before we fail it.
241          */
242         if (stcb->asoc.overall_error_count > threshold) {
243                 /* Abort notification sends a ULP notify */
244                 struct mbuf *oper;
245
246                 oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
247                     0, M_DONTWAIT, 1, MT_DATA);
248                 if (oper) {
249                         struct sctp_paramhdr *ph;
250                         uint32_t *ippp;
251
252                         SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
253                             sizeof(uint32_t);
254                         ph = mtod(oper, struct sctp_paramhdr *);
255                         ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
256                         ph->param_length = htons(SCTP_BUF_LEN(oper));
257                         ippp = (uint32_t *) (ph + 1);
258                         *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_1);
259                 }
260                 inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_1;
261                 sctp_abort_an_association(inp, stcb, SCTP_FAILED_THRESHOLD, oper);
262                 return (1);
263         }
264         return (0);
265 }
266
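/*
 * Alternate-destination selection.  With highest_ssthresh set (the CMT
 * RTX_SSTHRESH policy) the reachable, confirmed destination with the
 * largest ssthresh is chosen, ties broken by a stored random bit.
 * Otherwise the net list is walked circularly starting at "net", looking
 * for a reachable, confirmed destination with a cached route; failing
 * that, any confirmed destination other than "net" will do, and as a last
 * resort "net" itself is returned.
 */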
267 struct sctp_nets *
268 sctp_find_alternate_net(struct sctp_tcb *stcb,
269     struct sctp_nets *net,
270     int highest_ssthresh)
271 {
272         /* Find and return an alternate network if possible */
273         struct sctp_nets *alt, *mnet, *hthresh = NULL;
274         int once;
275         uint32_t val = 0;
276
277         if (stcb->asoc.numnets == 1) {
278                 /* No others but net */
279                 return (TAILQ_FIRST(&stcb->asoc.nets));
280         }
281         if (highest_ssthresh) {
282                 TAILQ_FOREACH(mnet, &stcb->asoc.nets, sctp_next) {
283                         if (((mnet->dest_state & SCTP_ADDR_REACHABLE) != SCTP_ADDR_REACHABLE) ||
284                             (mnet->dest_state & SCTP_ADDR_UNCONFIRMED)
285                             ) {
286                                 /*
287                                  * will skip ones that are unreachable or
288                                  * unconfirmed
289                                  */
290                                 continue;
291                         }
292                         if (val < mnet->ssthresh) {
293                                 hthresh = mnet;
294                                 val = mnet->ssthresh;
295                         } else if (val == mnet->ssthresh) {
296                                 uint32_t rndval;
297                                 uint8_t this_random;
298
299                                 if (stcb->asoc.hb_random_idx > 3) {
300                                         rndval = sctp_select_initial_TSN(&stcb->sctp_ep->sctp_ep);
301                                         memcpy(stcb->asoc.hb_random_values, &rndval,
302                                             sizeof(stcb->asoc.hb_random_values));
303                                         this_random = stcb->asoc.hb_random_values[0];
304                                         stcb->asoc.hb_random_idx = 0;
305                                         stcb->asoc.hb_ect_randombit = 0;
306                                 } else {
307                                         this_random = stcb->asoc.hb_random_values[stcb->asoc.hb_random_idx];
308                                         stcb->asoc.hb_random_idx++;
309                                         stcb->asoc.hb_ect_randombit = 0;
310                                 }
311                                 if (this_random % 2) {
312                                         hthresh = mnet;
313                                         val = mnet->ssthresh;
314                                 }
315                         }
316                 }
317                 if (hthresh) {
318                         return (hthresh);
319                 }
320         }
321         mnet = net;
322         once = 0;
323
324         if (mnet == NULL) {
325                 mnet = TAILQ_FIRST(&stcb->asoc.nets);
326         }
327         do {
328                 alt = TAILQ_NEXT(mnet, sctp_next);
329                 if (alt == NULL) {
330                         once++;
331                         if (once > 1) {
332                                 break;
333                         }
334                         alt = TAILQ_FIRST(&stcb->asoc.nets);
335                 }
336                 if (alt->ro.ro_rt == NULL) {
337                         if (alt->ro._s_addr) {
338                                 sctp_free_ifa(alt->ro._s_addr);
339                                 alt->ro._s_addr = NULL;
340
341                         }
342                         alt->src_addr_selected = 0;
343                 }
344                 if (
345                     ((alt->dest_state & SCTP_ADDR_REACHABLE) == SCTP_ADDR_REACHABLE) &&
346                     (alt->ro.ro_rt != NULL) &&
347                 /* sa_ignore NO_NULL_CHK */
348                     (!(alt->dest_state & SCTP_ADDR_UNCONFIRMED))
349                     ) {
350                         /* Found a reachable address */
351                         break;
352                 }
353                 mnet = alt;
354         } while (alt != NULL);
355
356         if (alt == NULL) {
357                 /* Case where NO in-service network exists (dormant state) */
358                 /* we rotate destinations */
359                 once = 0;
360                 mnet = net;
361                 do {
362                         alt = TAILQ_NEXT(mnet, sctp_next);
363                         if (alt == NULL) {
364                                 once++;
365                                 if (once > 1) {
366                                         break;
367                                 }
368                                 alt = TAILQ_FIRST(&stcb->asoc.nets);
369                         }
370                         /* sa_ignore NO_NULL_CHK */
371                         if ((!(alt->dest_state & SCTP_ADDR_UNCONFIRMED)) &&
372                             (alt != net)) {
373                                 /* Found an alternate address */
374                                 break;
375                         }
376                         mnet = alt;
377                 } while (alt != NULL);
378         }
379         if (alt == NULL) {
380                 return (net);
381         }
382         return (alt);
383 }
384
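/*
 * Back off a destination after a timeout: double its RTO (seeding with
 * minrto if it was zero, clamping at maxrto) and, unless this was only a
 * window probe or nothing was marked, drop ssthresh to half the old cwnd
 * (floored at two MTUs), collapse cwnd to a single MTU and clear the
 * partial-bytes-acked count.
 */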
385 static void
386 sctp_backoff_on_timeout(struct sctp_tcb *stcb,
387     struct sctp_nets *net,
388     int win_probe,
389     int num_marked)
390 {
391         if (net->RTO == 0) {
392                 net->RTO = stcb->asoc.minrto;
393         }
394         net->RTO <<= 1;
395         if (net->RTO > stcb->asoc.maxrto) {
396                 net->RTO = stcb->asoc.maxrto;
397         }
398         if ((win_probe == 0) && num_marked) {
399                 /* We don't apply penalty to window probe scenarios */
400                 int old_cwnd = net->cwnd;
401
402                 net->ssthresh = net->cwnd >> 1;
403                 if (net->ssthresh < (net->mtu << 1)) {
404                         net->ssthresh = (net->mtu << 1);
405                 }
406                 net->cwnd = net->mtu;
407                 /* floor of 1 mtu */
408                 if (net->cwnd < net->mtu)
409                         net->cwnd = net->mtu;
410                 if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
411                         sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
412                 }
413                 net->partial_bytes_acked = 0;
414         }
415 }
416
417 static int
418 sctp_mark_all_for_resend(struct sctp_tcb *stcb,
419     struct sctp_nets *net,
420     struct sctp_nets *alt,
421     int window_probe,
422     int *num_marked)
423 {
424
425         /*
426          * Mark all chunks (well not all) that were sent to *net for
427          * retransmission. Move them to alt for their destination as well...
428          * We only mark chunks that have been outstanding long enough to
429          * have received feedback.
430          */
431         struct sctp_tmit_chunk *chk, *tp2, *could_be_sent = NULL;
432         struct sctp_nets *lnets;
433         struct timeval now, min_wait, tv;
434         int cur_rtt;
435         int audit_tf, num_mk, fir;
436         unsigned int cnt_mk;
437         uint32_t orig_flight, orig_tf;
438         uint32_t tsnlast, tsnfirst;
439
440         /*
441          * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being used,
442          * then pick dest with largest ssthresh for any retransmission.
443          * (iyengar@cis.udel.edu, 2005/08/12)
444          */
445         if (sctp_cmt_on_off) {
446                 alt = sctp_find_alternate_net(stcb, net, 1);
447                 /*
448                  * CUCv2: If a different dest is picked for the
449                  * retransmission, then new (rtx-)pseudo_cumack needs to be
450                  * tracked for orig dest. Let CUCv2 track new (rtx-)
451                  * pseudo-cumack always.
452                  */
453                 net->find_pseudo_cumack = 1;
454                 net->find_rtx_pseudo_cumack = 1;
455         }
456         /* none in flight now */
457         audit_tf = 0;
458         fir = 0;
459         /*
460          * figure out how long a data chunk must be pending before we can
461          * mark it for retransmission.
462          */
463         (void)SCTP_GETTIME_TIMEVAL(&now);
464         /* get cur rto in micro-seconds */
465         cur_rtt = (((net->lastsa >> 2) + net->lastsv) >> 1);
466         cur_rtt *= 1000;
467         if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
468                 sctp_log_fr(cur_rtt,
469                     stcb->asoc.peers_rwnd,
470                     window_probe,
471                     SCTP_FR_T3_MARK_TIME);
472                 sctp_log_fr(net->flight_size,
473                     SCTP_OS_TIMER_PENDING(&net->fr_timer.timer),
474                     SCTP_OS_TIMER_ACTIVE(&net->fr_timer.timer),
475                     SCTP_FR_CWND_REPORT);
476                 sctp_log_fr(net->flight_size, net->cwnd, stcb->asoc.total_flight, SCTP_FR_CWND_REPORT);
477         }
478         tv.tv_sec = cur_rtt / 1000000;
479         tv.tv_usec = cur_rtt % 1000000;
480         min_wait = now;
481         timevalsub(&min_wait, &tv);
482         if (min_wait.tv_sec < 0 || min_wait.tv_usec < 0) {
483                 /*
484                  * if we hit here, we don't have enough seconds on the clock
485                  * to account for the RTO. We just let the lower seconds be
486                  * the bounds and don't worry about it. This may mean we
487                  * will mark a lot more than we should.
488                  */
489                 min_wait.tv_sec = min_wait.tv_usec = 0;
490         }
491         if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
492                 sctp_log_fr(cur_rtt, now.tv_sec, now.tv_usec, SCTP_FR_T3_MARK_TIME);
493                 sctp_log_fr(0, min_wait.tv_sec, min_wait.tv_usec, SCTP_FR_T3_MARK_TIME);
494         }
495         /*
496          * Our rwnd will be incorrect here since we are not adding back the
497          * cnt * mbuf but we will fix that down below.
498          */
499         orig_flight = net->flight_size;
500         orig_tf = stcb->asoc.total_flight;
501
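        /*
         * The snapshots above are used after the marking loop: if the
         * per-net and association-wide flight sizes did not shrink by the
         * same amount, the bookkeeping is out of step and audit_tf forces
         * a full rebuild of the flight counters below.
         */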
502         net->fast_retran_ip = 0;
503         /* Now on to each chunk */
504         num_mk = cnt_mk = 0;
505         tsnfirst = tsnlast = 0;
506         chk = TAILQ_FIRST(&stcb->asoc.sent_queue);
507         for (; chk != NULL; chk = tp2) {
508                 tp2 = TAILQ_NEXT(chk, sctp_next);
509                 if ((compare_with_wrap(stcb->asoc.last_acked_seq,
510                     chk->rec.data.TSN_seq,
511                     MAX_TSN)) ||
512                     (stcb->asoc.last_acked_seq == chk->rec.data.TSN_seq)) {
513                         /* Strange case our list got out of order? */
514                         SCTP_PRINTF("Our list is out of order?\n");
515                         panic("Out of order list");
516                 }
517                 if ((chk->whoTo == net) && (chk->sent < SCTP_DATAGRAM_ACKED)) {
518                         /*
519                          * found one to mark: If it is less than
520                          * DATAGRAM_ACKED it MUST not be a skipped or marked
521                          * TSN but instead one that is either already set
522                          * for retransmission OR one that needs
523                          * retransmission.
524                          */
525
526                         /* validate it has been outstanding long enough */
527                         if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
528                                 sctp_log_fr(chk->rec.data.TSN_seq,
529                                     chk->sent_rcv_time.tv_sec,
530                                     chk->sent_rcv_time.tv_usec,
531                                     SCTP_FR_T3_MARK_TIME);
532                         }
533                         if ((chk->sent_rcv_time.tv_sec > min_wait.tv_sec) && (window_probe == 0)) {
534                                 /*
535                                  * this chunk was sent more recently than
536                                  * min_wait, so it has not been outstanding
537                                  * long enough to consider; skip it.
538                                  */
539                                 if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
540                                         sctp_log_fr(0,
541                                             chk->sent_rcv_time.tv_sec,
542                                             chk->sent_rcv_time.tv_usec,
543                                             SCTP_FR_T3_STOPPED);
544                                 }
545                                 continue;
546                         } else if ((chk->sent_rcv_time.tv_sec == min_wait.tv_sec) &&
547                             (window_probe == 0)) {
548                                 /*
549                                  * we must look at the micro seconds to
550                                  * know.
551                                  */
552                                 if (chk->sent_rcv_time.tv_usec >= min_wait.tv_usec) {
553                                         /*
554                                          * ok it was sent after our boundary
555                                          * time.
556                                          */
557                                         if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
558                                                 sctp_log_fr(0,
559                                                     chk->sent_rcv_time.tv_sec,
560                                                     chk->sent_rcv_time.tv_usec,
561                                                     SCTP_FR_T3_STOPPED);
562                                         }
563                                         continue;
564                                 }
565                         }
566                         if (PR_SCTP_TTL_ENABLED(chk->flags)) {
567                                 /* Is it expired? */
568                                 if ((now.tv_sec > chk->rec.data.timetodrop.tv_sec) ||
569                                     ((chk->rec.data.timetodrop.tv_sec == now.tv_sec) &&
570                                     (now.tv_usec > chk->rec.data.timetodrop.tv_usec))) {
571                                         /* Yes so drop it */
572                                         if (chk->data) {
573                                                 (void)sctp_release_pr_sctp_chunk(stcb,
574                                                     chk,
575                                                     (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
576                                                     &stcb->asoc.sent_queue);
577                                         }
578                                 }
579                                 continue;
580                         }
581                         if (PR_SCTP_RTX_ENABLED(chk->flags)) {
582                                 /* Has it been retransmitted tv_sec times? */
583                                 if (chk->snd_count > chk->rec.data.timetodrop.tv_sec) {
584                                         if (chk->data) {
585                                                 (void)sctp_release_pr_sctp_chunk(stcb,
586                                                     chk,
587                                                     (SCTP_RESPONSE_TO_USER_REQ | SCTP_NOTIFY_DATAGRAM_SENT),
588                                                     &stcb->asoc.sent_queue);
589                                         }
590                                 }
591                                 continue;
592                         }
593                         if (chk->sent < SCTP_DATAGRAM_RESEND) {
594                                 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
595                                 num_mk++;
596                                 if (fir == 0) {
597                                         fir = 1;
598                                         tsnfirst = chk->rec.data.TSN_seq;
599                                 }
600                                 tsnlast = chk->rec.data.TSN_seq;
601                                 if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
602                                         sctp_log_fr(chk->rec.data.TSN_seq, chk->snd_count,
603                                             0, SCTP_FR_T3_MARKED);
604                                 }
605                                 if (chk->rec.data.chunk_was_revoked) {
606                                         /* deflate the cwnd */
607                                         chk->whoTo->cwnd -= chk->book_size;
608                                         chk->rec.data.chunk_was_revoked = 0;
609                                 }
610                                 net->marked_retrans++;
611                                 stcb->asoc.marked_retrans++;
612                                 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
613                                         sctp_misc_ints(SCTP_FLIGHT_LOG_DOWN_RSND_TO,
614                                             chk->whoTo->flight_size,
615                                             chk->book_size,
616                                             (uintptr_t) chk->whoTo,
617                                             chk->rec.data.TSN_seq);
618                                 }
619                                 sctp_flight_size_decrease(chk);
620                                 sctp_total_flight_decrease(stcb, chk);
621                                 stcb->asoc.peers_rwnd += chk->send_size;
622                                 stcb->asoc.peers_rwnd += sctp_peer_chunk_oh;
623                         }
624                         chk->sent = SCTP_DATAGRAM_RESEND;
625                         SCTP_STAT_INCR(sctps_markedretrans);
626
627                         /* reset the TSN for striking and other FR stuff */
628                         chk->rec.data.doing_fast_retransmit = 0;
629                         /* Clear any time so NO RTT is being done */
630                         chk->do_rtt = 0;
631                         if (alt != net) {
632                                 sctp_free_remote_addr(chk->whoTo);
633                                 chk->no_fr_allowed = 1;
634                                 chk->whoTo = alt;
635                                 atomic_add_int(&alt->ref_count, 1);
636                         } else {
637                                 chk->no_fr_allowed = 0;
638                                 if (TAILQ_EMPTY(&stcb->asoc.send_queue)) {
639                                         chk->rec.data.fast_retran_tsn = stcb->asoc.sending_seq;
640                                 } else {
641                                         chk->rec.data.fast_retran_tsn = (TAILQ_FIRST(&stcb->asoc.send_queue))->rec.data.TSN_seq;
642                                 }
643                         }
644                         /*
645                          * CMT: Do not allow FRs on retransmitted TSNs.
646                          */
647                         if (sctp_cmt_on_off == 1) {
648                                 chk->no_fr_allowed = 1;
649                         }
650                 } else if (chk->sent == SCTP_DATAGRAM_ACKED) {
651                         /* remember highest acked one */
652                         could_be_sent = chk;
653                 }
654                 if (chk->sent == SCTP_DATAGRAM_RESEND) {
655                         cnt_mk++;
656                 }
657         }
658         if ((orig_flight - net->flight_size) != (orig_tf - stcb->asoc.total_flight)) {
659                 /* we did not subtract the same things? */
660                 audit_tf = 1;
661         }
662         if (sctp_logging_level & (SCTP_EARLYFR_LOGGING_ENABLE | SCTP_FR_LOGGING_ENABLE)) {
663                 sctp_log_fr(tsnfirst, tsnlast, num_mk, SCTP_FR_T3_TIMEOUT);
664         }
665 #ifdef SCTP_DEBUG
666         if (num_mk) {
667                 SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
668                     tsnlast);
669                 SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%ld\n",
670                     num_mk, (u_long)stcb->asoc.peers_rwnd);
671                 SCTPDBG(SCTP_DEBUG_TIMER1, "LAST TSN marked was %x\n",
672                     tsnlast);
673                 SCTPDBG(SCTP_DEBUG_TIMER1, "Num marked for retransmission was %d peer-rwd:%d\n",
674                     num_mk,
675                     (int)stcb->asoc.peers_rwnd);
676         }
677 #endif
678         *num_marked = num_mk;
679         if ((stcb->asoc.sent_queue_retran_cnt == 0) && (could_be_sent)) {
680                 /* fix it so we retransmit the highest acked anyway */
681                 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
682                 cnt_mk++;
683                 could_be_sent->sent = SCTP_DATAGRAM_RESEND;
684         }
685         if (stcb->asoc.sent_queue_retran_cnt != cnt_mk) {
686 #ifdef INVARIANTS
687                 SCTP_PRINTF("Local Audit says there are %d for retran asoc cnt:%d\n",
688                     cnt_mk, stcb->asoc.sent_queue_retran_cnt);
689 #endif
690 #ifndef SCTP_AUDITING_ENABLED
691                 stcb->asoc.sent_queue_retran_cnt = cnt_mk;
692 #endif
693         }
694         /* Now check for an ECN Echo that may be stranded */
695         TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
696                 if ((chk->whoTo == net) &&
697                     (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
698                         sctp_free_remote_addr(chk->whoTo);
699                         chk->whoTo = alt;
700                         if (chk->sent != SCTP_DATAGRAM_RESEND) {
701                                 chk->sent = SCTP_DATAGRAM_RESEND;
702                                 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
703                         }
704                         atomic_add_int(&alt->ref_count, 1);
705                 }
706         }
707         if (audit_tf) {
708                 SCTPDBG(SCTP_DEBUG_TIMER4,
709                     "Audit total flight due to negative value net:%p\n",
710                     net);
711                 stcb->asoc.total_flight = 0;
712                 stcb->asoc.total_flight_count = 0;
713                 /* Clear all networks flight size */
714                 TAILQ_FOREACH(lnets, &stcb->asoc.nets, sctp_next) {
715                         lnets->flight_size = 0;
716                         SCTPDBG(SCTP_DEBUG_TIMER4,
717                             "Net:%p c-f cwnd:%d ssthresh:%d\n",
718                             lnets, lnets->cwnd, lnets->ssthresh);
719                 }
720                 TAILQ_FOREACH(chk, &stcb->asoc.sent_queue, sctp_next) {
721                         if (chk->sent < SCTP_DATAGRAM_RESEND) {
722                                 if (sctp_logging_level & SCTP_FLIGHT_LOGGING_ENABLE) {
723                                         sctp_misc_ints(SCTP_FLIGHT_LOG_UP,
724                                             chk->whoTo->flight_size,
725                                             chk->book_size,
726                                             (uintptr_t) chk->whoTo,
727                                             chk->rec.data.TSN_seq);
728                                 }
729                                 sctp_flight_size_increase(chk);
730                                 sctp_total_flight_increase(stcb, chk);
731                         }
732                 }
733         }
734         /*
735          * Set up the ECN nonce re-sync point. We do this since
736          * retransmissions are NOT set up for ECN. This means that, due to
737          * Karn's rule, we don't know the total of the peer's ECN bits.
738          */
739         chk = TAILQ_FIRST(&stcb->asoc.send_queue);
740         if (chk == NULL) {
741                 stcb->asoc.nonce_resync_tsn = stcb->asoc.sending_seq;
742         } else {
743                 stcb->asoc.nonce_resync_tsn = chk->rec.data.TSN_seq;
744         }
745         stcb->asoc.nonce_wait_for_ecne = 0;
746         stcb->asoc.nonce_sum_check = 0;
747         /* We would return 1 if only a window probe were outstanding; currently we always return 0 */
748         return (0);
749 }
750
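/*
 * Re-home queued data from a failed destination: every stream-queue
 * pending message and every chunk on the send queue whose destination is
 * "net" is repointed at "alt", dropping the old reference and taking a new
 * one on the alternate.
 */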
751 static void
752 sctp_move_all_chunks_to_alt(struct sctp_tcb *stcb,
753     struct sctp_nets *net,
754     struct sctp_nets *alt)
755 {
756         struct sctp_association *asoc;
757         struct sctp_stream_out *outs;
758         struct sctp_tmit_chunk *chk;
759         struct sctp_stream_queue_pending *sp;
760
761         if (net == alt)
762                 /* nothing to do */
763                 return;
764
765         asoc = &stcb->asoc;
766
767         /*
768          * now through all the streams checking for chunks sent to our bad
769          * network.
770          */
771         TAILQ_FOREACH(outs, &asoc->out_wheel, next_spoke) {
772                 /* now clean up any chunks here */
773                 TAILQ_FOREACH(sp, &outs->outqueue, next) {
774                         if (sp->net == net) {
775                                 sctp_free_remote_addr(sp->net);
776                                 sp->net = alt;
777                                 atomic_add_int(&alt->ref_count, 1);
778                         }
779                 }
780         }
781         /* Now check the pending queue */
782         TAILQ_FOREACH(chk, &asoc->send_queue, sctp_next) {
783                 if (chk->whoTo == net) {
784                         sctp_free_remote_addr(chk->whoTo);
785                         chk->whoTo = alt;
786                         atomic_add_int(&alt->ref_count, 1);
787                 }
788         }
789
790 }
791
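/*
 * T3-rxt expiry.  In order: decide whether this is only a window probe
 * (peer rwnd of zero with less than one MTU in flight), pick an alternate
 * destination (CMT uses the largest-ssthresh policy), mark timed-out
 * chunks for retransmission, end any fast-retransmit recovery, back off
 * RTO and cwnd, run threshold management (against the association only
 * for window probes), migrate queues if the address became unreachable,
 * and handle the cookie-echoed and PR-SCTP FORWARD-TSN special cases.
 */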
792 int
793 sctp_t3rxt_timer(struct sctp_inpcb *inp,
794     struct sctp_tcb *stcb,
795     struct sctp_nets *net)
796 {
797         struct sctp_nets *alt;
798         int win_probe, num_mk;
799
800         if (sctp_logging_level & SCTP_FR_LOGGING_ENABLE) {
801                 sctp_log_fr(0, 0, 0, SCTP_FR_T3_TIMEOUT);
802         }
803         if (sctp_logging_level & SCTP_CWND_LOGGING_ENABLE) {
804                 struct sctp_nets *lnet;
805
806                 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
807                         if (net == lnet) {
808                                 sctp_log_cwnd(stcb, lnet, 1, SCTP_CWND_LOG_FROM_T3);
809                         } else {
810                                 sctp_log_cwnd(stcb, lnet, 0, SCTP_CWND_LOG_FROM_T3);
811                         }
812                 }
813         }
814         /* Find an alternate and mark those for retransmission */
815         if ((stcb->asoc.peers_rwnd == 0) &&
816             (stcb->asoc.total_flight < net->mtu)) {
817                 SCTP_STAT_INCR(sctps_timowindowprobe);
818                 win_probe = 1;
819         } else {
820                 win_probe = 0;
821         }
822
823         if (sctp_cmt_on_off) {
824                 /*
825                  * CMT: Using RTX_SSTHRESH policy for CMT. If CMT is being
826                  * used, then pick dest with largest ssthresh for any
827                  * retransmission.
828                  */
829                 alt = net;
830                 alt = sctp_find_alternate_net(stcb, alt, 1);
831                 /*
832                  * CUCv2: If a different dest is picked for the
833                  * retransmission, then new (rtx-)pseudo_cumack needs to be
834                  * tracked for orig dest. Let CUCv2 track new (rtx-)
835                  * pseudo-cumack always.
836                  */
837                 net->find_pseudo_cumack = 1;
838                 net->find_rtx_pseudo_cumack = 1;
839
840         } else {                /* CMT is OFF */
841
842                 alt = sctp_find_alternate_net(stcb, net, 0);
843         }
844
845         (void)sctp_mark_all_for_resend(stcb, net, alt, win_probe, &num_mk);
846         /* FR Loss recovery just ended with the T3. */
847         stcb->asoc.fast_retran_loss_recovery = 0;
848
849         /* CMT FR loss recovery ended with the T3 */
850         net->fast_retran_loss_recovery = 0;
851
852         /*
853          * setup the sat loss recovery that prevents satellite cwnd advance.
854          */
855         stcb->asoc.sat_t3_loss_recovery = 1;
856         stcb->asoc.sat_t3_recovery_tsn = stcb->asoc.sending_seq;
857
858         /* Backoff the timer and cwnd */
859         sctp_backoff_on_timeout(stcb, net, win_probe, num_mk);
860         if (win_probe == 0) {
861                 /* We don't do normal threshold management on window probes */
862                 if (sctp_threshold_management(inp, stcb, net,
863                     stcb->asoc.max_send_times)) {
864                         /* Association was destroyed */
865                         return (1);
866                 } else {
867                         if (net != stcb->asoc.primary_destination) {
868                                 /* send an immediate HB if our RTO is stale */
869                                 struct timeval now;
870                                 unsigned int ms_goneby;
871
872                                 (void)SCTP_GETTIME_TIMEVAL(&now);
873                                 if (net->last_sent_time.tv_sec) {
874                                         ms_goneby = (now.tv_sec - net->last_sent_time.tv_sec) * 1000;
875                                 } else {
876                                         ms_goneby = 0;
877                                 }
878                                 if ((ms_goneby > net->RTO) || (net->RTO == 0)) {
879                                         /*
880                                          * no recent feedback in an RTO or
881                                          * more, request an RTT update
882                                          */
883                                         (void)sctp_send_hb(stcb, 1, net);
884                                 }
885                         }
886                 }
887         } else {
888                 /*
889                  * For a window probe we don't penalize the nets but only
890                  * the association. This may fail it if SACKs are not coming
891                  * back. If SACKs are coming with rwnd locked at 0, we will
892                  * continue to hold things waiting for rwnd to rise.
893                  */
894                 if (sctp_threshold_management(inp, stcb, NULL,
895                     stcb->asoc.max_send_times)) {
896                         /* Association was destroyed */
897                         return (1);
898                 }
899         }
900         if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
901                 /* Move all pending over too */
902                 sctp_move_all_chunks_to_alt(stcb, net, alt);
903
904                 /*
905                  * Get the address that failed, to force a new src address
906                  * selection and a route allocation.
907                  */
908                 if (net->ro._s_addr) {
909                         sctp_free_ifa(net->ro._s_addr);
910                         net->ro._s_addr = NULL;
911                 }
912                 net->src_addr_selected = 0;
913
914                 /* Force a route allocation too */
915                 if (net->ro.ro_rt) {
916                         RTFREE(net->ro.ro_rt);
917                         net->ro.ro_rt = NULL;
918                 }
919                 /* Was it our primary? */
920                 if ((stcb->asoc.primary_destination == net) && (alt != net)) {
921                         /*
922                          * Yes, note it as such and find an alternate. Note:
923                          * this means HB code must use this to re-set the
924                          * primary if it goes active AND if someone does a
925                          * change-primary then this flag must be cleared
926                          * from any net structures.
927                          */
928                         if (sctp_set_primary_addr(stcb,
929                             (struct sockaddr *)NULL,
930                             alt) == 0) {
931                                 net->dest_state |= SCTP_ADDR_WAS_PRIMARY;
932                         }
933                 }
934         }
935         /*
936          * Special case for the cookie-echoed state: we don't do output but
937          * must await the COOKIE-ACK before retransmission
938          */
939         if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
940                 /*
941                  * Here we just reset the timer and start again since we
942                  * have not established the asoc
943                  */
944                 sctp_timer_start(SCTP_TIMER_TYPE_SEND, inp, stcb, net);
945                 return (0);
946         }
947         if (stcb->asoc.peer_supports_prsctp) {
948                 struct sctp_tmit_chunk *lchk;
949
950                 lchk = sctp_try_advance_peer_ack_point(stcb, &stcb->asoc);
951                 /* C3. See if we need to send a Fwd-TSN */
952                 if (compare_with_wrap(stcb->asoc.advanced_peer_ack_point,
953                     stcb->asoc.last_acked_seq, MAX_TSN)) {
954                         /*
955                          * ISSUE with ECN, see FWD-TSN processing for notes
956                          * on issues that will occur when the ECN NONCE
957                          * stuff is put into SCTP for cross checking.
958                          */
959                         send_forward_tsn(stcb, &stcb->asoc);
960                         if (lchk) {
961                                 /* Assure a timer is up */
962                                 sctp_timer_start(SCTP_TIMER_TYPE_SEND, stcb->sctp_ep, stcb, lchk->whoTo);
963                         }
964                 }
965         }
966         if (sctp_logging_level & SCTP_CWND_MONITOR_ENABLE) {
967                 sctp_log_cwnd(stcb, net, net->cwnd, SCTP_CWND_LOG_FROM_RTX);
968         }
969         return (0);
970 }
971
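/*
 * T1-init expiry: resend the INIT.  A delayed-connection association just
 * fires its first INIT; otherwise, while still in COOKIE_WAIT, threshold
 * management runs against max_init_times, the primary destination is
 * backed off with its RTO capped at initial_init_rto_max, and on a
 * multi-homed association the queued chunks are moved to an alternate,
 * which becomes the new primary, before the INIT goes out again.
 */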
972 int
973 sctp_t1init_timer(struct sctp_inpcb *inp,
974     struct sctp_tcb *stcb,
975     struct sctp_nets *net)
976 {
977         /* bump the thresholds */
978         if (stcb->asoc.delayed_connection) {
979                 /*
980                  * special hook for delayed connection. The library did NOT
981                  * complete the rest of its sends.
982                  */
983                 stcb->asoc.delayed_connection = 0;
984                 sctp_send_initiate(inp, stcb);
985                 return (0);
986         }
987         if (SCTP_GET_STATE((&stcb->asoc)) != SCTP_STATE_COOKIE_WAIT) {
988                 return (0);
989         }
990         if (sctp_threshold_management(inp, stcb, net,
991             stcb->asoc.max_init_times)) {
992                 /* Association was destroyed */
993                 return (1);
994         }
995         stcb->asoc.dropped_special_cnt = 0;
996         sctp_backoff_on_timeout(stcb, stcb->asoc.primary_destination, 1, 0);
997         if (stcb->asoc.initial_init_rto_max < net->RTO) {
998                 net->RTO = stcb->asoc.initial_init_rto_max;
999         }
1000         if (stcb->asoc.numnets > 1) {
1001                 /* If we have more than one addr use it */
1002                 struct sctp_nets *alt;
1003
1004                 alt = sctp_find_alternate_net(stcb, stcb->asoc.primary_destination, 0);
1005                 if ((alt != NULL) && (alt != stcb->asoc.primary_destination)) {
1006                         sctp_move_all_chunks_to_alt(stcb, stcb->asoc.primary_destination, alt);
1007                         stcb->asoc.primary_destination = alt;
1008                 }
1009         }
1010         /* Send out a new init */
1011         sctp_send_initiate(inp, stcb);
1012         return (0);
1013 }
1014
1015 /*
1016  * For cookie and asconf we actually need to find and mark for resend, then
1017  * increment the resend counter (after all the threshold management stuff of
1018  * course).
1019  */
1020 int
1021 sctp_cookie_timer(struct sctp_inpcb *inp,
1022     struct sctp_tcb *stcb,
1023     struct sctp_nets *net)
1024 {
1025         struct sctp_nets *alt;
1026         struct sctp_tmit_chunk *cookie;
1027
1028         /* first before all else we must find the cookie */
1029         TAILQ_FOREACH(cookie, &stcb->asoc.control_send_queue, sctp_next) {
1030                 if (cookie->rec.chunk_id.id == SCTP_COOKIE_ECHO) {
1031                         break;
1032                 }
1033         }
1034         if (cookie == NULL) {
1035                 if (SCTP_GET_STATE(&stcb->asoc) == SCTP_STATE_COOKIE_ECHOED) {
1036                         /* FOOBAR! */
1037                         struct mbuf *oper;
1038
1039                         oper = sctp_get_mbuf_for_msg((sizeof(struct sctp_paramhdr) + sizeof(uint32_t)),
1040                             0, M_DONTWAIT, 1, MT_DATA);
1041                         if (oper) {
1042                                 struct sctp_paramhdr *ph;
1043                                 uint32_t *ippp;
1044
1045                                 SCTP_BUF_LEN(oper) = sizeof(struct sctp_paramhdr) +
1046                                     sizeof(uint32_t);
1047                                 ph = mtod(oper, struct sctp_paramhdr *);
1048                                 ph->param_type = htons(SCTP_CAUSE_PROTOCOL_VIOLATION);
1049                                 ph->param_length = htons(SCTP_BUF_LEN(oper));
1050                                 ippp = (uint32_t *) (ph + 1);
1051                                 *ippp = htonl(SCTP_FROM_SCTP_TIMER + SCTP_LOC_2);
1052                         }
1053                         inp->last_abort_code = SCTP_FROM_SCTP_TIMER + SCTP_LOC_3;
1054                         sctp_abort_an_association(inp, stcb, SCTP_INTERNAL_ERROR,
1055                             oper);
1056                 } else {
1057 #ifdef INVARIANTS
1058                         panic("Cookie timer expires in wrong state?");
1059 #else
1060                         SCTP_PRINTF("Strange in state %d not cookie-echoed yet c-e timer expires?\n", SCTP_GET_STATE(&stcb->asoc));
1061                         return (0);
1062 #endif
1063                 }
1064                 return (0);
1065         }
1066         /* Ok we found the cookie, threshold management next */
1067         if (sctp_threshold_management(inp, stcb, cookie->whoTo,
1068             stcb->asoc.max_init_times)) {
1069                 /* Assoc is over */
1070                 return (1);
1071         }
1072         /*
1073          * cleared threshold management, now let's back off the address and
1074          * select an alternate
1075          */
1076         stcb->asoc.dropped_special_cnt = 0;
1077         sctp_backoff_on_timeout(stcb, cookie->whoTo, 1, 0);
1078         alt = sctp_find_alternate_net(stcb, cookie->whoTo, 0);
1079         if (alt != cookie->whoTo) {
1080                 sctp_free_remote_addr(cookie->whoTo);
1081                 cookie->whoTo = alt;
1082                 atomic_add_int(&alt->ref_count, 1);
1083         }
1084         /* Now mark the retran info */
1085         if (cookie->sent != SCTP_DATAGRAM_RESEND) {
1086                 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1087         }
1088         cookie->sent = SCTP_DATAGRAM_RESEND;
1089         /*
1090          * Now call the output routine to kick out the cookie again, Note we
1091          * don't mark any chunks for retran so that FR will need to kick in
1092          * to move these (or a send timer).
1093          */
1094         return (0);
1095 }
1096
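/*
 * Stream-reset timer: locate the outstanding STR-RESET request by the
 * sequence number we sent it with, run threshold management and backoff,
 * move the request (and any stranded ECN Echo for this net) to an
 * alternate destination, mark it for resend and restart the timer.
 */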
1097 int
1098 sctp_strreset_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1099     struct sctp_nets *net)
1100 {
1101         struct sctp_nets *alt;
1102         struct sctp_tmit_chunk *strrst = NULL, *chk = NULL;
1103
1104         if (stcb->asoc.stream_reset_outstanding == 0) {
1105                 return (0);
1106         }
1107         /* find the existing STRRESET, we use the seq number we sent out on */
1108         (void)sctp_find_stream_reset(stcb, stcb->asoc.str_reset_seq_out, &strrst);
1109         if (strrst == NULL) {
1110                 return (0);
1111         }
1112         /* do threshold management */
1113         if (sctp_threshold_management(inp, stcb, strrst->whoTo,
1114             stcb->asoc.max_send_times)) {
1115                 /* Assoc is over */
1116                 return (1);
1117         }
1118         /*
1119          * cleared threshold management, now let's back off the address and
1120          * select an alternate
1121          */
1122         sctp_backoff_on_timeout(stcb, strrst->whoTo, 1, 0);
1123         alt = sctp_find_alternate_net(stcb, strrst->whoTo, 0);
1124         sctp_free_remote_addr(strrst->whoTo);
1125         strrst->whoTo = alt;
1126         atomic_add_int(&alt->ref_count, 1);
1127
1128         /* See if an ECN Echo is also stranded */
1129         TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1130                 if ((chk->whoTo == net) &&
1131                     (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
1132                         sctp_free_remote_addr(chk->whoTo);
1133                         if (chk->sent != SCTP_DATAGRAM_RESEND) {
1134                                 chk->sent = SCTP_DATAGRAM_RESEND;
1135                                 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1136                         }
1137                         chk->whoTo = alt;
1138                         atomic_add_int(&alt->ref_count, 1);
1139                 }
1140         }
1141         if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
1142                 /*
1143                  * If the address went unreachable, we need to move to
1144                  * alternates for ALL chk's in queue
1145                  */
1146                 sctp_move_all_chunks_to_alt(stcb, net, alt);
1147         }
1148         /* mark the retran info */
1149         if (strrst->sent != SCTP_DATAGRAM_RESEND)
1150                 sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1151         strrst->sent = SCTP_DATAGRAM_RESEND;
1152
1153         /* restart the timer */
1154         sctp_timer_start(SCTP_TIMER_TYPE_STRRESET, inp, stcb, strrst->whoTo);
1155         return (0);
1156 }
1157
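/*
 * ASCONF timer: on the first expiry (nothing sent yet) build and send a
 * new ASCONF; on later expiries find the queued ASCONF, run threshold
 * management and backoff, give up on ASCONF entirely if the peer has
 * ignored more than max_send_times attempts, and otherwise move it (plus
 * any stranded ECN Echo) to an alternate destination and mark it for
 * resend.
 */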
1158 int
1159 sctp_asconf_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1160     struct sctp_nets *net)
1161 {
1162         struct sctp_nets *alt;
1163         struct sctp_tmit_chunk *asconf, *chk;
1164
1165         /* is this the first send, or a retransmission? */
1166         if (stcb->asoc.asconf_sent == 0) {
1167                 /* compose a new ASCONF chunk and send it */
1168                 sctp_send_asconf(stcb, net);
1169         } else {
1170                 /* Retransmission of the existing ASCONF needed... */
1171
1172                 /* find the existing ASCONF */
1173                 TAILQ_FOREACH(asconf, &stcb->asoc.control_send_queue,
1174                     sctp_next) {
1175                         if (asconf->rec.chunk_id.id == SCTP_ASCONF) {
1176                                 break;
1177                         }
1178                 }
1179                 if (asconf == NULL) {
1180                         return (0);
1181                 }
1182                 /* do threshold management */
1183                 if (sctp_threshold_management(inp, stcb, asconf->whoTo,
1184                     stcb->asoc.max_send_times)) {
1185                         /* Assoc is over */
1186                         return (1);
1187                 }
1188                 /*
1189                  * PETER? FIX? How will the following code ever run? If the
1190                  * max_send_times is hit, threshold management will blow away
1191                  * the association?
1192                  */
1193                 if (asconf->snd_count > stcb->asoc.max_send_times) {
1194                         /*
1195                          * Something is rotten: the peer is not responding to
1196                          * ASCONFs but maybe is to data, e.g. it is not
1197                          * properly handling the chunk type upper bits.  Mark
1198                          * this peer as ASCONF-incapable and clean up.
1199                          */
1200                         SCTPDBG(SCTP_DEBUG_TIMER1, "asconf_timer: Peer has not responded to our repeated ASCONFs\n");
1201                         sctp_asconf_cleanup(stcb, net);
1202                         return (0);
1203                 }
1204                 /*
1205                  * Cleared threshold management; now back off the address
1206                  * and select an alternate.
1207                  */
1208                 sctp_backoff_on_timeout(stcb, asconf->whoTo, 1, 0);
1209                 alt = sctp_find_alternate_net(stcb, asconf->whoTo, 0);
1210                 sctp_free_remote_addr(asconf->whoTo);
1211                 asconf->whoTo = alt;
1212                 atomic_add_int(&alt->ref_count, 1);
1213
1214                 /* See if an ECN Echo is also stranded */
1215                 TAILQ_FOREACH(chk, &stcb->asoc.control_send_queue, sctp_next) {
1216                         if ((chk->whoTo == net) &&
1217                             (chk->rec.chunk_id.id == SCTP_ECN_ECHO)) {
1218                                 sctp_free_remote_addr(chk->whoTo);
1219                                 chk->whoTo = alt;
1220                                 if (chk->sent != SCTP_DATAGRAM_RESEND) {
1221                                         chk->sent = SCTP_DATAGRAM_RESEND;
1222                                         sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1223                                 }
1224                                 atomic_add_int(&alt->ref_count, 1);
1225                         }
1226                 }
1227                 if (net->dest_state & SCTP_ADDR_NOT_REACHABLE) {
1228                         /*
1229                          * If the address went un-reachable, we need to move
1230                          * to alternates for ALL chk's in queue
1231                          */
1232                         sctp_move_all_chunks_to_alt(stcb, net, alt);
1233                 }
1234                 /* mark the retran info */
1235                 if (asconf->sent != SCTP_DATAGRAM_RESEND)
1236                         sctp_ucount_incr(stcb->asoc.sent_queue_retran_cnt);
1237                 asconf->sent = SCTP_DATAGRAM_RESEND;
1238         }
1239         return (0);
1240 }
1241
1242 /*
1243  * For the shutdown and shutdown-ack, we do not keep one around on the
1244  * control queue. This means we must generate a new one and call the general
1245  * chunk output routine, AFTER having done threshold management.
1246  */
1247 int
1248 sctp_shutdown_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1249     struct sctp_nets *net)
1250 {
1251         struct sctp_nets *alt;
1252
1253         /* first threshold management */
1254         if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1255                 /* Assoc is over */
1256                 return (1);
1257         }
1258         /* second select an alternative */
1259         alt = sctp_find_alternate_net(stcb, net, 0);
1260
1261         /* third generate a shutdown into the queue for our net */
1262         if (alt) {
1263                 sctp_send_shutdown(stcb, alt);
1264         } else {
1265                 /*
1266                  * if alt is NULL, there is no dest to send to??
1267                  */
1268                 return (0);
1269         }
1270         /* fourth restart timer */
1271         sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN, inp, stcb, alt);
1272         return (0);
1273 }
1274
1275 int
1276 sctp_shutdownack_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1277     struct sctp_nets *net)
1278 {
1279         struct sctp_nets *alt;
1280
1281         /* first threshold management */
1282         if (sctp_threshold_management(inp, stcb, net, stcb->asoc.max_send_times)) {
1283                 /* Assoc is over */
1284                 return (1);
1285         }
1286         /* second select an alternative */
1287         alt = sctp_find_alternate_net(stcb, net, 0);
1288
1289         /* third generate a shutdown into the queue for our net */
1290         sctp_send_shutdown_ack(stcb, alt);
1291
1292         /* fourth restart timer */
1293         sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNACK, inp, stcb, alt);
1294         return (0);
1295 }
1296
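/*
 * Audit the per-stream output queues when the send and sent queues are
 * empty but output is still accounted for: put any stream with queued
 * data back on the output wheel, resync the counters, and kick the
 * output path if chunks really are pending.
 */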
1297 static void
1298 sctp_audit_stream_queues_for_size(struct sctp_inpcb *inp,
1299     struct sctp_tcb *stcb)
1300 {
1301         struct sctp_stream_out *outs;
1302         struct sctp_stream_queue_pending *sp;
1303         unsigned int chks_in_queue = 0;
1304         int being_filled = 0;
1305
1306         /*
1307          * This function is ONLY called when the send/sent queues are empty.
1308          */
1309         if ((stcb == NULL) || (inp == NULL))
1310                 return;
1311
1312         if (stcb->asoc.sent_queue_retran_cnt) {
1313                 SCTP_PRINTF("Hmm, sent_queue_retran_cnt is non-zero %d\n",
1314                     stcb->asoc.sent_queue_retran_cnt);
1315                 stcb->asoc.sent_queue_retran_cnt = 0;
1316         }
1317         SCTP_TCB_SEND_LOCK(stcb);
1318         if (TAILQ_EMPTY(&stcb->asoc.out_wheel)) {
1319                 int i, cnt = 0;
1320
1321                 /* Check to see if a spoke fell off the wheel */
1322                 for (i = 0; i < stcb->asoc.streamoutcnt; i++) {
1323                         if (!TAILQ_EMPTY(&stcb->asoc.strmout[i].outqueue)) {
1324                                 sctp_insert_on_wheel(stcb, &stcb->asoc, &stcb->asoc.strmout[i], 1);
1325                                 cnt++;
1326                         }
1327                 }
1328                 if (cnt) {
1329                         /* yep, we lost a spoke or two */
1330                         SCTP_PRINTF("Found an additional %d streams NOT on outwheel, corrected\n", cnt);
1331                 } else {
1332                         /* no spokes lost, */
1333                         stcb->asoc.total_output_queue_size = 0;
1334                 }
1335                 SCTP_TCB_SEND_UNLOCK(stcb);
1336                 return;
1337         }
1338         SCTP_TCB_SEND_UNLOCK(stcb);
1339         /* Check to see if some data is queued; if so, report it */
1340         TAILQ_FOREACH(outs, &stcb->asoc.out_wheel, next_spoke) {
1341                 if (!TAILQ_EMPTY(&outs->outqueue)) {
1342                         TAILQ_FOREACH(sp, &outs->outqueue, next) {
1343                                 if (sp->msg_is_complete)
1344                                         being_filled++;
1345                                 chks_in_queue++;
1346                         }
1347                 }
1348         }
1349         if (chks_in_queue != stcb->asoc.stream_queue_cnt) {
1350                 SCTP_PRINTF("Hmm, stream queue cnt at %d I counted %d in stream out wheel\n",
1351                     stcb->asoc.stream_queue_cnt, chks_in_queue);
1352         }
1353         if (chks_in_queue) {
1354                 /* call the output queue function */
1355                 sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_T3);
1356                 if ((TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1357                     (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1358                         /*
1359                          * Probably should go in and make it go back through
1360                          * and add fragments allowed
1361                          */
1362                         if (being_filled == 0) {
1363                                 SCTP_PRINTF("Still nothing moved %d chunks are stuck\n",
1364                                     chks_in_queue);
1365                         }
1366                 }
1367         } else {
1368                 SCTP_PRINTF("Found no chunks on any queue tot:%lu\n",
1369                     (u_long)stcb->asoc.total_output_queue_size);
1370                 stcb->asoc.total_output_queue_size = 0;
1371         }
1372 }
1373
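/*
 * Heartbeat timer.  If the last HB on this net went unanswered, the
 * cached source address is invalidated and the RTO backed off.  Then
 * either a single HB is sent (which also does threshold management and
 * destination selection) or, when unconfirmed addresses exist, up to
 * sctp_hb_maxburst of them are probed.
 */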
1374 int
1375 sctp_heartbeat_timer(struct sctp_inpcb *inp, struct sctp_tcb *stcb,
1376     struct sctp_nets *net, int cnt_of_unconf)
1377 {
1378         if (net) {
1379                 if (net->hb_responded == 0) {
1380                         if (net->ro._s_addr) {
1381                                 /*
1382                                  * Invalidate the src address if we did not
1383                                  * get a response last time.
1384                                  */
1385                                 sctp_free_ifa(net->ro._s_addr);
1386                                 net->ro._s_addr = NULL;
1387                                 net->src_addr_selected = 0;
1388                         }
1389                         sctp_backoff_on_timeout(stcb, net, 1, 0);
1390                 }
1391                 /* Zero PBA, if it needs it */
1392                 if (net->partial_bytes_acked) {
1393                         net->partial_bytes_acked = 0;
1394                 }
1395         }
1396         if ((stcb->asoc.total_output_queue_size > 0) &&
1397             (TAILQ_EMPTY(&stcb->asoc.send_queue)) &&
1398             (TAILQ_EMPTY(&stcb->asoc.sent_queue))) {
1399                 sctp_audit_stream_queues_for_size(inp, stcb);
1400         }
1401         /* Send a new HB, this will do threshold management, pick a new dest */
1402         if (cnt_of_unconf == 0) {
1403                 if (sctp_send_hb(stcb, 0, NULL) < 0) {
1404                         return (1);
1405                 }
1406         } else {
1407                 /*
1408                  * this will send out extra hb's up to maxburst if there are
1409                  * any unconfirmed addresses.
1410                  */
1411                 uint32_t cnt_sent = 0;
1412
1413                 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1414                         if ((net->dest_state & SCTP_ADDR_UNCONFIRMED) &&
1415                             (net->dest_state & SCTP_ADDR_REACHABLE)) {
1416                                 cnt_sent++;
1417                                 if (net->hb_responded == 0) {
1418                                         /* No response to the last HB; invalidate the src address */
1419                                         if (net->ro._s_addr) {
1420                                                 sctp_free_ifa(net->ro._s_addr);
1421                                                 net->ro._s_addr = NULL;
1422                                                 net->src_addr_selected = 0;
1423                                         }
1424                                 }
1425                                 if (sctp_send_hb(stcb, 1, net) == 0) {
1426                                         break;
1427                                 }
1428                                 if (cnt_sent >= sctp_hb_maxburst)
1429                                         break;
1430                         }
1431                 }
1432         }
1433         return (0);
1434 }
1435
1436 int
1437 sctp_is_hb_timer_running(struct sctp_tcb *stcb)
1438 {
1439         if (SCTP_OS_TIMER_PENDING(&stcb->asoc.hb_timer.timer)) {
1440                 /* it's running */
1441                 return (1);
1442         } else {
1443                 /* nope */
1444                 return (0);
1445         }
1446 }
1447
1448 int
1449 sctp_is_sack_timer_running(struct sctp_tcb *stcb)
1450 {
1451         if (SCTP_OS_TIMER_PENDING(&stcb->asoc.dack_timer.timer)) {
1452                 /* it's running */
1453                 return (1);
1454         } else {
1455                 /* nope */
1456                 return (0);
1457         }
1458 }
1459
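/*
 * Table of common link MTU plateau values, in ascending order.
 * sctp_getnext_mtu() below walks it to find the next larger size to try
 * when the path-MTU raise timer fires.
 */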
1460 #define SCTP_NUMBER_OF_MTU_SIZES 18
1461 static uint32_t mtu_sizes[] = {
1462         68,
1463         296,
1464         508,
1465         512,
1466         544,
1467         576,
1468         1006,
1469         1492,
1470         1500,
1471         1536,
1472         2002,
1473         2048,
1474         4352,
1475         4464,
1476         8166,
1477         17914,
1478         32000,
1479         65535
1480 };
1481
1482
1483 static uint32_t
1484 sctp_getnext_mtu(struct sctp_inpcb *inp, uint32_t cur_mtu)
1485 {
1486         /* select another MTU that is just bigger than this one */
1487         int i;
1488
1489         for (i = 0; i < SCTP_NUMBER_OF_MTU_SIZES; i++) {
1490                 if (cur_mtu < mtu_sizes[i]) {
1491                         /* found the first size bigger than cur_mtu */
1492                         return (mtu_sizes[i]);
1493                 }
1494         }
1495         /* cur_mtu is at or above the largest size; return it unchanged */
1496         return (cur_mtu);
1497 }
1498
1499
1500 void
1501 sctp_pathmtu_timer(struct sctp_inpcb *inp,
1502     struct sctp_tcb *stcb,
1503     struct sctp_nets *net)
1504 {
1505         uint32_t next_mtu;
1506
1507         /* find the next MTU to probe */
1508         next_mtu = sctp_getnext_mtu(inp, net->mtu);
1509         if (next_mtu <= net->mtu) {
1510                 /* nothing to do */
1511                 return;
1512         } {
1513                 uint32_t mtu;
1514
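                /*
                 * Make sure a usable source address is cached before
                 * asking its route for the MTU: drop one that is being
                 * deleted and reselect if none is set.
                 */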
1515                 if ((net->src_addr_selected == 0) ||
1516                     (net->ro._s_addr == NULL) ||
1517                     (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
1518                         if ((net->ro._s_addr != NULL) && (net->ro._s_addr->localifa_flags & SCTP_BEING_DELETED)) {
1519                                 sctp_free_ifa(net->ro._s_addr);
1520                                 net->ro._s_addr = NULL;
1521                                 net->src_addr_selected = 0;
1522                         } else if (net->ro._s_addr == NULL) {
1523                                 net->ro._s_addr = sctp_source_address_selection(inp,
1524                                     stcb,
1525                                     (sctp_route_t *) & net->ro,
1526                                     net, 0, stcb->asoc.vrf_id);
1527                         }
1528                         if (net->ro._s_addr)
1529                                 net->src_addr_selected = 1;
1530                 }
1531                 if (net->ro._s_addr) {
1532                         mtu = SCTP_GATHER_MTU_FROM_ROUTE(net->ro._s_addr, &net->ro._s_addr->address.sa, net->ro.ro_rt);
1533                         if (mtu > next_mtu) {
1534                                 net->mtu = next_mtu;
1535                         }
1536                 }
1537         }
1538         /* restart the timer */
1539         sctp_timer_start(SCTP_TIMER_TYPE_PATHMTURAISE, inp, stcb, net);
1540 }
1541
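/*
 * Autoclose timer.  If the association has been idle (nothing sent or
 * received) for at least sctp_autoclose_ticks, flush any pending output
 * and, once the send and sent queues are empty, start the SHUTDOWN
 * sequence; otherwise re-arm the timer for the time remaining.
 */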
1542 void
1543 sctp_autoclose_timer(struct sctp_inpcb *inp,
1544     struct sctp_tcb *stcb,
1545     struct sctp_nets *net)
1546 {
1547         struct timeval tn, *tim_touse;
1548         struct sctp_association *asoc;
1549         int ticks_gone_by;
1550
1551         (void)SCTP_GETTIME_TIMEVAL(&tn);
1552         if (stcb->asoc.sctp_autoclose_ticks &&
1553             sctp_is_feature_on(inp, SCTP_PCB_FLAGS_AUTOCLOSE)) {
1554                 /* Auto close is on */
1555                 asoc = &stcb->asoc;
1556                 /* pick the time to use */
1557                 if (asoc->time_last_rcvd.tv_sec >
1558                     asoc->time_last_sent.tv_sec) {
1559                         tim_touse = &asoc->time_last_rcvd;
1560                 } else {
1561                         tim_touse = &asoc->time_last_sent;
1562                 }
1563                 /* Now, has enough time transpired to autoclose? */
1564                 ticks_gone_by = SEC_TO_TICKS(tn.tv_sec - tim_touse->tv_sec);
1565                 if ((ticks_gone_by > 0) &&
1566                     (ticks_gone_by >= (int)asoc->sctp_autoclose_ticks)) {
1567                         /*
1568                          * autoclose time has hit, call the output routine,
1569                          * which should do nothing, just to be SURE we don't
1570                          * have hanging data. We can then safely check the
1571                          * queues and know that we are clear to send
1572                          * shutdown
1573                          */
1574                         sctp_chunk_output(inp, stcb, SCTP_OUTPUT_FROM_AUTOCLOSE_TMR);
1575                         /* Are we clean? */
1576                         if (TAILQ_EMPTY(&asoc->send_queue) &&
1577                             TAILQ_EMPTY(&asoc->sent_queue)) {
1578                                 /*
1579                                  * there is nothing queued to send, so I'm
1580                                  * done...
1581                                  */
1582                                 if (SCTP_GET_STATE(asoc) != SCTP_STATE_SHUTDOWN_SENT) {
1583                                         /* only send SHUTDOWN 1st time thru */
1584                                         sctp_send_shutdown(stcb, stcb->asoc.primary_destination);
1585                                         if ((SCTP_GET_STATE(asoc) == SCTP_STATE_OPEN) ||
1586                                             (SCTP_GET_STATE(asoc) == SCTP_STATE_SHUTDOWN_RECEIVED)) {
1587                                                 SCTP_STAT_DECR_GAUGE32(sctps_currestab);
1588                                         }
1589                                         asoc->state = SCTP_STATE_SHUTDOWN_SENT;
1590                                         sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWN,
1591                                             stcb->sctp_ep, stcb,
1592                                             asoc->primary_destination);
1593                                         sctp_timer_start(SCTP_TIMER_TYPE_SHUTDOWNGUARD,
1594                                             stcb->sctp_ep, stcb,
1595                                             asoc->primary_destination);
1596                                 }
1597                         }
1598                 } else {
1599                         /*
1600                          * No auto close at this time, reset t-o to check
1601                          * later
1602                          */
1603                         int tmp;
1604
1605                         /* fool the timer startup to use the time left */
1606                         tmp = asoc->sctp_autoclose_ticks;
1607                         asoc->sctp_autoclose_ticks -= ticks_gone_by;
1608                         sctp_timer_start(SCTP_TIMER_TYPE_AUTOCLOSE, inp, stcb,
1609                             net);
1610                         /* restore the real tick value */
1611                         asoc->sctp_autoclose_ticks = tmp;
1612                 }
1613         }
1614 }
1615
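/*
 * Iterator timer.  Resumes the endpoint/association iterator: endpoints
 * matching the requested flags and features are walked, the caller's
 * per-endpoint and per-association functions are run, and after
 * SCTP_ITERATOR_MAX_AT_ONCE associations the remaining work is deferred
 * by re-arming the timer.
 */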
1616 void
1617 sctp_iterator_timer(struct sctp_iterator *it)
1618 {
1619         int iteration_count = 0;
1620         int inp_skip = 0;
1621
1622         /*
1623          * Only one iterator can run at a time. This is the only way we can
1624          * cleanly pull ep's from underneath all the running iterators when
1625          * an ep is freed.
1626          */
1627         SCTP_ITERATOR_LOCK();
1628         if (it->inp == NULL) {
1629                 /* iterator is complete */
1630 done_with_iterator:
1631                 SCTP_ITERATOR_UNLOCK();
1632                 SCTP_INP_INFO_WLOCK();
1633                 TAILQ_REMOVE(&sctppcbinfo.iteratorhead, it, sctp_nxt_itr);
1634                 /* stopping the callout is not needed, in theory */
1635                 SCTP_INP_INFO_WUNLOCK();
1636                 (void)SCTP_OS_TIMER_STOP(&it->tmr.timer);
1637                 if (it->function_atend != NULL) {
1638                         (*it->function_atend) (it->pointer, it->val);
1639                 }
1640                 SCTP_FREE(it, SCTP_M_ITER);
1641                 return;
1642         }
1643 select_a_new_ep:
1644         SCTP_INP_WLOCK(it->inp);
1645         while (((it->pcb_flags) &&
1646             ((it->inp->sctp_flags & it->pcb_flags) != it->pcb_flags)) ||
1647             ((it->pcb_features) &&
1648             ((it->inp->sctp_features & it->pcb_features) != it->pcb_features))) {
1649                 /* endpoint flags or features don't match, so keep looking */
1650                 if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1651                         SCTP_INP_WUNLOCK(it->inp);
1652                         goto done_with_iterator;
1653                 }
1654                 SCTP_INP_WUNLOCK(it->inp);
1655                 it->inp = LIST_NEXT(it->inp, sctp_list);
1656                 if (it->inp == NULL) {
1657                         goto done_with_iterator;
1658                 }
1659                 SCTP_INP_WLOCK(it->inp);
1660         }
1661         if ((it->inp->inp_starting_point_for_iterator != NULL) &&
1662             (it->inp->inp_starting_point_for_iterator != it)) {
1663                 SCTP_PRINTF("Iterator collision, waiting for one at %p\n",
1664                     it->inp);
1665                 SCTP_INP_WUNLOCK(it->inp);
1666                 goto start_timer_return;
1667         }
1668         /* mark the current iterator on the endpoint */
1669         it->inp->inp_starting_point_for_iterator = it;
1670         SCTP_INP_WUNLOCK(it->inp);
1671         SCTP_INP_RLOCK(it->inp);
1672         /* now go through each assoc which is in the desired state */
1673         if (it->done_current_ep == 0) {
1674                 if (it->function_inp != NULL)
1675                         inp_skip = (*it->function_inp) (it->inp, it->pointer, it->val);
1676                 it->done_current_ep = 1;
1677         }
1678         if (it->stcb == NULL) {
1679                 /* start with the first assoc on this endpoint */
1680                 it->stcb = LIST_FIRST(&it->inp->sctp_asoc_list);
1681         }
1682         SCTP_INP_RUNLOCK(it->inp);
1683         if ((inp_skip) || it->stcb == NULL) {
1684                 if (it->function_inp_end != NULL) {
1685                         inp_skip = (*it->function_inp_end) (it->inp,
1686                             it->pointer,
1687                             it->val);
1688                 }
1689                 goto no_stcb;
1690         }
1691         if ((it->stcb) &&
1692             (it->stcb->asoc.stcb_starting_point_for_iterator == it)) {
1693                 it->stcb->asoc.stcb_starting_point_for_iterator = NULL;
1694         }
1695         while (it->stcb) {
1696                 SCTP_TCB_LOCK(it->stcb);
1697                 if (it->asoc_state && ((it->stcb->asoc.state & it->asoc_state) != it->asoc_state)) {
1698                         /* not in the right state... keep looking */
1699                         SCTP_TCB_UNLOCK(it->stcb);
1700                         goto next_assoc;
1701                 }
1702                 /* mark the current iterator on the assoc */
1703                 it->stcb->asoc.stcb_starting_point_for_iterator = it;
1704                 /* see if we have limited out the iterator loop */
1705                 iteration_count++;
1706                 if (iteration_count > SCTP_ITERATOR_MAX_AT_ONCE) {
1707         start_timer_return:
1708                         /* set a timer to continue this later */
1709                         SCTP_TCB_UNLOCK(it->stcb);
1710                         sctp_timer_start(SCTP_TIMER_TYPE_ITERATOR,
1711                             (struct sctp_inpcb *)it, NULL, NULL);
1712                         SCTP_ITERATOR_UNLOCK();
1713                         return;
1714                 }
1715                 /* run function on this one */
1716                 (*it->function_assoc) (it->inp, it->stcb, it->pointer, it->val);
1717
1718                 /*
1719                  * we lie here; it really needs to have its own type, but
1720                  * first I must verify that this won't affect things :-0
1721                  */
1722                 if (it->no_chunk_output == 0)
1723                         sctp_chunk_output(it->inp, it->stcb, SCTP_OUTPUT_FROM_T3);
1724
1725                 SCTP_TCB_UNLOCK(it->stcb);
1726 next_assoc:
1727                 it->stcb = LIST_NEXT(it->stcb, sctp_tcblist);
1728                 if (it->stcb == NULL) {
1729                         if (it->function_inp_end != NULL) {
1730                                 inp_skip = (*it->function_inp_end) (it->inp,
1731                                     it->pointer,
1732                                     it->val);
1733                         }
1734                 }
1735         }
1736 no_stcb:
1737         /* done with all assocs on this endpoint, move on to next endpoint */
1738         it->done_current_ep = 0;
1739         SCTP_INP_WLOCK(it->inp);
1740         it->inp->inp_starting_point_for_iterator = NULL;
1741         SCTP_INP_WUNLOCK(it->inp);
1742         if (it->iterator_flags & SCTP_ITERATOR_DO_SINGLE_INP) {
1743                 it->inp = NULL;
1744         } else {
1745                 SCTP_INP_INFO_RLOCK();
1746                 it->inp = LIST_NEXT(it->inp, sctp_list);
1747                 SCTP_INP_INFO_RUNLOCK();
1748         }
1749         if (it->inp == NULL) {
1750                 goto done_with_iterator;
1751         }
1752         goto select_a_new_ep;
1753 }