/*-
 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 * a) Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *
 * b) Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the distribution.
 *
 * c) Neither the name of Cisco Systems, Inc. nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <netinet/sctp_os.h>
#include <netinet/sctp_var.h>
#include <netinet/sctp_sysctl.h>
#include <netinet/sctp_pcb.h>
#include <netinet/sctp_header.h>
#include <netinet/sctputil.h>
#include <netinet/sctp_output.h>
#include <netinet/sctp_input.h>
#include <netinet/sctp_indata.h>
#include <netinet/sctp_uio.h>
#include <netinet/sctp_timer.h>
#include <netinet/sctp_auth.h>
#include <netinet/sctp_asconf.h>
#include <netinet/sctp_dtrace_declare.h>

#define SHIFT_MPTCP_MULTI_N 40
#define SHIFT_MPTCP_MULTI_Z 16
#define SHIFT_MPTCP_MULTI 8

static void
sctp_enforce_cwnd_limit(struct sctp_association *assoc, struct sctp_nets *net)
{
    if ((assoc->max_cwnd > 0) &&
        (net->cwnd > assoc->max_cwnd) &&
        (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) {
        net->cwnd = assoc->max_cwnd;
        if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
            net->cwnd = net->mtu - sizeof(struct sctphdr);
        }
    }
}
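
/*
 * A minimal worked example of the clamp above (illustrative numbers):
 * with a configured assoc->max_cwnd of 8000 bytes and an MTU of 1500, a
 * cwnd of 20000 is cut to 8000, which is still above
 * 1500 - sizeof(struct sctphdr), so nothing further happens. The inner
 * check only matters when max_cwnd is set below one MTU of payload.
 */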

static void
sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    struct sctp_association *assoc;
    uint32_t cwnd_in_mtu;

    assoc = &stcb->asoc;
    cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
    if (cwnd_in_mtu == 0) {
        /* Using 0 means that the value of RFC 4960 is used. */
        net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
    } else {
        /*
         * We take the minimum of the burst limit and the initial
         * congestion window.
         */
        if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
            cwnd_in_mtu = assoc->max_burst;
        net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
    }
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        /* In case of resource pooling initialize appropriately */
        net->cwnd /= assoc->numnets;
        if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
            net->cwnd = net->mtu - sizeof(struct sctphdr);
        }
    }
    sctp_enforce_cwnd_limit(assoc, net);
    net->ssthresh = assoc->peers_rwnd;
    SDT_PROBE5(sctp, cwnd, net, init,
        stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
        0, net->cwnd);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) &
        (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
        sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
    }
}
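
/*
 * Worked example of the initialization above (illustrative numbers): with
 * sctp_initial_cwnd == 0 and an MTU of 1500, the RFC 4960 rule gives
 * min(4 * 1500, max(2 * 1500, SCTP_INITIAL_CWND)) = min(6000, 4380) = 4380
 * bytes. With sctp_initial_cwnd == 10 but max_burst == 4, the burst limit
 * wins and cwnd becomes 4 * (1500 - sizeof(struct sctphdr)) bytes.
 */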

static void
sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_nets *net;
    uint32_t t_ssthresh, t_cwnd;
    uint64_t t_ucwnd_sbw;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    t_ucwnd_sbw = 0;
    if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
            t_ssthresh += net->ssthresh;
            t_cwnd += net->cwnd;
            if (net->lastsa > 0) {
                t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)net->lastsa;
            }
        }
        if (t_ucwnd_sbw == 0) {
            t_ucwnd_sbw = 1;
        }
    }
    /*-
     * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
     * (net->fast_retran_loss_recovery == 0)))
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if ((asoc->fast_retran_loss_recovery == 0) ||
            (asoc->sctp_cmt_on_off > 0)) {
            /* out of a RFC2582 Fast recovery window? */
            if (net->net_ack > 0) {
                /*
                 * per section 7.2.3, are there any
                 * destinations that had a fast retransmit
                 * to them. If so what we need to do is
                 * adjust ssthresh and cwnd.
                 */
                struct sctp_tmit_chunk *lchk;
                int old_cwnd = net->cwnd;

                if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
                    (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
                    if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) {
                        net->ssthresh = (uint32_t)(((uint64_t)4 *
                            (uint64_t)net->mtu *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                    }
                    if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) {
                        uint32_t srtt;

                        srtt = net->lastsa;
                        /*
                         * lastsa>>3; we don't need
                         * to divide ...
                         */
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        /*
                         * Short Version => Equal to
                         * Contel Version MBe
                         */
                        net->ssthresh = (uint32_t)(((uint64_t)4 *
                            (uint64_t)net->mtu *
                            (uint64_t)net->cwnd) /
                            ((uint64_t)srtt *
                            t_ucwnd_sbw));
                         /* INCREASE FACTOR */ ;
                    }
                    if ((net->cwnd > t_cwnd / 2) &&
                        (net->ssthresh < net->cwnd - t_cwnd / 2)) {
                        net->ssthresh = net->cwnd - t_cwnd / 2;
                    }
                    if (net->ssthresh < net->mtu) {
                        net->ssthresh = net->mtu;
                    }
                } else {
                    net->ssthresh = net->cwnd / 2;
                    if (net->ssthresh < (net->mtu * 2)) {
                        net->ssthresh = 2 * net->mtu;
                    }
                }
                net->cwnd = net->ssthresh;
                sctp_enforce_cwnd_limit(asoc, net);
                SDT_PROBE5(sctp, cwnd, net, fr,
                    stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
                    old_cwnd, net->cwnd);
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                    sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
                        SCTP_CWND_LOG_FROM_FR);
                }
                lchk = TAILQ_FIRST(&asoc->send_queue);

                net->partial_bytes_acked = 0;
                /* Turn on fast recovery window */
                asoc->fast_retran_loss_recovery = 1;
                if (lchk == NULL) {
                    /* Mark end of the window */
                    asoc->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                /*
                 * CMT fast recovery -- per destination
                 * recovery variable.
                 */
                net->fast_retran_loss_recovery = 1;

                if (lchk == NULL) {
                    /* Mark end of the window */
                    net->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net,
                    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_1);
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
        } else if (net->net_ack > 0) {
            /*
             * Mark a peg that we WOULD have done a cwnd
             * reduction but RFC2582 prevented this action.
             */
            SCTP_STAT_INCR(sctps_fastretransinrtt);
        }
    }
}
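
/*
 * Illustration of the ssthresh cuts above (illustrative numbers only): in
 * the non-resource-pooling case a destination with cwnd = 10 * MTU gets
 * ssthresh = cwnd / 2 = 5 * MTU. In the RPV1 case the cut is proportional
 * to this destination's share of the pooled total: with
 * net->ssthresh = 30000, t_ssthresh = 60000 and MTU 1500, ssthresh
 * becomes 4 * 1500 * 30000 / 60000 = 3000 bytes.
 */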

/* Defines for instantaneous bw decisions */
#define SCTP_INST_LOOSING 1	/* Losing to other flows */
#define SCTP_INST_NEUTRAL 2	/* Neutral, no indication */
#define SCTP_INST_GAINING 3	/* Gaining, step down possible */

static int
cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
    uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
{
    uint64_t oth, probepoint;

    probepoint = (((uint64_t)net->cwnd) << 32);
    if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
        /*
         * rtt increased we don't update bw.. so we don't update the
         * rtt either, since the higher rtt is not a usable
         * reference point.
         */
        /* Probe point 5 */
        probepoint |= ((5 << 16) | 1);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
            if (net->cc_mod.rtcc.last_step_state == 5)
                net->cc_mod.rtcc.step_cnt++;
            else
                net->cc_mod.rtcc.step_cnt = 1;
            net->cc_mod.rtcc.last_step_state = 5;
            if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
                ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
                ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
                /* Try a step down */
                oth = net->cc_mod.rtcc.vol_reduce;
                oth <<= 16;
                oth |= net->cc_mod.rtcc.step_cnt;
                oth <<= 16;
                oth |= net->cc_mod.rtcc.last_step_state;
                SDT_PROBE5(sctp, cwnd, net, rttstep,
                    vtag,
                    ((net->cc_mod.rtcc.lbw << 32) | nbw),
                    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                    oth,
                    probepoint);
                if (net->cwnd > (4 * net->mtu)) {
                    net->cwnd -= net->mtu;
                    net->cc_mod.rtcc.vol_reduce++;
                } else {
                    net->cc_mod.rtcc.step_cnt = 0;
                }
            }
        }
        return (1);
    }
    if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
        /*
         * rtt decreased, there could be more room. we update both
         * the bw and the rtt here to lock this in as a good step
         * down.
         */
        /* Probe point 6 */
        probepoint |= ((6 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE5(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            if ((net->cc_mod.rtcc.last_step_state == 5) &&
                (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
                /* Step down worked */
                net->cc_mod.rtcc.step_cnt = 0;
                return (1);
            } else {
                net->cc_mod.rtcc.last_step_state = 6;
                net->cc_mod.rtcc.step_cnt = 0;
            }
        }
        net->cc_mod.rtcc.lbw = nbw;
        net->cc_mod.rtcc.lbw_rtt = net->rtt;
        net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
        if (inst_ind == SCTP_INST_GAINING)
            return (1);
        else if (inst_ind == SCTP_INST_NEUTRAL)
            return (1);
        else
            return (0);
    }
    /*
     * Ok bw and rtt remained the same .. no update to any
     */
    /* Probe point 7 */
    probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
    SDT_PROBE5(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
        if (net->cc_mod.rtcc.last_step_state == 5)
            net->cc_mod.rtcc.step_cnt++;
        else
            net->cc_mod.rtcc.step_cnt = 1;
        net->cc_mod.rtcc.last_step_state = 5;
        if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
            ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
            ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
            /* Try a step down */
            if (net->cwnd > (4 * net->mtu)) {
                net->cwnd -= net->mtu;
                net->cc_mod.rtcc.vol_reduce++;
                return (1);
            } else {
                net->cc_mod.rtcc.step_cnt = 0;
            }
        }
    }
    if (inst_ind == SCTP_INST_GAINING)
        return (1);
    else if (inst_ind == SCTP_INST_NEUTRAL)
        return (1);
    else
        return ((int)net->cc_mod.rtcc.ret_from_eq);
}
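
/*
 * Sketch of the steady-step probing above, assuming the steady_step knob
 * is set to, say, 20: once bw and rtt have been flat for 20 consecutive
 * evaluations in state 5, one MTU is voluntarily shaved off cwnd
 * (vol_reduce counts such give-backs). If a later evaluation then sees
 * rtt drop while still in state 5 with step_cnt past the threshold, the
 * step down is judged to have worked and the smaller cwnd is kept.
 */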

static int
cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
    uint64_t vtag, uint8_t inst_ind)
{
    uint64_t oth, probepoint;

    /* Bandwidth decreased. */
    probepoint = (((uint64_t)net->cwnd) << 32);
    if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
        /* rtt increased */
        /* Did we add more */
        if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
            (inst_ind != SCTP_INST_LOOSING)) {
            /* We caused it maybe.. back off? */
            /* Probe point 1 */
            probepoint |= ((1 << 16) | 1);
            SDT_PROBE5(sctp, cwnd, net, rttvar,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                net->flight_size,
                probepoint);
            if (net->cc_mod.rtcc.ret_from_eq) {
                /*
                 * Switch over to CA if we are less
                 * aggressive
                 */
                net->ssthresh = net->cwnd - 1;
                net->partial_bytes_acked = 0;
            }
            return (1);
        }
        /* Probe point 2 */
        probepoint |= ((2 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        /* Someone else - fight for more? */
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE5(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            /*
             * Did we voluntarily give up some? if so take one
             * back please
             */
            if ((net->cc_mod.rtcc.vol_reduce) &&
                (inst_ind != SCTP_INST_GAINING)) {
                net->cwnd += net->mtu;
                sctp_enforce_cwnd_limit(&stcb->asoc, net);
                net->cc_mod.rtcc.vol_reduce--;
            }
            net->cc_mod.rtcc.last_step_state = 2;
            net->cc_mod.rtcc.step_cnt = 0;
        }
        goto out_decision;
    } else if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
        /* bw & rtt decreased */
        /* Probe point 3 */
        probepoint |= ((3 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE5(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            if ((net->cc_mod.rtcc.vol_reduce) &&
                (inst_ind != SCTP_INST_GAINING)) {
                net->cwnd += net->mtu;
                sctp_enforce_cwnd_limit(&stcb->asoc, net);
                net->cc_mod.rtcc.vol_reduce--;
            }
            net->cc_mod.rtcc.last_step_state = 3;
            net->cc_mod.rtcc.step_cnt = 0;
        }
        goto out_decision;
    } else {
        /* The bw decreased but rtt stayed the same */
        /* Probe point 4 */
        probepoint |= ((4 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        if (net->cc_mod.rtcc.steady_step) {
            oth = net->cc_mod.rtcc.vol_reduce;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.step_cnt;
            oth <<= 16;
            oth |= net->cc_mod.rtcc.last_step_state;
            SDT_PROBE5(sctp, cwnd, net, rttstep,
                vtag,
                ((net->cc_mod.rtcc.lbw << 32) | nbw),
                ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                oth,
                probepoint);
            if ((net->cc_mod.rtcc.vol_reduce) &&
                (inst_ind != SCTP_INST_GAINING)) {
                net->cwnd += net->mtu;
                sctp_enforce_cwnd_limit(&stcb->asoc, net);
                net->cc_mod.rtcc.vol_reduce--;
            }
            net->cc_mod.rtcc.last_step_state = 4;
            net->cc_mod.rtcc.step_cnt = 0;
        }
    }
out_decision:
    net->cc_mod.rtcc.lbw = nbw;
    net->cc_mod.rtcc.lbw_rtt = net->rtt;
    net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    if (inst_ind == SCTP_INST_GAINING) {
        return (1);
    } else {
        return (0);
    }
}

static int
cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag)
{
    uint64_t oth, probepoint;

    /*
     * BW increased, so update and return 0, since all actions in our
     * table say to do the normal CC update. Note that we pay no
     * attention to the inst_ind since our overall sum is increasing.
     */
    /* Probe point 0 */
    probepoint = (((uint64_t)net->cwnd) << 32);
    SDT_PROBE5(sctp, cwnd, net, rttvar,
        vtag,
        ((net->cc_mod.rtcc.lbw << 32) | nbw),
        ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
        net->flight_size,
        probepoint);
    if (net->cc_mod.rtcc.steady_step) {
        oth = net->cc_mod.rtcc.vol_reduce;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.step_cnt;
        oth <<= 16;
        oth |= net->cc_mod.rtcc.last_step_state;
        SDT_PROBE5(sctp, cwnd, net, rttstep,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | nbw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            oth,
            probepoint);
        net->cc_mod.rtcc.last_step_state = 0;
        net->cc_mod.rtcc.step_cnt = 0;
        net->cc_mod.rtcc.vol_reduce = 0;
    }
    net->cc_mod.rtcc.lbw = nbw;
    net->cc_mod.rtcc.lbw_rtt = net->rtt;
    net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
    return (0);
}
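
/*
 * Note on the probepoint encoding used by these DTrace probes: the upper
 * 32 bits carry the cwnd at the time of the probe and the low 32 bits are
 * ((id << 16) | flag), where id identifies the decision point (0-0xd) and
 * flag marks hold-vs-update. For example, with cwnd = 12000 at decision
 * point 8, probepoint = (12000ULL << 32) | (8 << 16).
 */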

/*
 * RTCC Algorithm to limit growth of cwnd, return true if you want to NOT
 * allow cwnd growth.
 */
static int
cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
{
    uint64_t bw_offset, rtt_offset;
    uint64_t probepoint, rtt, vtag;
    uint64_t bytes_for_this_rtt, inst_bw;
    uint64_t div, inst_off;
    int bw_shift;
    uint8_t inst_ind;
    int ret;

    /*-
     * Here we need to see if we want
     * to limit cwnd growth due to increase
     * in overall rtt but no increase in bw.
     * We use the following table to figure
     * out what we should do. When we return
     * 0, cc update goes on as planned. If we
     * return 1, then no cc update happens and cwnd
     * stays where it is at.
     * ----------------------------------
     *   BW    |    RTT   | Action
     * *********************************
     *   INC   |    INC   | return 0
     * ----------------------------------
     *   INC   |    SAME  | return 0
     * ----------------------------------
     *   INC   |    DECR  | return 0
     * ----------------------------------
     *   SAME  |    INC   | return 1
     * ----------------------------------
     *   SAME  |    SAME  | return 1
     * ----------------------------------
     *   SAME  |    DECR  | return 0
     * ----------------------------------
     *   DECR  |    INC   | return 0 or 1 based on if we caused.
     * ----------------------------------
     *   DECR  |    SAME  | return 0
     * ----------------------------------
     *   DECR  |    DECR  | return 0
     * ----------------------------------
     *
     * We are a bit fuzz on what an increase or
     * decrease is. For BW it is the same if
     * it did not change within 1/64th. For
     * RTT it stayed the same if it did not
     * change within 1/32nd
     */
    bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw);
    rtt = stcb->asoc.my_vtag;
    vtag = (rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
    probepoint = (((uint64_t)net->cwnd) << 32);
    rtt = net->rtt;
    if (net->cc_mod.rtcc.rtt_set_this_sack) {
        net->cc_mod.rtcc.rtt_set_this_sack = 0;
        bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
        net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
        if (net->rtt) {
            div = net->rtt / 1000;
            if (div) {
                inst_bw = bytes_for_this_rtt / div;
                inst_off = inst_bw >> bw_shift;
                if (inst_bw > nbw)
                    inst_ind = SCTP_INST_GAINING;
                else if ((inst_bw + inst_off) < nbw)
                    inst_ind = SCTP_INST_LOOSING;
                else
                    inst_ind = SCTP_INST_NEUTRAL;
                probepoint |= ((0xb << 16) | inst_ind);
            } else {
                inst_ind = net->cc_mod.rtcc.last_inst_ind;
                inst_bw = bytes_for_this_rtt / (uint64_t)(net->rtt);
                /* Can't determine do not change */
                probepoint |= ((0xc << 16) | inst_ind);
            }
        } else {
            inst_ind = net->cc_mod.rtcc.last_inst_ind;
            inst_bw = bytes_for_this_rtt;
            /* Can't determine do not change */
            probepoint |= ((0xd << 16) | inst_ind);
        }
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((nbw << 32) | inst_bw),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
            net->flight_size,
            probepoint);
    } else {
        /* No rtt measurement, use last one */
        inst_ind = net->cc_mod.rtcc.last_inst_ind;
    }
    bw_offset = net->cc_mod.rtcc.lbw >> bw_shift;
    if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
        ret = cc_bw_increase(stcb, net, nbw, vtag);
        goto out;
    }
    rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
    if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
        ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind);
        goto out;
    }
    /*
     * If we reach here then we are in a situation where the bw stayed
     * the same.
     */
    ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind);
out:
    net->cc_mod.rtcc.last_inst_ind = inst_ind;
    return (ret);
}
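
/*
 * Worked example of the fuzz band above (illustrative numbers): with a
 * bw shift of 6 (1/64th) and lbw = 1,000,000 bytes/sec, bw_offset is
 * 15,625, so any nbw in [984,375 .. 1,015,625] counts as "SAME". With an
 * rtt shift of 5 (1/32nd) and lbw_rtt = 64 ms, rtt_offset is 2 ms, so an
 * rtt within +/- 2 ms of the stored value counts as unchanged.
 */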

static void
sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
{
    struct sctp_nets *net;
    int old_cwnd;
    uint32_t t_ssthresh, t_cwnd, incr;
    uint64_t t_ucwnd_sbw;
    uint64_t t_path_mptcp;
    uint64_t mptcp_like_alpha;
    uint32_t srtt;
    uint64_t max_path;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    t_ucwnd_sbw = 0;
    t_path_mptcp = 0;
    mptcp_like_alpha = 1;
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
        max_path = 0;
        TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
            t_ssthresh += net->ssthresh;
            t_cwnd += net->cwnd;
            /* lastsa>>3; we don't need to divide ... */
            srtt = net->lastsa;
            if (srtt > 0) {
                uint64_t tmp;

                t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)srtt;
                t_path_mptcp += (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
                    (((uint64_t)net->mtu) * (uint64_t)srtt);
                tmp = (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_N) /
                    ((uint64_t)net->mtu * (uint64_t)(srtt * srtt));
                if (tmp > max_path) {
                    max_path = tmp;
                }
            }
        }
        if (t_path_mptcp > 0) {
            mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
        } else {
            mptcp_like_alpha = 1;
        }
    }
    if (t_ssthresh == 0) {
        t_ssthresh = 1;
    }
    if (t_ucwnd_sbw == 0) {
        t_ucwnd_sbw = 1;
    }
    /******************************/
    /* update cwnd and Early FR   */
    /******************************/
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code. Need to debug.
         */
        if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
            if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
                SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
                net->will_exit_fast_recovery = 1;
            }
        }
#endif
        /* if nothing was acked on this destination skip it */
        if (net->net_ack == 0) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
            }
            continue;
        }
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code
         */
        /*
         * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
         * && net->will_exit_fast_recovery == 0) { @@@ Do something
         * } else if (sctp_cmt_on_off == 0 &&
         * asoc->fast_retran_loss_recovery && will_exit == 0) {
         */
#endif
        if (asoc->fast_retran_loss_recovery &&
            (will_exit == 0) &&
            (asoc->sctp_cmt_on_off == 0)) {
            /*
             * If we are in loss recovery we skip any cwnd
             * update
             */
            return;
        }
        /*
         * Did any measurements go on for this network?
         */
        if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
            uint64_t nbw;

            /*
             * At this point our bw_bytes has been updated by
             * incoming sack information.
             *
             * But our bw may not yet be set.
             *
             */
            if ((net->cc_mod.rtcc.new_tot_time / 1000) > 0) {
                nbw = net->cc_mod.rtcc.bw_bytes / (net->cc_mod.rtcc.new_tot_time / 1000);
            } else {
                nbw = net->cc_mod.rtcc.bw_bytes;
            }
            if (net->cc_mod.rtcc.lbw) {
                if (cc_bw_limit(stcb, net, nbw)) {
                    /* Hold here, no update */
                    continue;
                }
            } else {
                uint64_t vtag, probepoint;

                probepoint = (((uint64_t)net->cwnd) << 32);
                probepoint |= ((0xa << 16) | 0);
                vtag = (net->rtt << 32) |
                    (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
                    (stcb->rport);
                SDT_PROBE5(sctp, cwnd, net, rttvar,
                    vtag,
                    nbw,
                    ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
                    net->flight_size,
                    probepoint);
                net->cc_mod.rtcc.lbw = nbw;
                net->cc_mod.rtcc.lbw_rtt = net->rtt;
                if (net->cc_mod.rtcc.rtt_set_this_sack) {
                    net->cc_mod.rtcc.rtt_set_this_sack = 0;
                    net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
                }
            }
        }
        /*
         * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
         * moved.
         */
        if (accum_moved ||
            ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
            /* If the cumulative ack moved we can proceed */
            if (net->cwnd <= net->ssthresh) {
                /* We are in slow start */
                if (net->flight_size + net->net_ack >= net->cwnd) {
                    uint32_t limit;

                    old_cwnd = net->cwnd;
                    switch (asoc->sctp_cmt_on_off) {
                    case SCTP_CMT_RPV1:
                        limit = (uint32_t)(((uint64_t)net->mtu *
                            (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                        incr = (uint32_t)(((uint64_t)net->net_ack *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_RPV2:
                        /*
                         * lastsa>>3; we don't need
                         * to divide ...
                         */
                        srtt = net->lastsa;
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        limit = (uint32_t)(((uint64_t)net->mtu *
                            (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
                            (uint64_t)net->cwnd) /
                            ((uint64_t)srtt * t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        incr = (uint32_t)(((uint64_t)net->net_ack *
                            (uint64_t)net->cwnd) /
                            ((uint64_t)srtt * t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_MPTCP:
                        limit = (uint32_t)(((uint64_t)net->mtu *
                            mptcp_like_alpha *
                            (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >>
                            SHIFT_MPTCP_MULTI);
                        incr = (uint32_t)(((uint64_t)net->net_ack *
                            mptcp_like_alpha) >>
                            SHIFT_MPTCP_MULTI);
                        if (incr > limit) {
                            incr = limit;
                        }
                        if (incr > net->net_ack) {
                            incr = net->net_ack;
                        }
                        if (incr > net->mtu) {
                            incr = net->mtu;
                        }
                        break;
                    default:
                        incr = net->net_ack;
                        if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
                            incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
                        }
                        break;
                    }
                    net->cwnd += incr;
                    sctp_enforce_cwnd_limit(asoc, net);
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, incr,
                            SCTP_CWND_LOG_FROM_SS);
                    }
                    SDT_PROBE5(sctp, cwnd, net, ack,
                        stcb->asoc.my_vtag,
                        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                        net,
                        old_cwnd, net->cwnd);
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_SS);
                    }
                }
            } else {
                /* We are in congestion avoidance */
                /*
                 * Add to pba
                 */
                net->partial_bytes_acked += net->net_ack;

                if ((net->flight_size + net->net_ack >= net->cwnd) &&
                    (net->partial_bytes_acked >= net->cwnd)) {
                    net->partial_bytes_acked -= net->cwnd;
                    old_cwnd = net->cwnd;
                    switch (asoc->sctp_cmt_on_off) {
                    case SCTP_CMT_RPV1:
                        incr = (uint32_t)(((uint64_t)net->mtu *
                            (uint64_t)net->ssthresh) /
                            (uint64_t)t_ssthresh);
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_RPV2:
                        /*
                         * lastsa>>3; we don't need
                         * to divide ...
                         */
                        srtt = net->lastsa;
                        if (srtt == 0) {
                            srtt = 1;
                        }
                        incr = (uint32_t)((uint64_t)net->mtu *
                            (uint64_t)net->cwnd /
                            ((uint64_t)srtt *
                            t_ucwnd_sbw));
                        /* INCREASE FACTOR */
                        if (incr == 0) {
                            incr = 1;
                        }
                        break;
                    case SCTP_CMT_MPTCP:
                        incr = (uint32_t)((mptcp_like_alpha *
                            (uint64_t)net->cwnd) >>
                            SHIFT_MPTCP_MULTI);
                        if (incr > net->mtu) {
                            incr = net->mtu;
                        }
                        break;
                    default:
                        incr = net->mtu;
                        break;
                    }
                    net->cwnd += incr;
                    sctp_enforce_cwnd_limit(asoc, net);
                    SDT_PROBE5(sctp, cwnd, net, ack,
                        stcb->asoc.my_vtag,
                        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                        net,
                        old_cwnd, net->cwnd);
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->mtu,
                            SCTP_CWND_LOG_FROM_CA);
                    }
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_CA);
                    }
                }
            }
        } else {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu,
                    SCTP_CWND_LOG_NO_CUMACK);
            }
        }
    }
}
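
/*
 * Worked example of the ABC-style slow-start increase above (CMT off, so
 * the "default:" arm runs; illustrative numbers): if a SACK newly
 * acknowledges net_ack = 4380 bytes, the MTU is 1500 and
 * sctp_L2_abc_variable is 2, then incr = min(4380, 2 * 1500) = 3000
 * bytes, i.e. at most two segments of growth per SACK regardless of how
 * many chunks were acked.
 */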

static void
sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int old_cwnd;

    old_cwnd = net->cwnd;
    net->cwnd = net->mtu;
    SDT_PROBE5(sctp, cwnd, net, ack,
        stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
        old_cwnd, net->cwnd);
    SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
        (void *)net, net->cwnd);
}

static void
sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int old_cwnd = net->cwnd;
    uint32_t t_ssthresh, t_cwnd;
    uint64_t t_ucwnd_sbw;

    /* MT FIXME: Don't compute this over and over again */
    t_ssthresh = 0;
    t_cwnd = 0;
    if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
        (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
        struct sctp_nets *lnet;
        uint32_t srtt;

        t_ucwnd_sbw = 0;
        TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
            t_ssthresh += lnet->ssthresh;
            t_cwnd += lnet->cwnd;
            srtt = lnet->lastsa;
            /* lastsa>>3; we don't need to divide ... */
            if (srtt > 0) {
                t_ucwnd_sbw += (uint64_t)lnet->cwnd / (uint64_t)srtt;
            }
        }
        if (t_ssthresh < 1) {
            t_ssthresh = 1;
        }
        if (t_ucwnd_sbw < 1) {
            t_ucwnd_sbw = 1;
        }
        if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) {
            net->ssthresh = (uint32_t)(((uint64_t)4 *
                (uint64_t)net->mtu *
                (uint64_t)net->ssthresh) /
                (uint64_t)t_ssthresh);
        } else {
            uint64_t cc_delta;

            srtt = net->lastsa;
            /* lastsa>>3; we don't need to divide ... */
            if (srtt == 0) {
                srtt = 1;
            }
            cc_delta = t_ucwnd_sbw * (uint64_t)srtt / 2;
            if (cc_delta < t_cwnd) {
                net->ssthresh = (uint32_t)((uint64_t)t_cwnd - cc_delta);
            } else {
                net->ssthresh = net->mtu;
            }
        }
        if ((net->cwnd > t_cwnd / 2) &&
            (net->ssthresh < net->cwnd - t_cwnd / 2)) {
            net->ssthresh = net->cwnd - t_cwnd / 2;
        }
        if (net->ssthresh < net->mtu) {
            net->ssthresh = net->mtu;
        }
    } else {
        net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
    }
    net->cwnd = net->mtu;
    net->partial_bytes_acked = 0;
    SDT_PROBE5(sctp, cwnd, net, to,
        stcb->asoc.my_vtag,
        ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
        net,
        old_cwnd, net->cwnd);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
    }
}
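
/*
 * Worked example of the RPV1 timeout cut above (illustrative numbers):
 * with net->ssthresh = 30000, t_ssthresh = 120000 and MTU 1500, the new
 * ssthresh is 4 * 1500 * 30000 / 120000 = 1500 bytes, i.e. this flow's
 * share of the pooled ssthresh scaled into MTUs; cwnd then restarts from
 * one MTU as RFC 4960 requires after a T3 timeout.
 */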

static void
sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost, int use_rtcc)
{
    int old_cwnd = net->cwnd;

    if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
        /* Data center Congestion Control */
        if (in_window == 0) {
            /*
             * Go to CA with the cwnd at the point we sent the
             * TSN that was marked with a CE.
             */
            if (net->ecn_prev_cwnd < net->cwnd) {
                /* Restore to prev cwnd */
                net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
            } else {
                /* Just cut in 1/2 */
                net->cwnd /= 2;
            }
            /* Drop to CA */
            net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        } else {
            /*
             * Further tuning down required over the drastic
             * original cut
             */
            net->ssthresh -= (net->mtu * num_pkt_lost);
            net->cwnd -= (net->mtu * num_pkt_lost);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        }
        SCTP_STAT_INCR(sctps_ecnereducedcwnd);
    } else {
        if (in_window == 0) {
            SCTP_STAT_INCR(sctps_ecnereducedcwnd);
            net->ssthresh = net->cwnd / 2;
            if (net->ssthresh < net->mtu) {
                net->ssthresh = net->mtu;
                /*
                 * here back off the timer as well, to slow
                 * us down
                 */
                net->RTO <<= 1;
            }
            net->cwnd = net->ssthresh;
            SDT_PROBE5(sctp, cwnd, net, ecn,
                stcb->asoc.my_vtag,
                ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
                net,
                old_cwnd, net->cwnd);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
            }
        }
    }
}

static void
sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
    struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
    uint32_t *bottle_bw, uint32_t *on_queue)
{
    uint32_t bw_avail;
    unsigned int incr;
    int old_cwnd = net->cwnd;

    /* get bottle neck bw */
    *bottle_bw = ntohl(cp->bottle_bw);
    /* and whats on queue */
    *on_queue = ntohl(cp->current_onq);
    /*
     * adjust the on-queue if our flight is more it could be that the
     * router has not yet gotten data "in-flight" to it
     */
    if (*on_queue < net->flight_size) {
        *on_queue = net->flight_size;
    }
    /* rtt is measured in micro seconds, bottle_bw in bytes per second */
    bw_avail = (uint32_t)(((uint64_t)(*bottle_bw) * net->rtt) / (uint64_t)1000000);
    if (bw_avail > *bottle_bw) {
        /*
         * Cap the growth to no more than the bottle neck. This can
         * happen as RTT slides up due to queues. It also means if
         * you have more than a 1 second RTT with a empty queue you
         * will be limited to the bottle_bw per second no matter if
         * other points have 1/2 the RTT and you could get more
         * out...
         */
        bw_avail = *bottle_bw;
    }
    if (*on_queue > bw_avail) {
        /*
         * No room for anything else don't allow anything else to be
         * "added to the fire".
         */
        int seg_inflight, seg_onqueue, my_portion;

        net->partial_bytes_acked = 0;
        /* how much are we over queue size? */
        incr = *on_queue - bw_avail;
        if (stcb->asoc.seen_a_sack_this_pkt) {
            /*
             * undo any cwnd adjustment that the sack might have
             * made
             */
            net->cwnd = net->prev_cwnd;
        }
        /* Now how much of that is mine? */
        seg_inflight = net->flight_size / net->mtu;
        seg_onqueue = *on_queue / net->mtu;
        my_portion = (incr * seg_inflight) / seg_onqueue;

        /* Have I made an adjustment already */
        if (net->cwnd > net->flight_size) {
            /*
             * for this flight I made an adjustment we need to
             * decrease the portion by a share our previous
             * adjustment.
             */
            int diff_adj;

            diff_adj = net->cwnd - net->flight_size;
            if (diff_adj > my_portion)
                my_portion = 0;
            else
                my_portion -= diff_adj;
        }
        /*
         * back down to the previous cwnd (assume we have had a sack
         * before this packet). minus what ever portion of the
         * overage is my fault.
         */
        net->cwnd -= my_portion;

        /* we will NOT back down more than 1 MTU */
        if (net->cwnd <= net->mtu) {
            net->cwnd = net->mtu;
        }
        /* force into CA */
        net->ssthresh = net->cwnd - 1;
    } else {
        /*
         * Take 1/4 of the space left or max burst up .. whichever
         * is less.
         */
        incr = (bw_avail - *on_queue) >> 2;
        if ((stcb->asoc.max_burst > 0) &&
            (stcb->asoc.max_burst * net->mtu < incr)) {
            incr = stcb->asoc.max_burst * net->mtu;
        }
        net->cwnd += incr;
    }
    if (net->cwnd > bw_avail) {
        /* We can't exceed the pipe size */
        net->cwnd = bw_avail;
    }
    if (net->cwnd < net->mtu) {
        /* We always have 1 MTU */
        net->cwnd = net->mtu;
    }
    sctp_enforce_cwnd_limit(&stcb->asoc, net);
    if (net->cwnd - old_cwnd != 0) {
        /* log only changes */
        SDT_PROBE5(sctp, cwnd, net, pd,
            stcb->asoc.my_vtag,
            ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
            net,
            old_cwnd, net->cwnd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
                SCTP_CWND_LOG_FROM_SAT);
        }
    }
}
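
/*
 * Worked example of the PKTDROP math above (illustrative numbers): a
 * reported bottle_bw of 1,000,000 bytes/sec and an rtt of 100,000 usec
 * give bw_avail = 100,000 bytes of pipe. If the router reports 120,000
 * bytes on queue and half the queued segments are ours, our portion of
 * the 20,000-byte overage is roughly 10,000 bytes (less any reduction we
 * already made this flight), which comes off cwnd, and ssthresh is set
 * just below cwnd to force congestion avoidance.
 */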

static void
sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
    struct sctp_nets *net, int burst_limit)
{
    int old_cwnd = net->cwnd;

    if (net->ssthresh < net->cwnd)
        net->ssthresh = net->cwnd;
    if (burst_limit) {
        net->cwnd = (net->flight_size + (burst_limit * net->mtu));
        sctp_enforce_cwnd_limit(&stcb->asoc, net);
        SDT_PROBE5(sctp, cwnd, net, bl,
            stcb->asoc.my_vtag,
            ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
            net,
            old_cwnd, net->cwnd);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
        }
    }
}

static void
sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
    /* Passing a zero argument in last disables the rtcc algorithm */
    sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
}

static void
sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
    /* Passing a zero argument in last disables the rtcc algorithm */
    sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
}

/*
 * Here starts the RTCCVAR type CC invented by RRS which is a slight mod
 * to RFC2581. We reuse a common routine or two since these algorithms
 * are so close and need to remain the same.
 */
static void
sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
    int in_window, int num_pkt_lost)
{
    sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
}

static void
sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net,
    struct sctp_tmit_chunk *tp1)
{
    net->cc_mod.rtcc.bw_bytes += tp1->send_size;
}

static void
sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
    if (net->cc_mod.rtcc.tls_needs_set > 0) {
        /* We had a bw measurement going on */
        struct timeval ltls;

        SCTP_GETPTIME_TIMEVAL(&ltls);
        timevalsub(&ltls, &net->cc_mod.rtcc.tls);
        net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec;
    }
}

static void
sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    uint64_t vtag, probepoint;

    if (net->cc_mod.rtcc.lbw) {
        /* Clear the old bw.. we went to 0 in-flight */
        vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
            (stcb->rport);
        probepoint = (((uint64_t)net->cwnd) << 32);
        /* Probe point 8 */
        probepoint |= ((8 << 16) | 0);
        SDT_PROBE5(sctp, cwnd, net, rttvar,
            vtag,
            ((net->cc_mod.rtcc.lbw << 32) | 0),
            ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
            net->flight_size,
            probepoint);
        net->cc_mod.rtcc.lbw_rtt = 0;
        net->cc_mod.rtcc.cwnd_at_bw_set = 0;
        net->cc_mod.rtcc.lbw = 0;
        net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
        net->cc_mod.rtcc.vol_reduce = 0;
        net->cc_mod.rtcc.bw_tot_time = 0;
        net->cc_mod.rtcc.bw_bytes = 0;
        net->cc_mod.rtcc.tls_needs_set = 0;
        if (net->cc_mod.rtcc.steady_step) {
            net->cc_mod.rtcc.vol_reduce = 0;
            net->cc_mod.rtcc.step_cnt = 0;
            net->cc_mod.rtcc.last_step_state = 0;
        }
        if (net->cc_mod.rtcc.ret_from_eq) {
            /* less aggressive one - reset cwnd too */
            uint32_t cwnd_in_mtu, cwnd;

            cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
            if (cwnd_in_mtu == 0) {
                /*
                 * Using 0 means that the value of RFC 4960
                 * is used.
                 */
                cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
            } else {
                /*
                 * We take the minimum of the burst limit
                 * and the initial congestion window.
                 */
                if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst))
                    cwnd_in_mtu = stcb->asoc.max_burst;
                cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
            }
            if (net->cwnd > cwnd) {
                /*
                 * Only set if we are not a timeout (i.e.
                 * down to 1 mtu)
                 */
                net->cwnd = cwnd;
            }
        }
    }
}

static void
sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    uint64_t vtag, probepoint;

    sctp_set_initial_cc_param(stcb, net);
    stcb->asoc.use_precise_time = 1;
    probepoint = (((uint64_t)net->cwnd) << 32);
    probepoint |= ((9 << 16) | 0);
    vtag = (net->rtt << 32) |
        (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
        (stcb->rport);
    SDT_PROBE5(sctp, cwnd, net, rttvar,
        vtag,
        0,
        0,
        0,
        probepoint);
    net->cc_mod.rtcc.lbw_rtt = 0;
    net->cc_mod.rtcc.cwnd_at_bw_set = 0;
    net->cc_mod.rtcc.vol_reduce = 0;
    net->cc_mod.rtcc.lbw = 0;
    net->cc_mod.rtcc.vol_reduce = 0;
    net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
    net->cc_mod.rtcc.bw_tot_time = 0;
    net->cc_mod.rtcc.bw_bytes = 0;
    net->cc_mod.rtcc.tls_needs_set = 0;
    net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret);
    net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step);
    net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn);
    net->cc_mod.rtcc.step_cnt = 0;
    net->cc_mod.rtcc.last_step_state = 0;
}

static int
sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget,
    struct sctp_cc_option *cc_opt)
{
    struct sctp_nets *net;

    if (setorget == 1) {
        /* a set */
        if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
            if ((cc_opt->aid_value.assoc_value != 0) &&
                (cc_opt->aid_value.assoc_value != 1)) {
                return (EINVAL);
            }
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value;
            }
        } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
            if ((cc_opt->aid_value.assoc_value != 0) &&
                (cc_opt->aid_value.assoc_value != 1)) {
                return (EINVAL);
            }
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value;
            }
        } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
            TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
                net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value;
            }
        } else {
            return (EINVAL);
        }
    } else {
        /* a get */
        if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq;
        } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn;
        } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
            net = TAILQ_FIRST(&stcb->asoc.nets);
            if (net == NULL) {
                return (EFAULT);
            }
            cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step;
        } else {
            return (EINVAL);
        }
    }
    return (0);
}

static void
sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net)
{
    if (net->cc_mod.rtcc.tls_needs_set == 0) {
        SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls);
        net->cc_mod.rtcc.tls_needs_set = 2;
    }
}

static void
sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all, int will_exit)
{
    /* Passing a one argument at the last enables the rtcc algorithm */
    sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
}

static void
sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED,
    struct sctp_nets *net,
    struct timeval *now SCTP_UNUSED)
{
    net->cc_mod.rtcc.rtt_set_this_sack = 1;
}

/* Here starts Sally Floyds HS-TCP */

struct sctp_hs_raise_drop {
    int32_t cwnd;
    int8_t increase;
    int8_t drop_percent;
};

#define SCTP_HS_TABLE_SIZE 73

static const struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
    {38, 1, 50},	/* 0 */
    {118, 2, 44},	/* 1 */
    {221, 3, 41},	/* 2 */
    {347, 4, 38},	/* 3 */
    {495, 5, 37},	/* 4 */
    {663, 6, 35},	/* 5 */
    {851, 7, 34},	/* 6 */
    {1058, 8, 33},	/* 7 */
    {1284, 9, 32},	/* 8 */
    {1529, 10, 31},	/* 9 */
    {1793, 11, 30},	/* 10 */
    {2076, 12, 29},	/* 11 */
    {2378, 13, 28},	/* 12 */
    {2699, 14, 28},	/* 13 */
    {3039, 15, 27},	/* 14 */
    {3399, 16, 27},	/* 15 */
    {3778, 17, 26},	/* 16 */
    {4177, 18, 26},	/* 17 */
    {4596, 19, 25},	/* 18 */
    {5036, 20, 25},	/* 19 */
    {5497, 21, 24},	/* 20 */
    {5979, 22, 24},	/* 21 */
    {6483, 23, 23},	/* 22 */
    {7009, 24, 23},	/* 23 */
    {7558, 25, 22},	/* 24 */
    {8130, 26, 22},	/* 25 */
    {8726, 27, 22},	/* 26 */
    {9346, 28, 21},	/* 27 */
    {9991, 29, 21},	/* 28 */
    {10661, 30, 21},	/* 29 */
    {11358, 31, 20},	/* 30 */
    {12082, 32, 20},	/* 31 */
    {12834, 33, 20},	/* 32 */
    {13614, 34, 19},	/* 33 */
    {14424, 35, 19},	/* 34 */
    {15265, 36, 19},	/* 35 */
    {16137, 37, 19},	/* 36 */
    {17042, 38, 18},	/* 37 */
    {17981, 39, 18},	/* 38 */
    {18955, 40, 18},	/* 39 */
    {19965, 41, 17},	/* 40 */
    {21013, 42, 17},	/* 41 */
    {22101, 43, 17},	/* 42 */
    {23230, 44, 17},	/* 43 */
    {24402, 45, 16},	/* 44 */
    {25618, 46, 16},	/* 45 */
    {26881, 47, 16},	/* 46 */
    {28193, 48, 16},	/* 47 */
    {29557, 49, 15},	/* 48 */
    {30975, 50, 15},	/* 49 */
    {32450, 51, 15},	/* 50 */
    {33986, 52, 15},	/* 51 */
    {35586, 53, 14},	/* 52 */
    {37253, 54, 14},	/* 53 */
    {38992, 55, 14},	/* 54 */
    {40808, 56, 14},	/* 55 */
    {42707, 57, 13},	/* 56 */
    {44694, 58, 13},	/* 57 */
    {46776, 59, 13},	/* 58 */
    {48961, 60, 13},	/* 59 */
    {51258, 61, 13},	/* 60 */
    {53677, 62, 12},	/* 61 */
    {56230, 63, 12},	/* 62 */
    {58932, 64, 12},	/* 63 */
    {61799, 65, 12},	/* 64 */
    {64851, 66, 11},	/* 65 */
    {68113, 67, 11},	/* 66 */
    {71617, 68, 11},	/* 67 */
    {75401, 69, 10},	/* 68 */
    {79517, 70, 10},	/* 69 */
    {84035, 71, 10},	/* 70 */
    {89053, 72, 10},	/* 71 */
    {94717, 73, 9}	/* 72 */
};
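
/*
 * Example of a table lookup, assuming a cwnd of 512000 bytes: cur_val =
 * 512000 >> 10 = 500, which first falls below the cwnd column at row 5
 * ({663, 6, 35}), so cwnd grows by 6 << 10 = 6144 bytes per increase, and
 * a loss at that operating point cuts cwnd by 35 percent.
 */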

static void
sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int cur_val, i, indx, incr;
    int old_cwnd = net->cwnd;

    cur_val = net->cwnd >> 10;
    indx = SCTP_HS_TABLE_SIZE - 1;

    if (cur_val < sctp_cwnd_adjust[0].cwnd) {
        /* normal mode */
        if (net->net_ack > net->mtu) {
            net->cwnd += net->mtu;
        } else {
            net->cwnd += net->net_ack;
        }
    } else {
        for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
            if (cur_val < sctp_cwnd_adjust[i].cwnd) {
                indx = i;
                break;
            }
        }
        net->last_hs_used = indx;
        incr = (((int32_t)sctp_cwnd_adjust[indx].increase) << 10);
        net->cwnd += incr;
    }
    sctp_enforce_cwnd_limit(&stcb->asoc, net);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS);
    }
}

static void
sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    int cur_val, i, indx;
    int old_cwnd = net->cwnd;

    cur_val = net->cwnd >> 10;
    if (cur_val < sctp_cwnd_adjust[0].cwnd) {
        /* normal mode */
        net->ssthresh = net->cwnd / 2;
        if (net->ssthresh < (net->mtu * 2)) {
            net->ssthresh = 2 * net->mtu;
        }
        net->cwnd = net->ssthresh;
    } else {
        /* drop by the proper amount */
        net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
            (int32_t)sctp_cwnd_adjust[net->last_hs_used].drop_percent);
        net->cwnd = net->ssthresh;
        /* now where are we */
        indx = net->last_hs_used;
        cur_val = net->cwnd >> 10;
        /* reset where we are in the table */
        if (cur_val < sctp_cwnd_adjust[0].cwnd) {
            /* fell out of hs */
            net->last_hs_used = 0;
        } else {
            for (i = indx; i >= 1; i--) {
                if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
                    break;
                }
            }
            net->last_hs_used = indx;
        }
    }
    sctp_enforce_cwnd_limit(&stcb->asoc, net);
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
    }
}

static void
sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_nets *net;

    /*
     * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
     * (net->fast_retran_loss_recovery == 0)))
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if ((asoc->fast_retran_loss_recovery == 0) ||
            (asoc->sctp_cmt_on_off > 0)) {
            /* out of a RFC2582 Fast recovery window? */
            if (net->net_ack > 0) {
                /*
                 * per section 7.2.3, are there any
                 * destinations that had a fast retransmit
                 * to them. If so what we need to do is
                 * adjust ssthresh and cwnd.
                 */
                struct sctp_tmit_chunk *lchk;

                sctp_hs_cwnd_decrease(stcb, net);

                lchk = TAILQ_FIRST(&asoc->send_queue);

                net->partial_bytes_acked = 0;
                /* Turn on fast recovery window */
                asoc->fast_retran_loss_recovery = 1;
                if (lchk == NULL) {
                    /* Mark end of the window */
                    asoc->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                /*
                 * CMT fast recovery -- per destination
                 * recovery variable.
                 */
                net->fast_retran_loss_recovery = 1;

                if (lchk == NULL) {
                    /* Mark end of the window */
                    net->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net,
                    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_2);
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
        } else if (net->net_ack > 0) {
            /*
             * Mark a peg that we WOULD have done a cwnd
             * reduction but RFC2582 prevented this action.
             */
            SCTP_STAT_INCR(sctps_fastretransinrtt);
        }
    }
}

static void
sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
    struct sctp_nets *net;

    /******************************/
    /* update cwnd and Early FR   */
    /******************************/
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code. Need to debug.
         */
        if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
            if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
                SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
                net->will_exit_fast_recovery = 1;
            }
        }
#endif
        /* if nothing was acked on this destination skip it */
        if (net->net_ack == 0) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
            }
            continue;
        }
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code
         */
        /*
         * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
         * && net->will_exit_fast_recovery == 0) { @@@ Do something
         * } else if (sctp_cmt_on_off == 0 &&
         * asoc->fast_retran_loss_recovery && will_exit == 0) {
         */
#endif
        if (asoc->fast_retran_loss_recovery &&
            (will_exit == 0) &&
            (asoc->sctp_cmt_on_off == 0)) {
            /*
             * If we are in loss recovery we skip any cwnd
             * update
             */
            return;
        }
        /*
         * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
         * moved.
         */
        if (accum_moved ||
            ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
            /* If the cumulative ack moved we can proceed */
            if (net->cwnd <= net->ssthresh) {
                /* We are in slow start */
                if (net->flight_size + net->net_ack >= net->cwnd) {
                    sctp_hs_cwnd_increase(stcb, net);
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_SS);
                    }
                }
            } else {
                /* We are in congestion avoidance */
                net->partial_bytes_acked += net->net_ack;
                if ((net->flight_size + net->net_ack >= net->cwnd) &&
                    (net->partial_bytes_acked >= net->cwnd)) {
                    net->partial_bytes_acked -= net->cwnd;
                    net->cwnd += net->mtu;
                    sctp_enforce_cwnd_limit(asoc, net);
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->mtu,
                            SCTP_CWND_LOG_FROM_CA);
                    }
                } else {
                    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                        sctp_log_cwnd(stcb, net, net->net_ack,
                            SCTP_CWND_LOG_NOADV_CA);
                    }
                }
            }
        } else {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu,
                    SCTP_CWND_LOG_NO_CUMACK);
            }
        }
    }
}

/*
 * H-TCP congestion control. The algorithm is detailed in:
 * R.N.Shorten, D.J.Leith:
 *   "H-TCP: TCP for high-speed and long-distance networks"
 *   Proc. PFLDnet, Argonne, 2004.
 * http://www.hamilton.ie/net/htcp3.pdf
 */

static int use_rtt_scaling = 1;
static int use_bandwidth_switch = 1;

static inline int
between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
{
    return (seq3 - seq2 >= seq1 - seq2);
}

static inline uint32_t
htcp_cong_time(struct htcp *ca)
{
    return (sctp_get_tick_count() - ca->last_cong);
}

static inline uint32_t
htcp_ccount(struct htcp *ca)
{
    return (htcp_cong_time(ca) / ca->minRTT);
}

static inline void
htcp_reset(struct htcp *ca)
{
    ca->undo_last_cong = ca->last_cong;
    ca->undo_maxRTT = ca->maxRTT;
    ca->undo_old_maxB = ca->old_maxB;
    ca->last_cong = sctp_get_tick_count();
}

#ifdef SCTP_NOT_USED

static uint32_t
htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
    net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
    net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
    return (max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu));
}

#endif

static inline void
measure_rtt(struct sctp_nets *net)
{
    uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT;

    /* keep track of minimum RTT seen so far, minRTT is zero at first */
    if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
        net->cc_mod.htcp_ca.minRTT = srtt;

    /* max RTT */
    if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
        if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
            net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
        if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + MSEC_TO_TICKS(20))
            net->cc_mod.htcp_ca.maxRTT = srtt;
    }
}

static void
measure_achieved_throughput(struct sctp_nets *net)
{
    uint32_t now = sctp_get_tick_count();

    if (net->fast_retran_ip == 0)
        net->cc_mod.htcp_ca.bytes_acked = net->net_ack;

    if (!use_bandwidth_switch)
        return;

    /* achieved throughput calculations */
    /* JRS - not 100% sure of this statement */
    if (net->fast_retran_ip == 1) {
        net->cc_mod.htcp_ca.bytecount = 0;
        net->cc_mod.htcp_ca.lasttime = now;
        return;
    }

    net->cc_mod.htcp_ca.bytecount += net->net_ack;
    if ((net->cc_mod.htcp_ca.bytecount >= net->cwnd - (((net->cc_mod.htcp_ca.alpha >> 7) ? (net->cc_mod.htcp_ca.alpha >> 7) : 1) * net->mtu)) &&
        (now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT) &&
        (net->cc_mod.htcp_ca.minRTT > 0)) {
        uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount / net->mtu * hz / (now - net->cc_mod.htcp_ca.lasttime);

        if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
            /* just after backoff */
            net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
        } else {
            net->cc_mod.htcp_ca.Bi = (3 * net->cc_mod.htcp_ca.Bi + cur_Bi) / 4;
            if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
                net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
            if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
                net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
        }
        net->cc_mod.htcp_ca.bytecount = 0;
        net->cc_mod.htcp_ca.lasttime = now;
    }
}

static inline void
htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
{
    if (use_bandwidth_switch) {
        uint32_t maxB = ca->maxB;
        uint32_t old_maxB = ca->old_maxB;

        ca->old_maxB = ca->maxB;

        if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
            ca->beta = BETA_MIN;
            ca->modeswitch = 0;
            return;
        }
    }
    if (ca->modeswitch && minRTT > (uint32_t)MSEC_TO_TICKS(10) && maxRTT) {
        ca->beta = (minRTT << 7) / maxRTT;
        if (ca->beta < BETA_MIN)
            ca->beta = BETA_MIN;
        else if (ca->beta > BETA_MAX)
            ca->beta = BETA_MAX;
    } else {
        ca->beta = BETA_MIN;
        ca->modeswitch = 1;
    }
}

static inline void
htcp_alpha_update(struct htcp *ca)
{
    uint32_t minRTT = ca->minRTT;
    uint32_t factor = 1;
    uint32_t diff = htcp_cong_time(ca);

    if (diff > (uint32_t)hz) {
        diff -= hz;
        factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
    }
    if (use_rtt_scaling && minRTT) {
        uint32_t scale = (hz << 3) / (10 * minRTT);

        scale = min(max(scale, 1U << 2), 10U << 3);	/* clamping ratio to
							 * interval [0.5,10]<<3 */
        factor = (factor << 3) / scale;
        if (!factor)
            factor = 1;
    }
    ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
    if (!ca->alpha)
        ca->alpha = ALPHA_BASE;
}
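
/*
 * Worked example of the alpha computation above (no rtt scaling,
 * illustrative numbers): two seconds past the last congestion event,
 * diff == 2 * hz on entry, so after diff -= hz the quadratic term gives
 * factor = 1 + (10 * hz + (hz / 2) * (hz / 2) / hz) / hz = 11. With beta
 * at BETA_MIN (64, i.e. 0.5 in 7-bit fixed point), alpha becomes
 * 2 * 11 * (128 - 64) = 1408, i.e. about 11 MTUs of growth per RTT once
 * shifted back down by 7.
 */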

/*
 * After we have the rtt data to calculate beta, we'd still prefer to wait one
 * rtt before we adjust our beta to ensure we are working from a consistent
 * data.
 *
 * This function should be called when we hit a congestion event since only at
 * that point do we really have a real sense of maxRTT (the queues en route
 * were getting just too full now).
 */
static void
htcp_param_update(struct sctp_nets *net)
{
    uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
    uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;

    htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
    htcp_alpha_update(&net->cc_mod.htcp_ca);

    /*
     * add slowly fading memory for maxRTT to accommodate routing
     * changes.
     */
    if (minRTT > 0 && maxRTT > minRTT)
        net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
}

static uint32_t
htcp_recalc_ssthresh(struct sctp_nets *net)
{
    htcp_param_update(net);
    return (max(((net->cwnd / net->mtu * net->cc_mod.htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu));
}
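
/*
 * Example of the backoff above (illustrative numbers): with cwnd = 100
 * MTUs and beta at BETA_MIN (64/128 = 0.5), ssthresh becomes
 * (100 * 64) >> 7 = 50 MTUs; with beta at BETA_MAX (102/128, roughly
 * 0.8) the cut is much gentler, about 79 MTUs, and never less than
 * 2 MTUs.
 */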

static void
htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    /*-
     * How to handle these functions?
     *	if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
     *		return;
     */
    if (net->cwnd <= net->ssthresh) {
        /* We are in slow start */
        if (net->flight_size + net->net_ack >= net->cwnd) {
            if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
                net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                    sctp_log_cwnd(stcb, net, net->mtu,
                        SCTP_CWND_LOG_FROM_SS);
                }
            } else {
                net->cwnd += net->net_ack;
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                    sctp_log_cwnd(stcb, net, net->net_ack,
                        SCTP_CWND_LOG_FROM_SS);
                }
            }
            sctp_enforce_cwnd_limit(&stcb->asoc, net);
        } else {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, net->net_ack,
                    SCTP_CWND_LOG_NOADV_SS);
            }
        }
    } else {
        measure_rtt(net);

        /*
         * In dangerous area, increase slowly. In theory this is
         * net->cwnd += alpha / net->cwnd
         */
        /* What is snd_cwnd_cnt?? */
        if (((net->partial_bytes_acked / net->mtu * net->cc_mod.htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
            /*-
             * Does SCTP have a cwnd clamp?
             * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
             */
            net->cwnd += net->mtu;
            net->partial_bytes_acked = 0;
            sctp_enforce_cwnd_limit(&stcb->asoc, net);
            htcp_alpha_update(&net->cc_mod.htcp_ca);
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu,
                    SCTP_CWND_LOG_FROM_CA);
            }
        } else {
            net->partial_bytes_acked += net->net_ack;
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, net->net_ack,
                    SCTP_CWND_LOG_NOADV_CA);
            }
        }

        net->cc_mod.htcp_ca.bytes_acked = net->mtu;
    }
}

#ifdef SCTP_NOT_USED
/* Lower bound on congestion window. */
static uint32_t
htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    return (net->ssthresh);
}
#endif

static void
htcp_init(struct sctp_nets *net)
{
    memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
    net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
    net->cc_mod.htcp_ca.beta = BETA_MIN;
    net->cc_mod.htcp_ca.bytes_acked = net->mtu;
    net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
}

static void
sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
{
    /*
     * We take the max of the burst limit times a MTU or the
     * INITIAL_CWND. We then limit this to 4 MTU's of sending.
     */
    net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
    net->ssthresh = stcb->asoc.peers_rwnd;
    sctp_enforce_cwnd_limit(&stcb->asoc, net);
    htcp_init(net);

    if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
        sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
    }
}

static void
sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
    struct sctp_association *asoc,
    int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
{
    struct sctp_nets *net;

    /******************************/
    /* update cwnd and Early FR   */
    /******************************/
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code. Need to debug.
         */
        if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
            if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
                SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
                net->will_exit_fast_recovery = 1;
            }
        }
#endif
        /* if nothing was acked on this destination skip it */
        if (net->net_ack == 0) {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
            }
            continue;
        }
#ifdef JANA_CMT_FAST_RECOVERY
        /*
         * CMT fast recovery code
         */
        /*
         * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
         * && net->will_exit_fast_recovery == 0) { @@@ Do something
         * } else if (sctp_cmt_on_off == 0 &&
         * asoc->fast_retran_loss_recovery && will_exit == 0) {
         */
#endif
        if (asoc->fast_retran_loss_recovery &&
            (will_exit == 0) &&
            (asoc->sctp_cmt_on_off == 0)) {
            /*
             * If we are in loss recovery we skip any cwnd
             * update
             */
            return;
        }
        /*
         * CMT: CUC algorithm. Update cwnd if pseudo-cumack has
         * moved.
         */
        if (accum_moved ||
            ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
            htcp_cong_avoid(stcb, net);
            measure_achieved_throughput(net);
        } else {
            if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
                sctp_log_cwnd(stcb, net, net->mtu,
                    SCTP_CWND_LOG_NO_CUMACK);
            }
        }
    }
}

static void
sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
    struct sctp_association *asoc)
{
    struct sctp_nets *net;

    /*
     * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
     * (net->fast_retran_loss_recovery == 0)))
     */
    TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
        if ((asoc->fast_retran_loss_recovery == 0) ||
            (asoc->sctp_cmt_on_off > 0)) {
            /* out of a RFC2582 Fast recovery window? */
            if (net->net_ack > 0) {
                /*
                 * per section 7.2.3, are there any
                 * destinations that had a fast retransmit
                 * to them. If so what we need to do is
                 * adjust ssthresh and cwnd.
                 */
                struct sctp_tmit_chunk *lchk;
                int old_cwnd = net->cwnd;

                /* JRS - reset as if state were changed */
                htcp_reset(&net->cc_mod.htcp_ca);
                net->ssthresh = htcp_recalc_ssthresh(net);
                net->cwnd = net->ssthresh;
                sctp_enforce_cwnd_limit(asoc, net);
                if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
                    sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
                        SCTP_CWND_LOG_FROM_FR);
                }
                lchk = TAILQ_FIRST(&asoc->send_queue);

                net->partial_bytes_acked = 0;
                /* Turn on fast recovery window */
                asoc->fast_retran_loss_recovery = 1;
                if (lchk == NULL) {
                    /* Mark end of the window */
                    asoc->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                /*
                 * CMT fast recovery -- per destination
                 * recovery variable.
                 */
                net->fast_retran_loss_recovery = 1;

                if (lchk == NULL) {
                    /* Mark end of the window */
                    net->fast_recovery_tsn = asoc->sending_seq - 1;
                } else {
                    net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
                }

                sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net,
                    SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_3);
                sctp_timer_start(SCTP_TIMER_TYPE_SEND,
                    stcb->sctp_ep, stcb, net);
            }
        } else if (net->net_ack > 0) {
            /*
             * Mark a peg that we WOULD have done a cwnd
             * reduction but RFC2582 prevented this action.
             */
            SCTP_STAT_INCR(sctps_fastretransinrtt);
        }
    }
}

static void
sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
    struct sctp_nets *net)
{
    int old_cwnd = net->cwnd;

    /* JRS - reset as if the state were being changed to timeout */
    htcp_reset(&net->cc_mod.htcp_ca);
    net->ssthresh = htcp_recalc_ssthresh(net);
    net->cwnd = net->mtu;
    net->partial_bytes_acked = 0;
    if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
        sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
    }
}

static void
sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
    struct sctp_nets *net, int in_window, int num_pkt_lost SCTP_UNUSED)
{
    int old_cwnd;

    old_cwnd = net->cwnd;

    /* JRS - reset hctp as if state changed */
    if (in_window == 0) {
        htcp_reset(&net->cc_mod.htcp_ca);
        SCTP_STAT_INCR(sctps_ecnereducedcwnd);
        net->ssthresh = htcp_recalc_ssthresh(net);
        if (net->ssthresh < net->mtu) {
            net->ssthresh = net->mtu;
            /* here back off the timer as well, to slow us down */
            net->RTO <<= 1;
        }
        net->cwnd = net->ssthresh;
        sctp_enforce_cwnd_limit(&stcb->asoc, net);
        if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
            sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
        }
    }
}

const struct sctp_cc_functions sctp_cc_functions[] = {
    {
        .sctp_set_initial_cc_param = sctp_set_initial_cc_param,
        .sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
        .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
        .sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
        .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
        .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
        .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
        .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
    },
    {
        .sctp_set_initial_cc_param = sctp_set_initial_cc_param,
        .sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
        .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
        .sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
        .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
        .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
        .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
        .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
    },
    {
        .sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
        .sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
        .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
        .sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
        .sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
        .sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
        .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
        .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
    },
    {
        .sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param,
        .sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack,
        .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
        .sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
        .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
        .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo,
        .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
        .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
        .sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted,
        .sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged,
        .sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins,
        .sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack,
        .sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option,
        .sctp_rtt_calculated = sctp_rtt_rtcc_calculated
    }
};