2 * SPDX-License-Identifier: BSD-3-Clause
4 * Copyright (c) 2001-2007, by Cisco Systems, Inc. All rights reserved.
5 * Copyright (c) 2008-2012, by Randall Stewart. All rights reserved.
6 * Copyright (c) 2008-2012, by Michael Tuexen. All rights reserved.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions are met:
11 * a) Redistributions of source code must retain the above copyright notice,
12 * this list of conditions and the following disclaimer.
14 * b) Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in
16 * the documentation and/or other materials provided with the distribution.
18 * c) Neither the name of Cisco Systems, Inc. nor the names of its
19 * contributors may be used to endorse or promote products derived
20 * from this software without specific prior written permission.
22 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
23 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
24 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
25 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
26 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
27 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
28 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
29 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
30 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
31 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
32 * THE POSSIBILITY OF SUCH DAMAGE.
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
38 #include <netinet/sctp_os.h>
39 #include <netinet/sctp_var.h>
40 #include <netinet/sctp_sysctl.h>
41 #include <netinet/sctp_pcb.h>
42 #include <netinet/sctp_header.h>
43 #include <netinet/sctputil.h>
44 #include <netinet/sctp_output.h>
45 #include <netinet/sctp_input.h>
46 #include <netinet/sctp_indata.h>
47 #include <netinet/sctp_uio.h>
48 #include <netinet/sctp_timer.h>
49 #include <netinet/sctp_auth.h>
50 #include <netinet/sctp_asconf.h>
51 #include <netinet/sctp_dtrace_declare.h>
53 #define SHIFT_MPTCP_MULTI_N 40
54 #define SHIFT_MPTCP_MULTI_Z 16
55 #define SHIFT_MPTCP_MULTI 8
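/*
 * Illustrative note (not part of the original source): these shifts provide
 * fixed-point precision for the MPTCP-like coupled increase computed further
 * below. Roughly, as a sketch of the code in sctp_cwnd_update_after_sack_common():
 *
 *   t_path_mptcp = sum_i (cwnd_i << SHIFT_MPTCP_MULTI_Z) / (mtu_i * srtt_i)
 *   max_path     = max_i (cwnd_i << SHIFT_MPTCP_MULTI_N) / (mtu_i * srtt_i^2)
 *   alpha       ~= max_path / t_path_mptcp^2
 *
 * and the per-ack congestion-avoidance increase then scales (alpha * cwnd)
 * back down by one of these shifts, so the large shift values only exist to
 * keep the intermediate integer terms from losing precision.
 */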
58 sctp_enforce_cwnd_limit(struct sctp_association *assoc, struct sctp_nets *net)
60 if ((assoc->max_cwnd > 0) &&
61 (net->cwnd > assoc->max_cwnd) &&
62 (net->cwnd > (net->mtu - sizeof(struct sctphdr)))) {
63 net->cwnd = assoc->max_cwnd;
64 if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
65 net->cwnd = net->mtu - sizeof(struct sctphdr);
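/*
 * Worked example (illustrative only): with assoc->max_cwnd = 100000,
 * net->mtu = 1500 and net->cwnd = 250000, the clamp above lowers cwnd to
 * 100000; the second check then keeps cwnd at no less than one MTU's worth
 * of payload (1500 - sizeof(struct sctphdr) = 1488 bytes), which only
 * matters when max_cwnd is configured smaller than the path MTU.
 */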
71 sctp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
73 struct sctp_association *assoc;
77 cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
78 if (cwnd_in_mtu == 0) {
79 /* Using 0 means that the value of RFC 4960 is used. */
80 net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
/* We take the minimum of the burst limit and the initial congestion window. */
86 if ((assoc->max_burst > 0) && (cwnd_in_mtu > assoc->max_burst))
87 cwnd_in_mtu = assoc->max_burst;
88 net->cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
90 if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
91 (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
92 /* In case of resource pooling initialize appropriately */
93 net->cwnd /= assoc->numnets;
94 if (net->cwnd < (net->mtu - sizeof(struct sctphdr))) {
95 net->cwnd = net->mtu - sizeof(struct sctphdr);
98 sctp_enforce_cwnd_limit(assoc, net);
99 net->ssthresh = assoc->peers_rwnd;
100 SDT_PROBE5(sctp, cwnd, net, init,
101 stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
103 if (SCTP_BASE_SYSCTL(sctp_logging_level) &
104 (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
105 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
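/*
 * Worked example (illustrative, assuming sctp_initial_cwnd = 0, an MTU of
 * 1500 and SCTP_INITIAL_CWND = 4380): the RFC 4960 rule above yields
 *   cwnd = min(4 * 1500, max(2 * 1500, 4380)) = min(6000, 4380) = 4380 bytes,
 * i.e. roughly three full-sized packets. With SCTP_CMT_RPV1/RPV2 and, say,
 * four paths, each path would instead start at about 4380 / 4 = 1095 bytes,
 * which the floor check immediately raises back to one MTU's payload.
 */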
110 sctp_cwnd_update_after_fr(struct sctp_tcb *stcb,
111 struct sctp_association *asoc)
113 struct sctp_nets *net;
114 uint32_t t_ssthresh, t_cwnd;
115 uint64_t t_ucwnd_sbw;
117 /* MT FIXME: Don't compute this over and over again */
121 if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
122 (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
123 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
124 t_ssthresh += net->ssthresh;
126 if (net->lastsa > 0) {
127 t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)net->lastsa;
130 if (t_ucwnd_sbw == 0) {
136 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
137 * (net->fast_retran_loss_recovery == 0)))
139 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
140 if ((asoc->fast_retran_loss_recovery == 0) ||
141 (asoc->sctp_cmt_on_off > 0)) {
142 /* out of a RFC2582 Fast recovery window? */
143 if (net->net_ack > 0) {
/*
 * Per RFC 4960 section 7.2.3: were there any destinations that had a
 * fast retransmit sent to them? If so, we need to adjust ssthresh
 * and cwnd.
 */
150 struct sctp_tmit_chunk *lchk;
151 int old_cwnd = net->cwnd;
153 if ((asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) ||
154 (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2)) {
155 if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV1) {
156 net->ssthresh = (uint32_t)(((uint64_t)4 *
158 (uint64_t)net->ssthresh) /
159 (uint64_t)t_ssthresh);
162 if (asoc->sctp_cmt_on_off == SCTP_CMT_RPV2) {
/* lastsa>>3; we don't need to divide ... */
/*
 * Short version: equal to this path's share of the summed
 * cwnd/bandwidth ratio (t_ucwnd_sbw).
 */
177 net->ssthresh = (uint32_t)(((uint64_t)4 *
179 (uint64_t)net->cwnd) /
182 /* INCREASE FACTOR */ ;
184 if ((net->cwnd > t_cwnd / 2) &&
185 (net->ssthresh < net->cwnd - t_cwnd / 2)) {
186 net->ssthresh = net->cwnd - t_cwnd / 2;
188 if (net->ssthresh < net->mtu) {
189 net->ssthresh = net->mtu;
192 net->ssthresh = net->cwnd / 2;
193 if (net->ssthresh < (net->mtu * 2)) {
194 net->ssthresh = 2 * net->mtu;
197 net->cwnd = net->ssthresh;
198 sctp_enforce_cwnd_limit(asoc, net);
199 SDT_PROBE5(sctp, cwnd, net, fr,
200 stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
201 old_cwnd, net->cwnd);
202 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
203 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
204 SCTP_CWND_LOG_FROM_FR);
206 lchk = TAILQ_FIRST(&asoc->send_queue);
208 net->partial_bytes_acked = 0;
209 /* Turn on fast recovery window */
210 asoc->fast_retran_loss_recovery = 1;
212 /* Mark end of the window */
213 asoc->fast_recovery_tsn = asoc->sending_seq - 1;
215 asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
219 * CMT fast recovery -- per destination
222 net->fast_retran_loss_recovery = 1;
225 /* Mark end of the window */
226 net->fast_recovery_tsn = asoc->sending_seq - 1;
228 net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
231 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
232 stcb->sctp_ep, stcb, net,
233 SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_1);
234 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
235 stcb->sctp_ep, stcb, net);
237 } else if (net->net_ack > 0) {
239 * Mark a peg that we WOULD have done a cwnd
240 * reduction but RFC2582 prevented this action.
242 SCTP_STAT_INCR(sctps_fastretransinrtt);
247 /* Defines for instantaneous bw decisions */
248 #define SCTP_INST_LOOSING 1 /* Losing to other flows */
249 #define SCTP_INST_NEUTRAL 2 /* Neutral, no indication */
250 #define SCTP_INST_GAINING 3 /* Gaining, step down possible */
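/*
 * Illustrative example of how these are assigned (see cc_bw_limit() below):
 * the instantaneous bandwidth for the last RTT, inst_bw, is compared against
 * the longer-term average nbw with a tolerance of inst_bw >> sctp_rttvar_bw.
 * E.g. with inst_bw = 120000 and a shift of 4, the margin is 7500, so
 * nbw > 127500 reads as SCTP_INST_LOOSING (we are losing ground to other
 * flows relative to our average); otherwise the indication is gaining or
 * neutral depending on whether inst_bw exceeds nbw.
 */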
254 cc_bw_same(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw,
255 uint64_t rtt_offset, uint64_t vtag, uint8_t inst_ind)
257 uint64_t oth, probepoint;
259 probepoint = (((uint64_t)net->cwnd) << 32);
260 if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
/* rtt increased; we don't update bw, so we don't update the rtt either. */
266 probepoint |= ((5 << 16) | 1);
267 SDT_PROBE5(sctp, cwnd, net, rttvar,
269 ((net->cc_mod.rtcc.lbw << 32) | nbw),
270 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
273 if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
274 if (net->cc_mod.rtcc.last_step_state == 5)
275 net->cc_mod.rtcc.step_cnt++;
277 net->cc_mod.rtcc.step_cnt = 1;
278 net->cc_mod.rtcc.last_step_state = 5;
279 if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
280 ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
281 ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
282 /* Try a step down */
283 oth = net->cc_mod.rtcc.vol_reduce;
285 oth |= net->cc_mod.rtcc.step_cnt;
287 oth |= net->cc_mod.rtcc.last_step_state;
288 SDT_PROBE5(sctp, cwnd, net, rttstep,
290 ((net->cc_mod.rtcc.lbw << 32) | nbw),
291 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
294 if (net->cwnd > (4 * net->mtu)) {
295 net->cwnd -= net->mtu;
296 net->cc_mod.rtcc.vol_reduce++;
298 net->cc_mod.rtcc.step_cnt = 0;
304 if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
/*
 * rtt decreased, there could be more room. We update both
 * the bw and the rtt here to lock this in as a good step down.
 */
311 probepoint |= ((6 << 16) | 0);
312 SDT_PROBE5(sctp, cwnd, net, rttvar,
314 ((net->cc_mod.rtcc.lbw << 32) | nbw),
315 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
318 if (net->cc_mod.rtcc.steady_step) {
319 oth = net->cc_mod.rtcc.vol_reduce;
321 oth |= net->cc_mod.rtcc.step_cnt;
323 oth |= net->cc_mod.rtcc.last_step_state;
324 SDT_PROBE5(sctp, cwnd, net, rttstep,
326 ((net->cc_mod.rtcc.lbw << 32) | nbw),
327 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
330 if ((net->cc_mod.rtcc.last_step_state == 5) &&
331 (net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step)) {
332 /* Step down worked */
333 net->cc_mod.rtcc.step_cnt = 0;
336 net->cc_mod.rtcc.last_step_state = 6;
337 net->cc_mod.rtcc.step_cnt = 0;
340 net->cc_mod.rtcc.lbw = nbw;
341 net->cc_mod.rtcc.lbw_rtt = net->rtt;
342 net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
343 if (inst_ind == SCTP_INST_GAINING)
345 else if (inst_ind == SCTP_INST_NEUTRAL)
/* Ok, bw and rtt remained the same .. no update to any state. */
354 probepoint |= ((7 << 16) | net->cc_mod.rtcc.ret_from_eq);
355 SDT_PROBE5(sctp, cwnd, net, rttvar,
357 ((net->cc_mod.rtcc.lbw << 32) | nbw),
358 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
361 if ((net->cc_mod.rtcc.steady_step) && (inst_ind != SCTP_INST_LOOSING)) {
362 if (net->cc_mod.rtcc.last_step_state == 5)
363 net->cc_mod.rtcc.step_cnt++;
365 net->cc_mod.rtcc.step_cnt = 1;
366 net->cc_mod.rtcc.last_step_state = 5;
367 if ((net->cc_mod.rtcc.step_cnt == net->cc_mod.rtcc.steady_step) ||
368 ((net->cc_mod.rtcc.step_cnt > net->cc_mod.rtcc.steady_step) &&
369 ((net->cc_mod.rtcc.step_cnt % net->cc_mod.rtcc.steady_step) == 0))) {
370 /* Try a step down */
371 if (net->cwnd > (4 * net->mtu)) {
372 net->cwnd -= net->mtu;
373 net->cc_mod.rtcc.vol_reduce++;
376 net->cc_mod.rtcc.step_cnt = 0;
380 if (inst_ind == SCTP_INST_GAINING)
382 else if (inst_ind == SCTP_INST_NEUTRAL)
385 return ((int)net->cc_mod.rtcc.ret_from_eq);
389 cc_bw_decrease(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t rtt_offset,
390 uint64_t vtag, uint8_t inst_ind)
392 uint64_t oth, probepoint;
394 /* Bandwidth decreased. */
395 probepoint = (((uint64_t)net->cwnd) << 32);
396 if (net->rtt > net->cc_mod.rtcc.lbw_rtt + rtt_offset) {
398 /* Did we add more */
399 if ((net->cwnd > net->cc_mod.rtcc.cwnd_at_bw_set) &&
400 (inst_ind != SCTP_INST_LOOSING)) {
401 /* We caused it maybe.. back off? */
403 probepoint |= ((1 << 16) | 1);
404 SDT_PROBE5(sctp, cwnd, net, rttvar,
406 ((net->cc_mod.rtcc.lbw << 32) | nbw),
407 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
410 if (net->cc_mod.rtcc.ret_from_eq) {
/* Switch over to CA if we are less aggressive. */
415 net->ssthresh = net->cwnd - 1;
416 net->partial_bytes_acked = 0;
421 probepoint |= ((2 << 16) | 0);
422 SDT_PROBE5(sctp, cwnd, net, rttvar,
424 ((net->cc_mod.rtcc.lbw << 32) | nbw),
425 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
428 /* Someone else - fight for more? */
429 if (net->cc_mod.rtcc.steady_step) {
430 oth = net->cc_mod.rtcc.vol_reduce;
432 oth |= net->cc_mod.rtcc.step_cnt;
434 oth |= net->cc_mod.rtcc.last_step_state;
435 SDT_PROBE5(sctp, cwnd, net, rttstep,
437 ((net->cc_mod.rtcc.lbw << 32) | nbw),
438 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
/* Did we voluntarily give up some? If so, take one back. */
445 if ((net->cc_mod.rtcc.vol_reduce) &&
446 (inst_ind != SCTP_INST_GAINING)) {
447 net->cwnd += net->mtu;
448 sctp_enforce_cwnd_limit(&stcb->asoc, net);
449 net->cc_mod.rtcc.vol_reduce--;
451 net->cc_mod.rtcc.last_step_state = 2;
452 net->cc_mod.rtcc.step_cnt = 0;
455 } else if (net->rtt < net->cc_mod.rtcc.lbw_rtt - rtt_offset) {
456 /* bw & rtt decreased */
458 probepoint |= ((3 << 16) | 0);
459 SDT_PROBE5(sctp, cwnd, net, rttvar,
461 ((net->cc_mod.rtcc.lbw << 32) | nbw),
462 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
465 if (net->cc_mod.rtcc.steady_step) {
466 oth = net->cc_mod.rtcc.vol_reduce;
468 oth |= net->cc_mod.rtcc.step_cnt;
470 oth |= net->cc_mod.rtcc.last_step_state;
471 SDT_PROBE5(sctp, cwnd, net, rttstep,
473 ((net->cc_mod.rtcc.lbw << 32) | nbw),
474 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
477 if ((net->cc_mod.rtcc.vol_reduce) &&
478 (inst_ind != SCTP_INST_GAINING)) {
479 net->cwnd += net->mtu;
480 sctp_enforce_cwnd_limit(&stcb->asoc, net);
481 net->cc_mod.rtcc.vol_reduce--;
483 net->cc_mod.rtcc.last_step_state = 3;
484 net->cc_mod.rtcc.step_cnt = 0;
488 /* The bw decreased but rtt stayed the same */
490 probepoint |= ((4 << 16) | 0);
491 SDT_PROBE5(sctp, cwnd, net, rttvar,
493 ((net->cc_mod.rtcc.lbw << 32) | nbw),
494 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
497 if (net->cc_mod.rtcc.steady_step) {
498 oth = net->cc_mod.rtcc.vol_reduce;
500 oth |= net->cc_mod.rtcc.step_cnt;
502 oth |= net->cc_mod.rtcc.last_step_state;
503 SDT_PROBE5(sctp, cwnd, net, rttstep,
505 ((net->cc_mod.rtcc.lbw << 32) | nbw),
506 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
509 if ((net->cc_mod.rtcc.vol_reduce) &&
510 (inst_ind != SCTP_INST_GAINING)) {
511 net->cwnd += net->mtu;
512 sctp_enforce_cwnd_limit(&stcb->asoc, net);
513 net->cc_mod.rtcc.vol_reduce--;
515 net->cc_mod.rtcc.last_step_state = 4;
516 net->cc_mod.rtcc.step_cnt = 0;
519 net->cc_mod.rtcc.lbw = nbw;
520 net->cc_mod.rtcc.lbw_rtt = net->rtt;
521 net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
522 if (inst_ind == SCTP_INST_GAINING) {
530 cc_bw_increase(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw, uint64_t vtag)
532 uint64_t oth, probepoint;
535 * BW increased, so update and return 0, since all actions in our
536 * table say to do the normal CC update. Note that we pay no
537 * attention to the inst_ind since our overall sum is increasing.
540 probepoint = (((uint64_t)net->cwnd) << 32);
541 SDT_PROBE5(sctp, cwnd, net, rttvar,
543 ((net->cc_mod.rtcc.lbw << 32) | nbw),
544 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
547 if (net->cc_mod.rtcc.steady_step) {
548 oth = net->cc_mod.rtcc.vol_reduce;
550 oth |= net->cc_mod.rtcc.step_cnt;
552 oth |= net->cc_mod.rtcc.last_step_state;
553 SDT_PROBE5(sctp, cwnd, net, rttstep,
555 ((net->cc_mod.rtcc.lbw << 32) | nbw),
556 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
559 net->cc_mod.rtcc.last_step_state = 0;
560 net->cc_mod.rtcc.step_cnt = 0;
561 net->cc_mod.rtcc.vol_reduce = 0;
563 net->cc_mod.rtcc.lbw = nbw;
564 net->cc_mod.rtcc.lbw_rtt = net->rtt;
565 net->cc_mod.rtcc.cwnd_at_bw_set = net->cwnd;
569 /* RTCC Algorithm to limit growth of cwnd, return
570 * true if you want to NOT allow cwnd growth
573 cc_bw_limit(struct sctp_tcb *stcb, struct sctp_nets *net, uint64_t nbw)
575 uint64_t bw_offset, rtt_offset;
576 uint64_t probepoint, rtt, vtag;
577 uint64_t bytes_for_this_rtt, inst_bw;
578 uint64_t div, inst_off;
/*-
 * Here we need to see if we want to limit cwnd growth
 * due to an increase in the overall rtt but no increase
 * in bw. We use the following table to figure out what
 * we should do. When we return 0, the cc update goes on
 * as planned. If we return 1, then no cc update happens
 * and cwnd stays where it is at.
 * ----------------------------------
 *   BW  |  RTT  | ACTION
 * *********************************
 *   INC |  INC  | return 0
 * ----------------------------------
 *   INC | SAME  | return 0
 * ----------------------------------
 *   INC | DECR  | return 0
 * ----------------------------------
 *  SAME |  INC  | return 1
 * ----------------------------------
 *  SAME | SAME  | return 1
 * ----------------------------------
 *  SAME | DECR  | return 0
 * ----------------------------------
 *  DECR |  INC  | return 0 or 1 based on if we caused it.
 * ----------------------------------
 *  DECR | SAME  | return 0
 * ----------------------------------
 *  DECR | DECR  | return 0
 * ----------------------------------
 *
 * We are a bit fuzzy on what an increase or decrease is.
 * For BW it is the same if it did not change within 1/64th.
 * For RTT it stayed the same if it did not change within
 * 1/32nd.
 */
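/*
 * Worked example (illustrative, using the 1/64 and 1/32 figures from the
 * note above, i.e. sctp_rttvar_bw = 6 and sctp_rttvar_rtt = 5): with
 * lbw = 640000 the BW tolerance is lbw >> 6 = 10000, so any nbw between
 * 630000 and 650000 is treated as "SAME"; with lbw_rtt = 64000 the RTT
 * tolerance is 64000 >> 5 = 2000, so the RTT counts as unchanged while it
 * stays within +/- 2000 of the last locked-in measurement.
 */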
620 bw_shift = SCTP_BASE_SYSCTL(sctp_rttvar_bw);
621 rtt = stcb->asoc.my_vtag;
622 vtag = (rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) | (stcb->rport);
623 probepoint = (((uint64_t)net->cwnd) << 32);
625 if (net->cc_mod.rtcc.rtt_set_this_sack) {
626 net->cc_mod.rtcc.rtt_set_this_sack = 0;
627 bytes_for_this_rtt = net->cc_mod.rtcc.bw_bytes - net->cc_mod.rtcc.bw_bytes_at_last_rttc;
628 net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
630 div = net->rtt / 1000;
632 inst_bw = bytes_for_this_rtt / div;
633 inst_off = inst_bw >> bw_shift;
635 inst_ind = SCTP_INST_GAINING;
636 else if ((inst_bw + inst_off) < nbw)
637 inst_ind = SCTP_INST_LOOSING;
639 inst_ind = SCTP_INST_NEUTRAL;
640 probepoint |= ((0xb << 16) | inst_ind);
642 inst_ind = net->cc_mod.rtcc.last_inst_ind;
643 inst_bw = bytes_for_this_rtt / (uint64_t)(net->rtt);
644 /* Can't determine do not change */
645 probepoint |= ((0xc << 16) | inst_ind);
648 inst_ind = net->cc_mod.rtcc.last_inst_ind;
649 inst_bw = bytes_for_this_rtt;
650 /* Can't determine do not change */
651 probepoint |= ((0xd << 16) | inst_ind);
653 SDT_PROBE5(sctp, cwnd, net, rttvar,
655 ((nbw << 32) | inst_bw),
656 ((net->cc_mod.rtcc.lbw_rtt << 32) | rtt),
660 /* No rtt measurement, use last one */
661 inst_ind = net->cc_mod.rtcc.last_inst_ind;
663 bw_offset = net->cc_mod.rtcc.lbw >> bw_shift;
664 if (nbw > net->cc_mod.rtcc.lbw + bw_offset) {
665 ret = cc_bw_increase(stcb, net, nbw, vtag);
668 rtt_offset = net->cc_mod.rtcc.lbw_rtt >> SCTP_BASE_SYSCTL(sctp_rttvar_rtt);
669 if (nbw < net->cc_mod.rtcc.lbw - bw_offset) {
670 ret = cc_bw_decrease(stcb, net, nbw, rtt_offset, vtag, inst_ind);
/* If we reach here then we are in a situation where the bw stayed the same. */
677 ret = cc_bw_same(stcb, net, nbw, rtt_offset, vtag, inst_ind);
679 net->cc_mod.rtcc.last_inst_ind = inst_ind;
684 sctp_cwnd_update_after_sack_common(struct sctp_tcb *stcb,
685 struct sctp_association *asoc,
686 int accum_moved, int reneged_all SCTP_UNUSED, int will_exit, int use_rtcc)
688 struct sctp_nets *net;
690 uint32_t t_ssthresh, t_cwnd, incr;
691 uint64_t t_ucwnd_sbw;
692 uint64_t t_path_mptcp;
693 uint64_t mptcp_like_alpha;
697 /* MT FIXME: Don't compute this over and over again */
702 mptcp_like_alpha = 1;
703 if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
704 (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2) ||
705 (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_MPTCP)) {
707 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
708 t_ssthresh += net->ssthresh;
/* lastsa>>3; we don't need to divide ... */
715 t_ucwnd_sbw += (uint64_t)net->cwnd / (uint64_t)srtt;
716 t_path_mptcp += (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_Z) /
717 (((uint64_t)net->mtu) * (uint64_t)srtt);
718 tmp = (((uint64_t)net->cwnd) << SHIFT_MPTCP_MULTI_N) /
719 ((uint64_t)net->mtu * (uint64_t)(srtt * srtt));
720 if (tmp > max_path) {
725 if (t_path_mptcp > 0) {
726 mptcp_like_alpha = max_path / (t_path_mptcp * t_path_mptcp);
728 mptcp_like_alpha = 1;
731 if (t_ssthresh == 0) {
734 if (t_ucwnd_sbw == 0) {
737 /******************************/
738 /* update cwnd and Early FR */
739 /******************************/
740 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
742 #ifdef JANA_CMT_FAST_RECOVERY
744 * CMT fast recovery code. Need to debug.
746 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
747 if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
748 SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
749 net->will_exit_fast_recovery = 1;
753 /* if nothing was acked on this destination skip it */
754 if (net->net_ack == 0) {
755 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
756 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
760 #ifdef JANA_CMT_FAST_RECOVERY
762 * CMT fast recovery code
765 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
766 * && net->will_exit_fast_recovery == 0) { @@@ Do something
767 * } else if (sctp_cmt_on_off == 0 &&
768 * asoc->fast_retran_loss_recovery && will_exit == 0) {
772 if (asoc->fast_retran_loss_recovery &&
774 (asoc->sctp_cmt_on_off == 0)) {
/* If we are in loss recovery we skip any cwnd update. */
782 * Did any measurements go on for this network?
784 if (use_rtcc && (net->cc_mod.rtcc.tls_needs_set > 0)) {
788 * At this point our bw_bytes has been updated by
789 * incoming sack information.
791 * But our bw may not yet be set.
794 if ((net->cc_mod.rtcc.new_tot_time / 1000) > 0) {
795 nbw = net->cc_mod.rtcc.bw_bytes / (net->cc_mod.rtcc.new_tot_time / 1000);
797 nbw = net->cc_mod.rtcc.bw_bytes;
799 if (net->cc_mod.rtcc.lbw) {
800 if (cc_bw_limit(stcb, net, nbw)) {
801 /* Hold here, no update */
805 uint64_t vtag, probepoint;
807 probepoint = (((uint64_t)net->cwnd) << 32);
808 probepoint |= ((0xa << 16) | 0);
809 vtag = (net->rtt << 32) |
810 (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
813 SDT_PROBE5(sctp, cwnd, net, rttvar,
816 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
819 net->cc_mod.rtcc.lbw = nbw;
820 net->cc_mod.rtcc.lbw_rtt = net->rtt;
821 if (net->cc_mod.rtcc.rtt_set_this_sack) {
822 net->cc_mod.rtcc.rtt_set_this_sack = 0;
823 net->cc_mod.rtcc.bw_bytes_at_last_rttc = net->cc_mod.rtcc.bw_bytes;
/* CMT: CUC algorithm. Update cwnd if pseudo-cumack has moved. */
832 ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
833 /* If the cumulative ack moved we can proceed */
834 if (net->cwnd <= net->ssthresh) {
835 /* We are in slow start */
836 if (net->flight_size + net->net_ack >= net->cwnd) {
839 old_cwnd = net->cwnd;
840 switch (asoc->sctp_cmt_on_off) {
842 limit = (uint32_t)(((uint64_t)net->mtu *
843 (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
844 (uint64_t)net->ssthresh) /
845 (uint64_t)t_ssthresh);
846 incr = (uint32_t)(((uint64_t)net->net_ack *
847 (uint64_t)net->ssthresh) /
848 (uint64_t)t_ssthresh);
/* lastsa>>3; we don't need to divide ... */
865 limit = (uint32_t)(((uint64_t)net->mtu *
866 (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable) *
867 (uint64_t)net->cwnd) /
868 ((uint64_t)srtt * t_ucwnd_sbw));
869 /* INCREASE FACTOR */
870 incr = (uint32_t)(((uint64_t)net->net_ack *
871 (uint64_t)net->cwnd) /
872 ((uint64_t)srtt * t_ucwnd_sbw));
873 /* INCREASE FACTOR */
882 limit = (uint32_t)(((uint64_t)net->mtu *
884 (uint64_t)SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) >>
886 incr = (uint32_t)(((uint64_t)net->net_ack *
892 if (incr > net->net_ack) {
895 if (incr > net->mtu) {
901 if (incr > net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable)) {
902 incr = net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable);
907 sctp_enforce_cwnd_limit(asoc, net);
908 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
909 sctp_log_cwnd(stcb, net, incr,
910 SCTP_CWND_LOG_FROM_SS);
912 SDT_PROBE5(sctp, cwnd, net, ack,
914 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
916 old_cwnd, net->cwnd);
918 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
919 sctp_log_cwnd(stcb, net, net->net_ack,
920 SCTP_CWND_LOG_NOADV_SS);
924 /* We are in congestion avoidance */
928 net->partial_bytes_acked += net->net_ack;
930 if ((net->flight_size + net->net_ack >= net->cwnd) &&
931 (net->partial_bytes_acked >= net->cwnd)) {
932 net->partial_bytes_acked -= net->cwnd;
933 old_cwnd = net->cwnd;
934 switch (asoc->sctp_cmt_on_off) {
936 incr = (uint32_t)(((uint64_t)net->mtu *
937 (uint64_t)net->ssthresh) /
938 (uint64_t)t_ssthresh);
/* lastsa>>3; we don't need to divide ... */
952 incr = (uint32_t)((uint64_t)net->mtu *
953 (uint64_t)net->cwnd /
956 /* INCREASE FACTOR */
962 incr = (uint32_t)((mptcp_like_alpha *
963 (uint64_t)net->cwnd) >>
965 if (incr > net->mtu) {
974 sctp_enforce_cwnd_limit(asoc, net);
975 SDT_PROBE5(sctp, cwnd, net, ack,
977 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
979 old_cwnd, net->cwnd);
980 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
981 sctp_log_cwnd(stcb, net, net->mtu,
982 SCTP_CWND_LOG_FROM_CA);
985 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
986 sctp_log_cwnd(stcb, net, net->net_ack,
987 SCTP_CWND_LOG_NOADV_CA);
992 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
993 sctp_log_cwnd(stcb, net, net->mtu,
994 SCTP_CWND_LOG_NO_CUMACK);
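/*
 * Illustrative summary of the common (non-CMT) path above: in slow start,
 * each SACK that advances the cumulative ack and finds the cwnd fully
 * utilized grows cwnd by min(net_ack, sctp_L2_abc_variable * MTU); e.g.
 * acking 4380 bytes with an ABC limit of 2 and a 1500-byte MTU adds
 * 3000 bytes. In congestion avoidance, cwnd instead grows by one MTU only
 * once partial_bytes_acked has accumulated a full cwnd's worth of newly
 * acked data.
 */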
1001 sctp_cwnd_update_exit_pf_common(struct sctp_tcb *stcb, struct sctp_nets *net)
1005 old_cwnd = net->cwnd;
1006 net->cwnd = net->mtu;
1007 SDT_PROBE5(sctp, cwnd, net, ack,
1008 stcb->asoc.my_vtag, ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)), net,
1009 old_cwnd, net->cwnd);
1010 SCTPDBG(SCTP_DEBUG_INDATA1, "Destination %p moved from PF to reachable with cwnd %d.\n",
1011 (void *)net, net->cwnd);
1016 sctp_cwnd_update_after_timeout(struct sctp_tcb *stcb, struct sctp_nets *net)
1018 int old_cwnd = net->cwnd;
1019 uint32_t t_ssthresh, t_cwnd;
1020 uint64_t t_ucwnd_sbw;
1022 /* MT FIXME: Don't compute this over and over again */
1025 if ((stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) ||
1026 (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV2)) {
1027 struct sctp_nets *lnet;
1031 TAILQ_FOREACH(lnet, &stcb->asoc.nets, sctp_next) {
1032 t_ssthresh += lnet->ssthresh;
1033 t_cwnd += lnet->cwnd;
1034 srtt = lnet->lastsa;
1035 /* lastsa>>3; we don't need to divide ... */
1037 t_ucwnd_sbw += (uint64_t)lnet->cwnd / (uint64_t)srtt;
1040 if (t_ssthresh < 1) {
1043 if (t_ucwnd_sbw < 1) {
1046 if (stcb->asoc.sctp_cmt_on_off == SCTP_CMT_RPV1) {
1047 net->ssthresh = (uint32_t)(((uint64_t)4 *
1048 (uint64_t)net->mtu *
1049 (uint64_t)net->ssthresh) /
1050 (uint64_t)t_ssthresh);
1055 /* lastsa>>3; we don't need to divide ... */
1059 cc_delta = t_ucwnd_sbw * (uint64_t)srtt / 2;
1060 if (cc_delta < t_cwnd) {
1061 net->ssthresh = (uint32_t)((uint64_t)t_cwnd - cc_delta);
1063 net->ssthresh = net->mtu;
1066 if ((net->cwnd > t_cwnd / 2) &&
1067 (net->ssthresh < net->cwnd - t_cwnd / 2)) {
1068 net->ssthresh = net->cwnd - t_cwnd / 2;
1070 if (net->ssthresh < net->mtu) {
1071 net->ssthresh = net->mtu;
1074 net->ssthresh = max(net->cwnd / 2, 4 * net->mtu);
1076 net->cwnd = net->mtu;
1077 net->partial_bytes_acked = 0;
1078 SDT_PROBE5(sctp, cwnd, net, to,
1080 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
1082 old_cwnd, net->cwnd);
1083 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1084 sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
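/*
 * Worked example (illustrative, non-CMT case): with cwnd = 20000 and
 * MTU = 1500, a retransmission timeout sets ssthresh to
 * max(20000 / 2, 4 * 1500) = 10000 and collapses cwnd to a single MTU
 * (1500), so the sender re-enters slow start and grows back toward the
 * new ssthresh.
 */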
1089 sctp_cwnd_update_after_ecn_echo_common(struct sctp_tcb *stcb, struct sctp_nets *net,
1090 int in_window, int num_pkt_lost, int use_rtcc)
1092 int old_cwnd = net->cwnd;
1094 if ((use_rtcc) && (net->lan_type == SCTP_LAN_LOCAL) && (net->cc_mod.rtcc.use_dccc_ecn)) {
1095 /* Data center Congestion Control */
1096 if (in_window == 0) {
1098 * Go to CA with the cwnd at the point we sent the
1099 * TSN that was marked with a CE.
1101 if (net->ecn_prev_cwnd < net->cwnd) {
1102 /* Restore to prev cwnd */
1103 net->cwnd = net->ecn_prev_cwnd - (net->mtu * num_pkt_lost);
1105 /* Just cut in 1/2 */
1109 net->ssthresh = net->cwnd - (num_pkt_lost * net->mtu);
1110 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1111 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
/* Further tuning down required over the drastic original cut. */
1118 net->ssthresh -= (net->mtu * num_pkt_lost);
1119 net->cwnd -= (net->mtu * num_pkt_lost);
1120 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1121 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
1125 SCTP_STAT_INCR(sctps_ecnereducedcwnd);
1127 if (in_window == 0) {
1128 SCTP_STAT_INCR(sctps_ecnereducedcwnd);
1129 net->ssthresh = net->cwnd / 2;
1130 if (net->ssthresh < net->mtu) {
1131 net->ssthresh = net->mtu;
/* here back off the timer as well, to slow us down */
1138 net->cwnd = net->ssthresh;
1139 SDT_PROBE5(sctp, cwnd, net, ecn,
1141 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
1143 old_cwnd, net->cwnd);
1144 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1145 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
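/*
 * Illustrative contrast of the two branches above: when in_window == 0 and
 * the RTCC module's use_dccc_ecn is enabled on a local LAN, an ECN echo
 * restores cwnd to roughly where it was when the CE-marked TSN was sent,
 * minus one MTU per reported loss. The plain handling instead halves cwnd
 * via ssthresh (e.g. cwnd 24000 -> ssthresh and cwnd 12000), and does so
 * at most once per window.
 */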
1153 sctp_cwnd_update_after_packet_dropped(struct sctp_tcb *stcb,
1154 struct sctp_nets *net, struct sctp_pktdrop_chunk *cp,
1155 uint32_t *bottle_bw, uint32_t *on_queue)
1159 int old_cwnd = net->cwnd;
1161 /* get bottle neck bw */
1162 *bottle_bw = ntohl(cp->bottle_bw);
1163 /* and whats on queue */
1164 *on_queue = ntohl(cp->current_onq);
/*
 * Adjust the on-queue value if our flight size is larger; it could be
 * that the router has not yet gotten data "in-flight" to it.
 */
1169 if (*on_queue < net->flight_size) {
1170 *on_queue = net->flight_size;
1172 /* rtt is measured in micro seconds, bottle_bw in bytes per second */
1173 bw_avail = (uint32_t)(((uint64_t)(*bottle_bw) * net->rtt) / (uint64_t)1000000);
1174 if (bw_avail > *bottle_bw) {
/*-
 * Cap the growth to no more than the bottleneck. This can happen as
 * RTT slides up due to queues. It also means that if you have more
 * than a 1 second RTT with an empty queue, you will be limited to
 * the bottle_bw per second no matter if other points have 1/2 the
 * RTT and you could get more out.
 */
1183 bw_avail = *bottle_bw;
1185 if (*on_queue > bw_avail) {
/*
 * No room for anything else; don't allow anything else to be
 * "added to the fire".
 */
1190 int seg_inflight, seg_onqueue, my_portion;
1192 net->partial_bytes_acked = 0;
1193 /* how much are we over queue size? */
1194 incr = *on_queue - bw_avail;
1195 if (stcb->asoc.seen_a_sack_this_pkt) {
/* undo any cwnd adjustment that the sack might have made */
1200 net->cwnd = net->prev_cwnd;
1202 /* Now how much of that is mine? */
1203 seg_inflight = net->flight_size / net->mtu;
1204 seg_onqueue = *on_queue / net->mtu;
1205 my_portion = (incr * seg_inflight) / seg_onqueue;
1207 /* Have I made an adjustment already */
1208 if (net->cwnd > net->flight_size) {
/*
 * For this flight I made an adjustment; we need to decrease
 * the portion by a share of our previous adjustment.
 */
1216 diff_adj = net->cwnd - net->flight_size;
1217 if (diff_adj > my_portion)
1220 my_portion -= diff_adj;
/*
 * Back down to the previous cwnd (assume we have had a sack
 * before this packet), minus whatever portion of the overage
 * is my fault.
 */
1227 net->cwnd -= my_portion;
1229 /* we will NOT back down more than 1 MTU */
1230 if (net->cwnd <= net->mtu) {
1231 net->cwnd = net->mtu;
1234 net->ssthresh = net->cwnd - 1;
/* Take 1/4 of the space left or the max burst .. whichever is smaller. */
1240 incr = (bw_avail - *on_queue) >> 2;
1241 if ((stcb->asoc.max_burst > 0) &&
1242 (stcb->asoc.max_burst * net->mtu < incr)) {
1243 incr = stcb->asoc.max_burst * net->mtu;
1247 if (net->cwnd > bw_avail) {
1248 /* We can't exceed the pipe size */
1249 net->cwnd = bw_avail;
1251 if (net->cwnd < net->mtu) {
1252 /* We always have 1 MTU */
1253 net->cwnd = net->mtu;
1255 sctp_enforce_cwnd_limit(&stcb->asoc, net);
1256 if (net->cwnd - old_cwnd != 0) {
1257 /* log only changes */
1258 SDT_PROBE5(sctp, cwnd, net, pd,
1260 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
1262 old_cwnd, net->cwnd);
1263 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1264 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
1265 SCTP_CWND_LOG_FROM_SAT);
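/*
 * Worked example (illustrative): a peer-reported bottleneck of
 * bottle_bw = 1000000 bytes/sec and an rtt of 50000 usec give
 * bw_avail = 1000000 * 50000 / 1000000 = 50000 bytes "in the pipe". If the
 * router reports on_queue = 80000 bytes, we are 30000 bytes over and this
 * sender's cwnd is pulled back by its own share of that overage (never below
 * one MTU); if instead on_queue were only 20000, cwnd could be topped up by
 * 1/4 of the 30000 bytes of headroom, subject to the max_burst clamp.
 */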
1271 sctp_cwnd_update_after_output(struct sctp_tcb *stcb,
1272 struct sctp_nets *net, int burst_limit)
1274 int old_cwnd = net->cwnd;
1276 if (net->ssthresh < net->cwnd)
1277 net->ssthresh = net->cwnd;
1279 net->cwnd = (net->flight_size + (burst_limit * net->mtu));
1280 sctp_enforce_cwnd_limit(&stcb->asoc, net);
1281 SDT_PROBE5(sctp, cwnd, net, bl,
1283 ((stcb->sctp_ep->sctp_lport << 16) | (stcb->rport)),
1285 old_cwnd, net->cwnd);
1286 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1287 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_BRST);
1293 sctp_cwnd_update_after_sack(struct sctp_tcb *stcb,
1294 struct sctp_association *asoc,
1295 int accum_moved, int reneged_all, int will_exit)
/* Passing zero as the last argument disables the rtcc algorithm */
1298 sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 0);
1302 sctp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
1303 int in_window, int num_pkt_lost)
/* Passing zero as the last argument disables the rtcc algorithm */
1306 sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 0);
/* Here starts the RTCCVAR type CC invented by RRS which
 * is a slight mod to RFC 2581. We reuse a common routine or
 * two since these algorithms are so close and need to share
 * common code.
 */
1315 sctp_cwnd_update_rtcc_after_ecn_echo(struct sctp_tcb *stcb, struct sctp_nets *net,
1316 int in_window, int num_pkt_lost)
1318 sctp_cwnd_update_after_ecn_echo_common(stcb, net, in_window, num_pkt_lost, 1);
1324 sctp_cwnd_update_rtcc_tsn_acknowledged(struct sctp_nets *net,
1325 struct sctp_tmit_chunk *tp1)
1327 net->cc_mod.rtcc.bw_bytes += tp1->send_size;
1331 sctp_cwnd_prepare_rtcc_net_for_sack(struct sctp_tcb *stcb SCTP_UNUSED,
1332 struct sctp_nets *net)
1334 if (net->cc_mod.rtcc.tls_needs_set > 0) {
/* We had a bw measurement going on */
1336 struct timeval ltls;
1338 SCTP_GETPTIME_TIMEVAL(<ls);
1339 timevalsub(<ls, &net->cc_mod.rtcc.tls);
1340 net->cc_mod.rtcc.new_tot_time = (ltls.tv_sec * 1000000) + ltls.tv_usec;
1345 sctp_cwnd_new_rtcc_transmission_begins(struct sctp_tcb *stcb,
1346 struct sctp_nets *net)
1348 uint64_t vtag, probepoint;
1350 if (net->cc_mod.rtcc.lbw) {
1351 /* Clear the old bw.. we went to 0 in-flight */
1352 vtag = (net->rtt << 32) | (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
1354 probepoint = (((uint64_t)net->cwnd) << 32);
1356 probepoint |= ((8 << 16) | 0);
1357 SDT_PROBE5(sctp, cwnd, net, rttvar,
1359 ((net->cc_mod.rtcc.lbw << 32) | 0),
1360 ((net->cc_mod.rtcc.lbw_rtt << 32) | net->rtt),
1363 net->cc_mod.rtcc.lbw_rtt = 0;
1364 net->cc_mod.rtcc.cwnd_at_bw_set = 0;
1365 net->cc_mod.rtcc.lbw = 0;
1366 net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
1367 net->cc_mod.rtcc.vol_reduce = 0;
1368 net->cc_mod.rtcc.bw_tot_time = 0;
1369 net->cc_mod.rtcc.bw_bytes = 0;
1370 net->cc_mod.rtcc.tls_needs_set = 0;
1371 if (net->cc_mod.rtcc.steady_step) {
1372 net->cc_mod.rtcc.vol_reduce = 0;
1373 net->cc_mod.rtcc.step_cnt = 0;
1374 net->cc_mod.rtcc.last_step_state = 0;
1376 if (net->cc_mod.rtcc.ret_from_eq) {
1377 /* less aggressive one - reset cwnd too */
1378 uint32_t cwnd_in_mtu, cwnd;
1380 cwnd_in_mtu = SCTP_BASE_SYSCTL(sctp_initial_cwnd);
1381 if (cwnd_in_mtu == 0) {
/* Using 0 means that the value of RFC 4960 is used. */
1386 cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
1389 * We take the minimum of the burst limit
1390 * and the initial congestion window.
1392 if ((stcb->asoc.max_burst > 0) && (cwnd_in_mtu > stcb->asoc.max_burst))
1393 cwnd_in_mtu = stcb->asoc.max_burst;
1394 cwnd = (net->mtu - sizeof(struct sctphdr)) * cwnd_in_mtu;
1396 if (net->cwnd > cwnd) {
/* Only set if we are not a timeout (i.e. down to 1 MTU). */
1408 sctp_set_rtcc_initial_cc_param(struct sctp_tcb *stcb,
1409 struct sctp_nets *net)
1411 uint64_t vtag, probepoint;
1413 sctp_set_initial_cc_param(stcb, net);
1414 stcb->asoc.use_precise_time = 1;
1415 probepoint = (((uint64_t)net->cwnd) << 32);
1416 probepoint |= ((9 << 16) | 0);
1417 vtag = (net->rtt << 32) |
1418 (((uint32_t)(stcb->sctp_ep->sctp_lport)) << 16) |
1420 SDT_PROBE5(sctp, cwnd, net, rttvar,
1426 net->cc_mod.rtcc.lbw_rtt = 0;
1427 net->cc_mod.rtcc.cwnd_at_bw_set = 0;
1428 net->cc_mod.rtcc.vol_reduce = 0;
1429 net->cc_mod.rtcc.lbw = 0;
1430 net->cc_mod.rtcc.vol_reduce = 0;
1431 net->cc_mod.rtcc.bw_bytes_at_last_rttc = 0;
1432 net->cc_mod.rtcc.bw_tot_time = 0;
1433 net->cc_mod.rtcc.bw_bytes = 0;
1434 net->cc_mod.rtcc.tls_needs_set = 0;
1435 net->cc_mod.rtcc.ret_from_eq = SCTP_BASE_SYSCTL(sctp_rttvar_eqret);
1436 net->cc_mod.rtcc.steady_step = SCTP_BASE_SYSCTL(sctp_steady_step);
1437 net->cc_mod.rtcc.use_dccc_ecn = SCTP_BASE_SYSCTL(sctp_use_dccc_ecn);
1438 net->cc_mod.rtcc.step_cnt = 0;
1439 net->cc_mod.rtcc.last_step_state = 0;
1445 sctp_cwnd_rtcc_socket_option(struct sctp_tcb *stcb, int setorget,
1446 struct sctp_cc_option *cc_opt)
1448 struct sctp_nets *net;
1450 if (setorget == 1) {
1452 if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
1453 if ((cc_opt->aid_value.assoc_value != 0) &&
1454 (cc_opt->aid_value.assoc_value != 1)) {
1457 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1458 net->cc_mod.rtcc.ret_from_eq = cc_opt->aid_value.assoc_value;
1460 } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
1461 if ((cc_opt->aid_value.assoc_value != 0) &&
1462 (cc_opt->aid_value.assoc_value != 1)) {
1465 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1466 net->cc_mod.rtcc.use_dccc_ecn = cc_opt->aid_value.assoc_value;
1468 } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
1469 TAILQ_FOREACH(net, &stcb->asoc.nets, sctp_next) {
1470 net->cc_mod.rtcc.steady_step = cc_opt->aid_value.assoc_value;
1477 if (cc_opt->option == SCTP_CC_OPT_RTCC_SETMODE) {
1478 net = TAILQ_FIRST(&stcb->asoc.nets);
1482 cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.ret_from_eq;
1483 } else if (cc_opt->option == SCTP_CC_OPT_USE_DCCC_ECN) {
1484 net = TAILQ_FIRST(&stcb->asoc.nets);
1488 cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.use_dccc_ecn;
1489 } else if (cc_opt->option == SCTP_CC_OPT_STEADY_STEP) {
1490 net = TAILQ_FIRST(&stcb->asoc.nets);
1494 cc_opt->aid_value.assoc_value = net->cc_mod.rtcc.steady_step;
1503 sctp_cwnd_update_rtcc_packet_transmitted(struct sctp_tcb *stcb SCTP_UNUSED,
1504 struct sctp_nets *net)
1506 if (net->cc_mod.rtcc.tls_needs_set == 0) {
1507 SCTP_GETPTIME_TIMEVAL(&net->cc_mod.rtcc.tls);
1508 net->cc_mod.rtcc.tls_needs_set = 2;
1513 sctp_cwnd_update_rtcc_after_sack(struct sctp_tcb *stcb,
1514 struct sctp_association *asoc,
1515 int accum_moved, int reneged_all, int will_exit)
/* Passing one as the last argument enables the rtcc algorithm */
1518 sctp_cwnd_update_after_sack_common(stcb, asoc, accum_moved, reneged_all, will_exit, 1);
1522 sctp_rtt_rtcc_calculated(struct sctp_tcb *stcb SCTP_UNUSED,
1523 struct sctp_nets *net,
1524 struct timeval *now SCTP_UNUSED)
1526 net->cc_mod.rtcc.rtt_set_this_sack = 1;
/* Here starts Sally Floyd's HS-TCP */
1531 struct sctp_hs_raise_drop {
1534 int8_t drop_percent;
1537 #define SCTP_HS_TABLE_SIZE 73
1539 static const struct sctp_hs_raise_drop sctp_cwnd_adjust[SCTP_HS_TABLE_SIZE] = {
1540 {38, 1, 50}, /* 0 */
1541 {118, 2, 44}, /* 1 */
1542 {221, 3, 41}, /* 2 */
1543 {347, 4, 38}, /* 3 */
1544 {495, 5, 37}, /* 4 */
1545 {663, 6, 35}, /* 5 */
1546 {851, 7, 34}, /* 6 */
1547 {1058, 8, 33}, /* 7 */
1548 {1284, 9, 32}, /* 8 */
1549 {1529, 10, 31}, /* 9 */
1550 {1793, 11, 30}, /* 10 */
1551 {2076, 12, 29}, /* 11 */
1552 {2378, 13, 28}, /* 12 */
1553 {2699, 14, 28}, /* 13 */
1554 {3039, 15, 27}, /* 14 */
1555 {3399, 16, 27}, /* 15 */
1556 {3778, 17, 26}, /* 16 */
1557 {4177, 18, 26}, /* 17 */
1558 {4596, 19, 25}, /* 18 */
1559 {5036, 20, 25}, /* 19 */
1560 {5497, 21, 24}, /* 20 */
1561 {5979, 22, 24}, /* 21 */
1562 {6483, 23, 23}, /* 22 */
1563 {7009, 24, 23}, /* 23 */
1564 {7558, 25, 22}, /* 24 */
1565 {8130, 26, 22}, /* 25 */
1566 {8726, 27, 22}, /* 26 */
1567 {9346, 28, 21}, /* 27 */
1568 {9991, 29, 21}, /* 28 */
1569 {10661, 30, 21}, /* 29 */
1570 {11358, 31, 20}, /* 30 */
1571 {12082, 32, 20}, /* 31 */
1572 {12834, 33, 20}, /* 32 */
1573 {13614, 34, 19}, /* 33 */
1574 {14424, 35, 19}, /* 34 */
1575 {15265, 36, 19}, /* 35 */
1576 {16137, 37, 19}, /* 36 */
1577 {17042, 38, 18}, /* 37 */
1578 {17981, 39, 18}, /* 38 */
1579 {18955, 40, 18}, /* 39 */
1580 {19965, 41, 17}, /* 40 */
1581 {21013, 42, 17}, /* 41 */
1582 {22101, 43, 17}, /* 42 */
1583 {23230, 44, 17}, /* 43 */
1584 {24402, 45, 16}, /* 44 */
1585 {25618, 46, 16}, /* 45 */
1586 {26881, 47, 16}, /* 46 */
1587 {28193, 48, 16}, /* 47 */
1588 {29557, 49, 15}, /* 48 */
1589 {30975, 50, 15}, /* 49 */
1590 {32450, 51, 15}, /* 50 */
1591 {33986, 52, 15}, /* 51 */
1592 {35586, 53, 14}, /* 52 */
1593 {37253, 54, 14}, /* 53 */
1594 {38992, 55, 14}, /* 54 */
1595 {40808, 56, 14}, /* 55 */
1596 {42707, 57, 13}, /* 56 */
1597 {44694, 58, 13}, /* 57 */
1598 {46776, 59, 13}, /* 58 */
1599 {48961, 60, 13}, /* 59 */
1600 {51258, 61, 13}, /* 60 */
1601 {53677, 62, 12}, /* 61 */
1602 {56230, 63, 12}, /* 62 */
1603 {58932, 64, 12}, /* 63 */
1604 {61799, 65, 12}, /* 64 */
1605 {64851, 66, 11}, /* 65 */
1606 {68113, 67, 11}, /* 66 */
1607 {71617, 68, 11}, /* 67 */
1608 {75401, 69, 10}, /* 68 */
1609 {79517, 70, 10}, /* 69 */
1610 {84035, 71, 10}, /* 70 */
1611 {89053, 72, 10}, /* 71 */
1612 {94717, 73, 9} /* 72 */
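/*
 * Illustrative reading of the table above: the first column is the cwnd
 * threshold in units of 1 KB (the code compares it against cwnd >> 10), the
 * second is the additive increase per adjustment, also scaled back up by
 * << 10 into bytes, and the third is the multiplicative-decrease percentage
 * applied on loss. For example, a cwnd in the 1.8-2 MB range sits near the
 * {1793, 11, 30} / {2076, 12, 29} rows, so growth steps are on the order of
 * 11-12 KB per adjustment and a fast retransmit drops cwnd by roughly 30%.
 */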
1616 sctp_hs_cwnd_increase(struct sctp_tcb *stcb, struct sctp_nets *net)
1618 int cur_val, i, indx, incr;
1619 int old_cwnd = net->cwnd;
1621 cur_val = net->cwnd >> 10;
1622 indx = SCTP_HS_TABLE_SIZE - 1;
1624 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
1626 if (net->net_ack > net->mtu) {
1627 net->cwnd += net->mtu;
1629 net->cwnd += net->net_ack;
1632 for (i = net->last_hs_used; i < SCTP_HS_TABLE_SIZE; i++) {
1633 if (cur_val < sctp_cwnd_adjust[i].cwnd) {
1638 net->last_hs_used = indx;
1639 incr = (((int32_t)sctp_cwnd_adjust[indx].increase) << 10);
1642 sctp_enforce_cwnd_limit(&stcb->asoc, net);
1643 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1644 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SS);
1649 sctp_hs_cwnd_decrease(struct sctp_tcb *stcb, struct sctp_nets *net)
1651 int cur_val, i, indx;
1652 int old_cwnd = net->cwnd;
1654 cur_val = net->cwnd >> 10;
1655 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
1657 net->ssthresh = net->cwnd / 2;
1658 if (net->ssthresh < (net->mtu * 2)) {
1659 net->ssthresh = 2 * net->mtu;
1661 net->cwnd = net->ssthresh;
1663 /* drop by the proper amount */
1664 net->ssthresh = net->cwnd - (int)((net->cwnd / 100) *
1665 (int32_t)sctp_cwnd_adjust[net->last_hs_used].drop_percent);
1666 net->cwnd = net->ssthresh;
1667 /* now where are we */
1668 indx = net->last_hs_used;
1669 cur_val = net->cwnd >> 10;
1670 /* reset where we are in the table */
1671 if (cur_val < sctp_cwnd_adjust[0].cwnd) {
/* fell out of HS */
1673 net->last_hs_used = 0;
1675 for (i = indx; i >= 1; i--) {
1676 if (cur_val > sctp_cwnd_adjust[i - 1].cwnd) {
1680 net->last_hs_used = indx;
1683 sctp_enforce_cwnd_limit(&stcb->asoc, net);
1684 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1685 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_FR);
1690 sctp_hs_cwnd_update_after_fr(struct sctp_tcb *stcb,
1691 struct sctp_association *asoc)
1693 struct sctp_nets *net;
1696 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
1697 * (net->fast_retran_loss_recovery == 0)))
1699 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1700 if ((asoc->fast_retran_loss_recovery == 0) ||
1701 (asoc->sctp_cmt_on_off > 0)) {
1702 /* out of a RFC2582 Fast recovery window? */
1703 if (net->net_ack > 0) {
/*
 * Per RFC 4960 section 7.2.3: were there any destinations that had a
 * fast retransmit sent to them? If so, we need to adjust ssthresh
 * and cwnd.
 */
1710 struct sctp_tmit_chunk *lchk;
1712 sctp_hs_cwnd_decrease(stcb, net);
1714 lchk = TAILQ_FIRST(&asoc->send_queue);
1716 net->partial_bytes_acked = 0;
1717 /* Turn on fast recovery window */
1718 asoc->fast_retran_loss_recovery = 1;
1720 /* Mark end of the window */
1721 asoc->fast_recovery_tsn = asoc->sending_seq - 1;
1723 asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
1727 * CMT fast recovery -- per destination
1728 * recovery variable.
1730 net->fast_retran_loss_recovery = 1;
1733 /* Mark end of the window */
1734 net->fast_recovery_tsn = asoc->sending_seq - 1;
1736 net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
1739 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
1740 stcb->sctp_ep, stcb, net,
1741 SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_2);
1742 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
1743 stcb->sctp_ep, stcb, net);
1745 } else if (net->net_ack > 0) {
1747 * Mark a peg that we WOULD have done a cwnd
1748 * reduction but RFC2582 prevented this action.
1750 SCTP_STAT_INCR(sctps_fastretransinrtt);
1756 sctp_hs_cwnd_update_after_sack(struct sctp_tcb *stcb,
1757 struct sctp_association *asoc,
1758 int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
1760 struct sctp_nets *net;
1762 /******************************/
1763 /* update cwnd and Early FR */
1764 /******************************/
1765 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
1767 #ifdef JANA_CMT_FAST_RECOVERY
1769 * CMT fast recovery code. Need to debug.
1771 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
1772 if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
1773 SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
1774 net->will_exit_fast_recovery = 1;
1778 /* if nothing was acked on this destination skip it */
1779 if (net->net_ack == 0) {
1780 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1781 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
1785 #ifdef JANA_CMT_FAST_RECOVERY
1787 * CMT fast recovery code
1790 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
1791 * && net->will_exit_fast_recovery == 0) { @@@ Do something
1792 * } else if (sctp_cmt_on_off == 0 &&
1793 * asoc->fast_retran_loss_recovery && will_exit == 0) {
1797 if (asoc->fast_retran_loss_recovery &&
1799 (asoc->sctp_cmt_on_off == 0)) {
/* If we are in loss recovery we skip any cwnd update. */
/* CMT: CUC algorithm. Update cwnd if pseudo-cumack has moved. */
1811 ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
1812 /* If the cumulative ack moved we can proceed */
1813 if (net->cwnd <= net->ssthresh) {
1814 /* We are in slow start */
1815 if (net->flight_size + net->net_ack >= net->cwnd) {
1816 sctp_hs_cwnd_increase(stcb, net);
1818 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1819 sctp_log_cwnd(stcb, net, net->net_ack,
1820 SCTP_CWND_LOG_NOADV_SS);
1824 /* We are in congestion avoidance */
1825 net->partial_bytes_acked += net->net_ack;
1826 if ((net->flight_size + net->net_ack >= net->cwnd) &&
1827 (net->partial_bytes_acked >= net->cwnd)) {
1828 net->partial_bytes_acked -= net->cwnd;
1829 net->cwnd += net->mtu;
1830 sctp_enforce_cwnd_limit(asoc, net);
1831 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
1832 sctp_log_cwnd(stcb, net, net->mtu,
1833 SCTP_CWND_LOG_FROM_CA);
1836 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1837 sctp_log_cwnd(stcb, net, net->net_ack,
1838 SCTP_CWND_LOG_NOADV_CA);
1843 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
1844 sctp_log_cwnd(stcb, net, net->mtu,
1845 SCTP_CWND_LOG_NO_CUMACK);
1853 * H-TCP congestion control. The algorithm is detailed in:
1854 * R.N.Shorten, D.J.Leith:
1855 * "H-TCP: TCP for high-speed and long-distance networks"
1856 * Proc. PFLDnet, Argonne, 2004.
1857 * http://www.hamilton.ie/net/htcp3.pdf
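/*
 * Rough shape of the update rules implemented below (a sketch; alpha and
 * beta are kept in fixed point where << 7 represents 1.0): once a path has
 * been out of congestion for more than a second, the growth factor is about
 * 1 + 10 * delta + (delta / 2)^2, with delta the time in seconds since the
 * last congestion event, optionally scaled by the RTT, and
 * alpha = 2 * factor * (1 - beta). Beta adapts between BETA_MIN and
 * BETA_MAX as minRTT / maxRTT, and on congestion the new ssthresh becomes
 * roughly (cwnd * beta) >> 7.
 */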
1861 static int use_rtt_scaling = 1;
1862 static int use_bandwidth_switch = 1;
1865 between(uint32_t seq1, uint32_t seq2, uint32_t seq3)
1867 return (seq3 - seq2 >= seq1 - seq2);
1870 static inline uint32_t
1871 htcp_cong_time(struct htcp *ca)
1873 return (sctp_get_tick_count() - ca->last_cong);
1876 static inline uint32_t
1877 htcp_ccount(struct htcp *ca)
1879 return (htcp_cong_time(ca) / ca->minRTT);
1883 htcp_reset(struct htcp *ca)
1885 ca->undo_last_cong = ca->last_cong;
1886 ca->undo_maxRTT = ca->maxRTT;
1887 ca->undo_old_maxB = ca->old_maxB;
1888 ca->last_cong = sctp_get_tick_count();
1891 #ifdef SCTP_NOT_USED
1894 htcp_cwnd_undo(struct sctp_tcb *stcb, struct sctp_nets *net)
1896 net->cc_mod.htcp_ca.last_cong = net->cc_mod.htcp_ca.undo_last_cong;
1897 net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.undo_maxRTT;
1898 net->cc_mod.htcp_ca.old_maxB = net->cc_mod.htcp_ca.undo_old_maxB;
1899 return (max(net->cwnd, ((net->ssthresh / net->mtu << 7) / net->cc_mod.htcp_ca.beta) * net->mtu));
1905 measure_rtt(struct sctp_nets *net)
1907 uint32_t srtt = net->lastsa >> SCTP_RTT_SHIFT;
1909 /* keep track of minimum RTT seen so far, minRTT is zero at first */
1910 if (net->cc_mod.htcp_ca.minRTT > srtt || !net->cc_mod.htcp_ca.minRTT)
1911 net->cc_mod.htcp_ca.minRTT = srtt;
1914 if (net->fast_retran_ip == 0 && net->ssthresh < 0xFFFF && htcp_ccount(&net->cc_mod.htcp_ca) > 3) {
1915 if (net->cc_mod.htcp_ca.maxRTT < net->cc_mod.htcp_ca.minRTT)
1916 net->cc_mod.htcp_ca.maxRTT = net->cc_mod.htcp_ca.minRTT;
1917 if (net->cc_mod.htcp_ca.maxRTT < srtt && srtt <= net->cc_mod.htcp_ca.maxRTT + MSEC_TO_TICKS(20))
1918 net->cc_mod.htcp_ca.maxRTT = srtt;
1923 measure_achieved_throughput(struct sctp_nets *net)
1925 uint32_t now = sctp_get_tick_count();
1927 if (net->fast_retran_ip == 0)
1928 net->cc_mod.htcp_ca.bytes_acked = net->net_ack;
1930 if (!use_bandwidth_switch)
1933 /* achieved throughput calculations */
1934 /* JRS - not 100% sure of this statement */
1935 if (net->fast_retran_ip == 1) {
1936 net->cc_mod.htcp_ca.bytecount = 0;
1937 net->cc_mod.htcp_ca.lasttime = now;
1941 net->cc_mod.htcp_ca.bytecount += net->net_ack;
1942 if ((net->cc_mod.htcp_ca.bytecount >= net->cwnd - (((net->cc_mod.htcp_ca.alpha >> 7) ? (net->cc_mod.htcp_ca.alpha >> 7) : 1) * net->mtu)) &&
1943 (now - net->cc_mod.htcp_ca.lasttime >= net->cc_mod.htcp_ca.minRTT) &&
1944 (net->cc_mod.htcp_ca.minRTT > 0)) {
1945 uint32_t cur_Bi = net->cc_mod.htcp_ca.bytecount / net->mtu * hz / (now - net->cc_mod.htcp_ca.lasttime);
1947 if (htcp_ccount(&net->cc_mod.htcp_ca) <= 3) {
1948 /* just after backoff */
1949 net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi = cur_Bi;
1951 net->cc_mod.htcp_ca.Bi = (3 * net->cc_mod.htcp_ca.Bi + cur_Bi) / 4;
1952 if (net->cc_mod.htcp_ca.Bi > net->cc_mod.htcp_ca.maxB)
1953 net->cc_mod.htcp_ca.maxB = net->cc_mod.htcp_ca.Bi;
1954 if (net->cc_mod.htcp_ca.minB > net->cc_mod.htcp_ca.maxB)
1955 net->cc_mod.htcp_ca.minB = net->cc_mod.htcp_ca.maxB;
1957 net->cc_mod.htcp_ca.bytecount = 0;
1958 net->cc_mod.htcp_ca.lasttime = now;
1963 htcp_beta_update(struct htcp *ca, uint32_t minRTT, uint32_t maxRTT)
1965 if (use_bandwidth_switch) {
1966 uint32_t maxB = ca->maxB;
1967 uint32_t old_maxB = ca->old_maxB;
1969 ca->old_maxB = ca->maxB;
1971 if (!between(5 * maxB, 4 * old_maxB, 6 * old_maxB)) {
1972 ca->beta = BETA_MIN;
1978 if (ca->modeswitch && minRTT > (uint32_t)MSEC_TO_TICKS(10) && maxRTT) {
1979 ca->beta = (minRTT << 7) / maxRTT;
1980 if (ca->beta < BETA_MIN)
1981 ca->beta = BETA_MIN;
1982 else if (ca->beta > BETA_MAX)
1983 ca->beta = BETA_MAX;
1985 ca->beta = BETA_MIN;
1991 htcp_alpha_update(struct htcp *ca)
1993 uint32_t minRTT = ca->minRTT;
1994 uint32_t factor = 1;
1995 uint32_t diff = htcp_cong_time(ca);
1997 if (diff > (uint32_t)hz) {
1999 factor = 1 + (10 * diff + ((diff / 2) * (diff / 2) / hz)) / hz;
2002 if (use_rtt_scaling && minRTT) {
2003 uint32_t scale = (hz << 3) / (10 * minRTT);
2005 scale = min(max(scale, 1U << 2), 10U << 3); /* clamping ratio to
2006 * interval [0.5,10]<<3 */
2007 factor = (factor << 3) / scale;
2012 ca->alpha = 2 * factor * ((1 << 7) - ca->beta);
2014 ca->alpha = ALPHA_BASE;
/* After we have the rtt data to calculate beta, we'd still prefer to wait one
 * rtt before we adjust our beta to ensure we are working from a consistent
 * set of data.
 *
2021 * This function should be called when we hit a congestion event since only at
2022 * that point do we really have a real sense of maxRTT (the queues en route
2023 * were getting just too full now).
2026 htcp_param_update(struct sctp_nets *net)
2028 uint32_t minRTT = net->cc_mod.htcp_ca.minRTT;
2029 uint32_t maxRTT = net->cc_mod.htcp_ca.maxRTT;
2031 htcp_beta_update(&net->cc_mod.htcp_ca, minRTT, maxRTT);
2032 htcp_alpha_update(&net->cc_mod.htcp_ca);
/* add slowly fading memory for maxRTT to accommodate routing changes */
2038 if (minRTT > 0 && maxRTT > minRTT)
2039 net->cc_mod.htcp_ca.maxRTT = minRTT + ((maxRTT - minRTT) * 95) / 100;
2043 htcp_recalc_ssthresh(struct sctp_nets *net)
2045 htcp_param_update(net);
2046 return (max(((net->cwnd / net->mtu * net->cc_mod.htcp_ca.beta) >> 7) * net->mtu, 2U * net->mtu));
2050 htcp_cong_avoid(struct sctp_tcb *stcb, struct sctp_nets *net)
2053 * How to handle these functions?
2054 * if (!tcp_is_cwnd_limited(sk, in_flight)) RRS - good question.
2057 if (net->cwnd <= net->ssthresh) {
2058 /* We are in slow start */
2059 if (net->flight_size + net->net_ack >= net->cwnd) {
2060 if (net->net_ack > (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable))) {
2061 net->cwnd += (net->mtu * SCTP_BASE_SYSCTL(sctp_L2_abc_variable));
2062 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
2063 sctp_log_cwnd(stcb, net, net->mtu,
2064 SCTP_CWND_LOG_FROM_SS);
2068 net->cwnd += net->net_ack;
2069 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
2070 sctp_log_cwnd(stcb, net, net->net_ack,
2071 SCTP_CWND_LOG_FROM_SS);
2075 sctp_enforce_cwnd_limit(&stcb->asoc, net);
2077 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2078 sctp_log_cwnd(stcb, net, net->net_ack,
2079 SCTP_CWND_LOG_NOADV_SS);
2086 * In dangerous area, increase slowly. In theory this is
2087 * net->cwnd += alpha / net->cwnd
2089 /* What is snd_cwnd_cnt?? */
2090 if (((net->partial_bytes_acked / net->mtu * net->cc_mod.htcp_ca.alpha) >> 7) * net->mtu >= net->cwnd) {
2092 * Does SCTP have a cwnd clamp?
2093 * if (net->snd_cwnd < net->snd_cwnd_clamp) - Nope (RRS).
2095 net->cwnd += net->mtu;
2096 net->partial_bytes_acked = 0;
2097 sctp_enforce_cwnd_limit(&stcb->asoc, net);
2098 htcp_alpha_update(&net->cc_mod.htcp_ca);
2099 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
2100 sctp_log_cwnd(stcb, net, net->mtu,
2101 SCTP_CWND_LOG_FROM_CA);
2104 net->partial_bytes_acked += net->net_ack;
2105 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2106 sctp_log_cwnd(stcb, net, net->net_ack,
2107 SCTP_CWND_LOG_NOADV_CA);
2111 net->cc_mod.htcp_ca.bytes_acked = net->mtu;
2115 #ifdef SCTP_NOT_USED
2116 /* Lower bound on congestion window. */
2118 htcp_min_cwnd(struct sctp_tcb *stcb, struct sctp_nets *net)
2120 return (net->ssthresh);
2125 htcp_init(struct sctp_nets *net)
2127 memset(&net->cc_mod.htcp_ca, 0, sizeof(struct htcp));
2128 net->cc_mod.htcp_ca.alpha = ALPHA_BASE;
2129 net->cc_mod.htcp_ca.beta = BETA_MIN;
2130 net->cc_mod.htcp_ca.bytes_acked = net->mtu;
2131 net->cc_mod.htcp_ca.last_cong = sctp_get_tick_count();
2135 sctp_htcp_set_initial_cc_param(struct sctp_tcb *stcb, struct sctp_nets *net)
2138 * We take the max of the burst limit times a MTU or the
2139 * INITIAL_CWND. We then limit this to 4 MTU's of sending.
2141 net->cwnd = min((net->mtu * 4), max((2 * net->mtu), SCTP_INITIAL_CWND));
2142 net->ssthresh = stcb->asoc.peers_rwnd;
2143 sctp_enforce_cwnd_limit(&stcb->asoc, net);
2146 if (SCTP_BASE_SYSCTL(sctp_logging_level) & (SCTP_CWND_MONITOR_ENABLE | SCTP_CWND_LOGGING_ENABLE)) {
2147 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_INITIALIZATION);
2152 sctp_htcp_cwnd_update_after_sack(struct sctp_tcb *stcb,
2153 struct sctp_association *asoc,
2154 int accum_moved, int reneged_all SCTP_UNUSED, int will_exit)
2156 struct sctp_nets *net;
2158 /******************************/
2159 /* update cwnd and Early FR */
2160 /******************************/
2161 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2163 #ifdef JANA_CMT_FAST_RECOVERY
2165 * CMT fast recovery code. Need to debug.
2167 if (net->fast_retran_loss_recovery && net->new_pseudo_cumack) {
2168 if (SCTP_TSN_GE(asoc->last_acked_seq, net->fast_recovery_tsn) ||
2169 SCTP_TSN_GE(net->pseudo_cumack, net->fast_recovery_tsn)) {
2170 net->will_exit_fast_recovery = 1;
2174 /* if nothing was acked on this destination skip it */
2175 if (net->net_ack == 0) {
2176 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2177 sctp_log_cwnd(stcb, net, 0, SCTP_CWND_LOG_FROM_SACK);
2181 #ifdef JANA_CMT_FAST_RECOVERY
2183 * CMT fast recovery code
2186 * if (sctp_cmt_on_off > 0 && net->fast_retran_loss_recovery
2187 * && net->will_exit_fast_recovery == 0) { @@@ Do something
2188 * } else if (sctp_cmt_on_off == 0 &&
2189 * asoc->fast_retran_loss_recovery && will_exit == 0) {
2193 if (asoc->fast_retran_loss_recovery &&
2195 (asoc->sctp_cmt_on_off == 0)) {
/* If we are in loss recovery we skip any cwnd update. */
/* CMT: CUC algorithm. Update cwnd if pseudo-cumack has moved. */
2207 ((asoc->sctp_cmt_on_off > 0) && net->new_pseudo_cumack)) {
2208 htcp_cong_avoid(stcb, net);
2209 measure_achieved_throughput(net);
2211 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_LOGGING_ENABLE) {
2212 sctp_log_cwnd(stcb, net, net->mtu,
2213 SCTP_CWND_LOG_NO_CUMACK);
2220 sctp_htcp_cwnd_update_after_fr(struct sctp_tcb *stcb,
2221 struct sctp_association *asoc)
2223 struct sctp_nets *net;
2226 * CMT fast recovery code. Need to debug. ((sctp_cmt_on_off > 0) &&
2227 * (net->fast_retran_loss_recovery == 0)))
2229 TAILQ_FOREACH(net, &asoc->nets, sctp_next) {
2230 if ((asoc->fast_retran_loss_recovery == 0) ||
2231 (asoc->sctp_cmt_on_off > 0)) {
2232 /* out of a RFC2582 Fast recovery window? */
2233 if (net->net_ack > 0) {
/*
 * Per RFC 4960 section 7.2.3: were there any destinations that had a
 * fast retransmit sent to them? If so, we need to adjust ssthresh
 * and cwnd.
 */
2240 struct sctp_tmit_chunk *lchk;
2241 int old_cwnd = net->cwnd;
2243 /* JRS - reset as if state were changed */
2244 htcp_reset(&net->cc_mod.htcp_ca);
2245 net->ssthresh = htcp_recalc_ssthresh(net);
2246 net->cwnd = net->ssthresh;
2247 sctp_enforce_cwnd_limit(asoc, net);
2248 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
2249 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd),
2250 SCTP_CWND_LOG_FROM_FR);
2252 lchk = TAILQ_FIRST(&asoc->send_queue);
2254 net->partial_bytes_acked = 0;
2255 /* Turn on fast recovery window */
2256 asoc->fast_retran_loss_recovery = 1;
2258 /* Mark end of the window */
2259 asoc->fast_recovery_tsn = asoc->sending_seq - 1;
2261 asoc->fast_recovery_tsn = lchk->rec.data.tsn - 1;
2265 * CMT fast recovery -- per destination
2266 * recovery variable.
2268 net->fast_retran_loss_recovery = 1;
2271 /* Mark end of the window */
2272 net->fast_recovery_tsn = asoc->sending_seq - 1;
2274 net->fast_recovery_tsn = lchk->rec.data.tsn - 1;
2277 sctp_timer_stop(SCTP_TIMER_TYPE_SEND,
2278 stcb->sctp_ep, stcb, net,
2279 SCTP_FROM_SCTP_CC_FUNCTIONS + SCTP_LOC_3);
2280 sctp_timer_start(SCTP_TIMER_TYPE_SEND,
2281 stcb->sctp_ep, stcb, net);
2283 } else if (net->net_ack > 0) {
2285 * Mark a peg that we WOULD have done a cwnd
2286 * reduction but RFC2582 prevented this action.
2288 SCTP_STAT_INCR(sctps_fastretransinrtt);
2294 sctp_htcp_cwnd_update_after_timeout(struct sctp_tcb *stcb,
2295 struct sctp_nets *net)
2297 int old_cwnd = net->cwnd;
2299 /* JRS - reset as if the state were being changed to timeout */
2300 htcp_reset(&net->cc_mod.htcp_ca);
2301 net->ssthresh = htcp_recalc_ssthresh(net);
2302 net->cwnd = net->mtu;
2303 net->partial_bytes_acked = 0;
2304 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
2305 sctp_log_cwnd(stcb, net, net->cwnd - old_cwnd, SCTP_CWND_LOG_FROM_RTX);
2310 sctp_htcp_cwnd_update_after_ecn_echo(struct sctp_tcb *stcb,
2311 struct sctp_nets *net, int in_window, int num_pkt_lost SCTP_UNUSED)
2315 old_cwnd = net->cwnd;
/* JRS - reset htcp as if state changed */
2318 if (in_window == 0) {
2319 htcp_reset(&net->cc_mod.htcp_ca);
2320 SCTP_STAT_INCR(sctps_ecnereducedcwnd);
2321 net->ssthresh = htcp_recalc_ssthresh(net);
2322 if (net->ssthresh < net->mtu) {
2323 net->ssthresh = net->mtu;
2324 /* here back off the timer as well, to slow us down */
2327 net->cwnd = net->ssthresh;
2328 sctp_enforce_cwnd_limit(&stcb->asoc, net);
2329 if (SCTP_BASE_SYSCTL(sctp_logging_level) & SCTP_CWND_MONITOR_ENABLE) {
2330 sctp_log_cwnd(stcb, net, (net->cwnd - old_cwnd), SCTP_CWND_LOG_FROM_SAT);
2335 const struct sctp_cc_functions sctp_cc_functions[] = {
2337 .sctp_set_initial_cc_param = sctp_set_initial_cc_param,
2338 .sctp_cwnd_update_after_sack = sctp_cwnd_update_after_sack,
2339 .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
2340 .sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
2341 .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
2342 .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
2343 .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
2344 .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
2347 .sctp_set_initial_cc_param = sctp_set_initial_cc_param,
2348 .sctp_cwnd_update_after_sack = sctp_hs_cwnd_update_after_sack,
2349 .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
2350 .sctp_cwnd_update_after_fr = sctp_hs_cwnd_update_after_fr,
2351 .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
2352 .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_after_ecn_echo,
2353 .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
2354 .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
2357 .sctp_set_initial_cc_param = sctp_htcp_set_initial_cc_param,
2358 .sctp_cwnd_update_after_sack = sctp_htcp_cwnd_update_after_sack,
2359 .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
2360 .sctp_cwnd_update_after_fr = sctp_htcp_cwnd_update_after_fr,
2361 .sctp_cwnd_update_after_timeout = sctp_htcp_cwnd_update_after_timeout,
2362 .sctp_cwnd_update_after_ecn_echo = sctp_htcp_cwnd_update_after_ecn_echo,
2363 .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
2364 .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
2367 .sctp_set_initial_cc_param = sctp_set_rtcc_initial_cc_param,
2368 .sctp_cwnd_update_after_sack = sctp_cwnd_update_rtcc_after_sack,
2369 .sctp_cwnd_update_exit_pf = sctp_cwnd_update_exit_pf_common,
2370 .sctp_cwnd_update_after_fr = sctp_cwnd_update_after_fr,
2371 .sctp_cwnd_update_after_timeout = sctp_cwnd_update_after_timeout,
2372 .sctp_cwnd_update_after_ecn_echo = sctp_cwnd_update_rtcc_after_ecn_echo,
2373 .sctp_cwnd_update_after_packet_dropped = sctp_cwnd_update_after_packet_dropped,
2374 .sctp_cwnd_update_after_output = sctp_cwnd_update_after_output,
2375 .sctp_cwnd_update_packet_transmitted = sctp_cwnd_update_rtcc_packet_transmitted,
2376 .sctp_cwnd_update_tsn_acknowledged = sctp_cwnd_update_rtcc_tsn_acknowledged,
2377 .sctp_cwnd_new_transmission_begins = sctp_cwnd_new_rtcc_transmission_begins,
2378 .sctp_cwnd_prepare_net_for_sack = sctp_cwnd_prepare_rtcc_net_for_sack,
2379 .sctp_cwnd_socket_option = sctp_cwnd_rtcc_socket_option,
2380 .sctp_rtt_calculated = sctp_rtt_rtcc_calculated
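/*
 * Usage sketch (illustrative, user-space): an application can select one of
 * the modules in this table per association via the SCTP_PLUGGABLE_CC
 * socket option, e.g.
 *
 *	struct sctp_assoc_value av;
 *
 *	av.assoc_id = SCTP_FUTURE_ASSOC;
 *	av.assoc_value = SCTP_CC_HSTCP;
 *	(void)setsockopt(fd, IPPROTO_SCTP, SCTP_PLUGGABLE_CC, &av, sizeof(av));
 *
 * and the RTCC-specific knobs handled by sctp_cwnd_rtcc_socket_option()
 * above are reached through the SCTP_CC_OPTION socket option with a
 * struct sctp_cc_option.
 */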