/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2008-2010 Lawrence Stewart <lstewart@freebsd.org>
 * Copyright (c) 2010 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Lawrence Stewart while studying at the Centre
 * for Advanced Internet Architectures, Swinburne University of Technology, made
 * possible in part by a grant from the Cisco University Research Program Fund
 * at Community Foundation Silicon Valley.
 *
 * Portions of this software were developed at the Centre for Advanced
 * Internet Architectures, Swinburne University of Technology, Melbourne,
 * Australia by David Hayes under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * An implementation of the CUBIC congestion control algorithm for FreeBSD,
 * based on the Internet Draft "draft-rhee-tcpm-cubic-02" by Rhee, Xu and Ha.
 * Originally released as part of the NewTCP research project at Swinburne
 * University of Technology's Centre for Advanced Internet Architectures,
 * Melbourne, Australia, which was made possible in part by a grant from the
 * Cisco University Research Program Fund at Community Foundation Silicon
 * Valley. More details are available at:
 *   http://caia.swin.edu.au/urp/newtcp/
 */
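
/*
 * In rough outline (per the I-D; the fixed-point helpers cubic_k(),
 * cubic_cwnd() and tf_cwnd() from cc_cubic.h approximate these, modulo unit
 * conversions), with beta the multiplicative decrease factor, C the cubic
 * scaling constant, wmax the window at the last congestion event and t the
 * time elapsed since that event:
 *
 *	K = cbrt(wmax * (1 - beta) / C)
 *	W_cubic(t) = C * (t - K)^3 + wmax
 *	W_tf(t)    = wmax * beta + (3 * (1 - beta) / (1 + beta)) * t / RTT
 *
 * On each ACK outside of slow start, cwnd is steered towards whichever of
 * W_cubic and W_tf is larger, so CUBIC should never be less aggressive than
 * a standard AIMD TCP flow sharing the same path.
 */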

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/socket.h>
#include <sys/socketvar.h>
#include <sys/sysctl.h>
#include <sys/systm.h>

#include <net/vnet.h>

#include <netinet/tcp.h>
#include <netinet/tcp_seq.h>
#include <netinet/tcp_timer.h>
#include <netinet/tcp_var.h>
#include <netinet/cc/cc.h>
#include <netinet/cc/cc_cubic.h>
#include <netinet/cc/cc_module.h>

static void	cubic_ack_received(struct cc_var *ccv, uint16_t type);
static void	cubic_cb_destroy(struct cc_var *ccv);
static int	cubic_cb_init(struct cc_var *ccv);
static void	cubic_cong_signal(struct cc_var *ccv, uint32_t type);
static void	cubic_conn_init(struct cc_var *ccv);
static int	cubic_mod_init(void);
static void	cubic_post_recovery(struct cc_var *ccv);
static void	cubic_record_rtt(struct cc_var *ccv);
static void	cubic_ssthresh_update(struct cc_var *ccv);
static void	cubic_after_idle(struct cc_var *ccv);

struct cubic {
	/* Cubic K in fixed point form with CUBIC_SHIFT worth of precision. */
	int64_t		K;
	/* Sum of RTT samples across an epoch in ticks. */
	int64_t		sum_rtt_ticks;
	/* cwnd at the most recent congestion event. */
	unsigned long	max_cwnd;
	/* cwnd at the previous congestion event. */
	unsigned long	prev_max_cwnd;
	/* various flags */
	uint32_t	flags;
#define	CUBICFLAG_CONG_EVENT	0x00000001	/* congestion experienced */
#define	CUBICFLAG_IN_SLOWSTART	0x00000002	/* in slow start */
#define	CUBICFLAG_IN_APPLIMIT	0x00000004	/* application limited */
	/* Minimum observed rtt in ticks. */
	int		min_rtt_ticks;
	/* Mean observed rtt between congestion epochs. */
	int		mean_rtt_ticks;
	/* ACKs since last congestion event. */
	int		epoch_ack_count;
	/* Time of last congestion event in ticks. */
	int		t_last_cong;
};

static MALLOC_DEFINE(M_CUBIC, "cubic data",
    "Per connection data required for the CUBIC congestion control algorithm");

struct cc_algo cubic_cc_algo = {
	.name = "cubic",
	.ack_received = cubic_ack_received,
	.cb_destroy = cubic_cb_destroy,
	.cb_init = cubic_cb_init,
	.cong_signal = cubic_cong_signal,
	.conn_init = cubic_conn_init,
	.mod_init = cubic_mod_init,
	.post_recovery = cubic_post_recovery,
	.after_idle = cubic_after_idle,
};
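
/*
 * The hooks above are invoked by the kernel's modular congestion control
 * framework: cb_init/cb_destroy when a connection picks up or drops this
 * algorithm, conn_init when the connection is established, ack_received for
 * each ACK processed, cong_signal when congestion is detected (dupacks, ECN
 * or RTO), post_recovery when leaving recovery, after_idle when transmission
 * resumes after an idle period, and mod_init at module load time.
 */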

static void
cubic_ack_received(struct cc_var *ccv, uint16_t type)
{
	struct cubic *cubic_data;
	unsigned long w_tf, w_cubic_next;
	int ticks_since_cong;

	cubic_data = ccv->cc_data;
	cubic_record_rtt(ccv);

	/*
	 * For a regular ACK, when we're not in cong/fast recovery and we're
	 * cwnd limited, always recalculate cwnd.
	 */
	if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
	    (ccv->flags & CCF_CWND_LIMITED)) {
		/* Use the logic in NewReno ack_received() for slow start. */
		if (CCV(ccv, snd_cwnd) <= CCV(ccv, snd_ssthresh) ||
		    cubic_data->min_rtt_ticks == TCPTV_SRTTBASE) {
			cubic_data->flags |= CUBICFLAG_IN_SLOWSTART;
			newreno_cc_algo.ack_received(ccv, type);
		} else {
			if (cubic_data->flags & (CUBICFLAG_IN_SLOWSTART |
			    CUBICFLAG_IN_APPLIMIT)) {
				cubic_data->flags &= ~(CUBICFLAG_IN_SLOWSTART |
				    CUBICFLAG_IN_APPLIMIT);
				cubic_data->t_last_cong = ticks;
				cubic_data->K = cubic_k(cubic_data->max_cwnd /
				    CCV(ccv, t_maxseg));
			}
			if ((ticks_since_cong =
			    ticks - cubic_data->t_last_cong) < 0) {
				/*
				 * ticks has wrapped; clamp the elapsed time
				 * and drag t_last_cong along with it so
				 * future deltas stay sane.
				 */
				ticks_since_cong = INT_MAX;
				cubic_data->t_last_cong = ticks - INT_MAX;
			}
			/*
			 * The mean RTT is used to best reflect the equations
			 * in the I-D. Using min_rtt in the tf_cwnd calculation
			 * causes w_tf to grow much faster than it should if
			 * the RTT is dominated by network buffering rather
			 * than propagation delay.
			 */
			w_tf = tf_cwnd(ticks_since_cong,
			    cubic_data->mean_rtt_ticks, cubic_data->max_cwnd,
			    CCV(ccv, t_maxseg));

			w_cubic_next = cubic_cwnd(ticks_since_cong +
			    cubic_data->mean_rtt_ticks, cubic_data->max_cwnd,
			    CCV(ccv, t_maxseg), cubic_data->K);
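
			/*
			 * Note that w_cubic_next is evaluated one mean RTT
			 * ahead of now, so the cwnd chosen on this ACK is the
			 * value the cubic curve calls for by the time the
			 * next RTT's worth of ACKs has arrived.
			 */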

			ccv->flags &= ~CCF_ABC_SENTAWND;

			if (w_cubic_next < w_tf) {
				/*
				 * TCP-friendly region, follow tf
				 * cwnd growth.
				 */
				if (CCV(ccv, snd_cwnd) < w_tf)
					CCV(ccv, snd_cwnd) = ulmin(w_tf, INT_MAX);
			} else if (CCV(ccv, snd_cwnd) < w_cubic_next) {
				/*
				 * Concave or convex region, follow CUBIC
				 * cwnd growth.
				 * Only update snd_cwnd, if it doesn't shrink.
				 */
				CCV(ccv, snd_cwnd) = ulmin(w_cubic_next,
				    INT_MAX);
			}

			/*
			 * If we're not in slow start and we're probing for a
			 * new cwnd limit at the start of a connection
			 * (happens when hostcache has a relevant entry),
			 * keep updating our current estimate of the
			 * max_cwnd.
			 */
			if (((cubic_data->flags & CUBICFLAG_CONG_EVENT) == 0) &&
			    cubic_data->max_cwnd < CCV(ccv, snd_cwnd)) {
				cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
				cubic_data->K = cubic_k(cubic_data->max_cwnd /
				    CCV(ccv, t_maxseg));
			}
		}
	} else if (type == CC_ACK && !IN_RECOVERY(CCV(ccv, t_flags)) &&
	    !(ccv->flags & CCF_CWND_LIMITED)) {
		cubic_data->flags |= CUBICFLAG_IN_APPLIMIT;
	}
}

/*
 * This is a CUBIC specific implementation of after_idle.
 *   - Reset cwnd by calling the NewReno implementation of after_idle.
 *   - Reset t_last_cong.
 */
static void
cubic_after_idle(struct cc_var *ccv)
{
	struct cubic *cubic_data;

	cubic_data = ccv->cc_data;

	cubic_data->max_cwnd = ulmax(cubic_data->max_cwnd, CCV(ccv, snd_cwnd));
	cubic_data->K = cubic_k(cubic_data->max_cwnd / CCV(ccv, t_maxseg));

	newreno_cc_algo.after_idle(ccv);
	cubic_data->t_last_cong = ticks;
}

static void
cubic_cb_destroy(struct cc_var *ccv)
{
	free(ccv->cc_data, M_CUBIC);
}

static int
cubic_cb_init(struct cc_var *ccv)
{
	struct cubic *cubic_data;

	cubic_data = malloc(sizeof(struct cubic), M_CUBIC, M_NOWAIT|M_ZERO);

	if (cubic_data == NULL)
		return (ENOMEM);

	/* Init some key variables with sensible defaults. */
	cubic_data->t_last_cong = ticks;
	cubic_data->min_rtt_ticks = TCPTV_SRTTBASE;
	cubic_data->mean_rtt_ticks = 1;

	ccv->cc_data = cubic_data;

	return (0);
}

/*
 * Perform any necessary tasks before we enter congestion recovery.
 */
static void
cubic_cong_signal(struct cc_var *ccv, uint32_t type)
{
	struct cubic *cubic_data;
	uint32_t mss;

	cubic_data = ccv->cc_data;
	mss = tcp_maxseg(ccv->ccvc.tcp);

	switch (type) {
	case CC_NDUPACK:
		if (!IN_FASTRECOVERY(CCV(ccv, t_flags))) {
			if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
				cubic_ssthresh_update(ccv);
				cubic_data->flags |= CUBICFLAG_CONG_EVENT;
				cubic_data->t_last_cong = ticks;
				cubic_data->K = cubic_k(cubic_data->max_cwnd /
				    CCV(ccv, t_maxseg));
			}
			ENTER_RECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_ECN:
		if (!IN_CONGRECOVERY(CCV(ccv, t_flags))) {
			cubic_ssthresh_update(ccv);
			cubic_data->flags |= CUBICFLAG_CONG_EVENT;
			cubic_data->t_last_cong = ticks;
			cubic_data->K = cubic_k(cubic_data->max_cwnd /
			    CCV(ccv, t_maxseg));
			CCV(ccv, snd_cwnd) = CCV(ccv, snd_ssthresh);
			ENTER_CONGRECOVERY(CCV(ccv, t_flags));
		}
		break;

	case CC_RTO:
		CCV(ccv, snd_ssthresh) = max(min(CCV(ccv, snd_wnd),
		    CCV(ccv, snd_cwnd)) / 2 / mss,
		    2) * mss;
		CCV(ccv, snd_cwnd) = mss;
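		/*
		 * Hypothetical example of the ssthresh calculation above:
		 * with snd_wnd of 65535, snd_cwnd of 30000 and an mss of
		 * 1460, ssthresh becomes max(30000 / 2 / 1460, 2) * 1460 =
		 * 10 * 1460 = 14600 bytes, i.e. half the effective window
		 * rounded down to whole segments, but never fewer than two
		 * segments.
		 */
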
		/*
		 * Grab the current time and record it so we know when the
		 * most recent congestion event was. Only record it when the
		 * timeout has fired more than once, as there is a reasonable
		 * chance the first one is a false alarm and may not indicate
		 * congestion.
		 * This will put Cubic firmly into the concave / TCP friendly
		 * region, for a slower ramp-up after two consecutive RTOs.
		 */
		if (CCV(ccv, t_rxtshift) >= 2) {
			cubic_data->flags |= CUBICFLAG_CONG_EVENT;
			cubic_data->t_last_cong = ticks;
			cubic_data->max_cwnd = CCV(ccv, snd_cwnd_prev);
			cubic_data->K = cubic_k(cubic_data->max_cwnd /
			    CCV(ccv, t_maxseg));
		}
		break;
	}
}

static void
cubic_conn_init(struct cc_var *ccv)
{
	struct cubic *cubic_data;

	cubic_data = ccv->cc_data;

	/*
	 * Ensure we have a sane initial value for max_cwnd recorded. Without
	 * it, bad things happen when entries from the TCP hostcache get used.
	 */
	cubic_data->max_cwnd = CCV(ccv, snd_cwnd);
}

static int
cubic_mod_init(void)
{
	return (0);
}

/*
 * Perform any necessary tasks before we exit congestion recovery.
 */
static void
cubic_post_recovery(struct cc_var *ccv)
{
	struct cubic *cubic_data;
	int pipe;

	cubic_data = ccv->cc_data;
	pipe = 0;

	if (IN_FASTRECOVERY(CCV(ccv, t_flags))) {
		/*
		 * If inflight data is less than ssthresh, set cwnd
		 * conservatively to avoid a burst of data, as suggested in
		 * the NewReno RFC. Otherwise, use the CUBIC method.
		 *
		 * XXXLAS: Find a way to do this without needing curack
		 */
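		/*
		 * "pipe" estimates how much data is still outstanding in the
		 * network: the RFC 6675 scoreboard-based estimate when
		 * V_tcp_do_rfc6675_pipe is enabled, otherwise the simpler
		 * snd_max - curack approximation.
		 */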
		if (V_tcp_do_rfc6675_pipe)
			pipe = tcp_compute_pipe(ccv->ccvc.tcp);
		else
			pipe = CCV(ccv, snd_max) - ccv->curack;

		if (pipe < CCV(ccv, snd_ssthresh))
			/*
			 * Ensure that cwnd does not collapse to 1 MSS under
			 * adverse conditions. Implements RFC6582.
			 */
			CCV(ccv, snd_cwnd) = max(pipe, CCV(ccv, t_maxseg)) +
			    CCV(ccv, t_maxseg);
		else
			/* Update cwnd based on beta and adjusted max_cwnd. */
			CCV(ccv, snd_cwnd) = max(((uint64_t)cubic_data->max_cwnd *
			    CUBIC_BETA) >> CUBIC_SHIFT,
			    2 * CCV(ccv, t_maxseg));
	}

	/* Calculate the average RTT between congestion epochs. */
	if (cubic_data->epoch_ack_count > 0 &&
	    cubic_data->sum_rtt_ticks >= cubic_data->epoch_ack_count) {
		cubic_data->mean_rtt_ticks = (int)(cubic_data->sum_rtt_ticks /
		    cubic_data->epoch_ack_count);
	}

	cubic_data->epoch_ack_count = 0;
	cubic_data->sum_rtt_ticks = 0;
}

/*
 * Record the min RTT and sum samples for the epoch average RTT calculation.
 */
static void
cubic_record_rtt(struct cc_var *ccv)
{
	struct cubic *cubic_data;
	int t_srtt_ticks;

	/* Ignore srtt until a min number of samples have been taken. */
	if (CCV(ccv, t_rttupdated) >= CUBIC_MIN_RTT_SAMPLES) {
		cubic_data = ccv->cc_data;
		t_srtt_ticks = CCV(ccv, t_srtt) / TCP_RTT_SCALE;

		/*
		 * Record the current SRTT as our minrtt if it's the smallest
		 * we've seen or minrtt is currently equal to its initialised
		 * value.
		 *
		 * XXXLAS: Should there be some hysteresis for minrtt?
		 */
		if ((t_srtt_ticks < cubic_data->min_rtt_ticks ||
		    cubic_data->min_rtt_ticks == TCPTV_SRTTBASE)) {
			cubic_data->min_rtt_ticks = max(1, t_srtt_ticks);

			/*
			 * If the connection is within its first congestion
			 * epoch, ensure we prime mean_rtt_ticks with a
			 * reasonable value until the epoch average RTT is
			 * calculated in cubic_post_recovery().
			 */
			if (cubic_data->min_rtt_ticks >
			    cubic_data->mean_rtt_ticks)
				cubic_data->mean_rtt_ticks =
				    cubic_data->min_rtt_ticks;
		}

		/* Sum samples for epoch average RTT calculation. */
		cubic_data->sum_rtt_ticks += t_srtt_ticks;
		cubic_data->epoch_ack_count++;
	}
}

/*
 * Update the ssthresh in the event of congestion.
 */
static void
cubic_ssthresh_update(struct cc_var *ccv)
{
	struct cubic *cubic_data;
	uint32_t ssthresh;
	uint32_t cwnd;

	cubic_data = ccv->cc_data;
	cwnd = CCV(ccv, snd_cwnd);

	/* Fast convergence heuristic. */
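	/*
	 * If cwnd is still below the maximum recorded at the previous
	 * congestion event, this flow is most likely competing with newer
	 * flows for bandwidth. Remembering a slightly reduced max_cwnd
	 * (scaled by CUBIC_FC_FACTOR, a fixed-point factor a little below
	 * one from cc_cubic.h) releases some capacity to them, as described
	 * in the fast convergence section of the I-D.
	 */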
	if (cwnd < cubic_data->max_cwnd) {
		cwnd = ((uint64_t)cwnd * CUBIC_FC_FACTOR) >> CUBIC_SHIFT;
	}
	cubic_data->prev_max_cwnd = cubic_data->max_cwnd;
	cubic_data->max_cwnd = cwnd;

	/*
	 * On the first congestion event, set ssthresh to cwnd * 0.5
	 * and reduce max_cwnd to cwnd * beta. This aligns the cubic concave
	 * region appropriately. On subsequent congestion events, set
	 * ssthresh to cwnd * beta.
	 */
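	/*
	 * The multiplications below are fixed point: CUBIC_BETA is beta
	 * scaled by 2^CUBIC_SHIFT, so ((uint64_t)cwnd * CUBIC_BETA) >>
	 * CUBIC_SHIFT yields approximately cwnd * beta while staying in
	 * integer arithmetic; the uint64_t cast guards against overflow of
	 * the intermediate product.
	 */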
	if ((cubic_data->flags & CUBICFLAG_CONG_EVENT) == 0) {
		ssthresh = cwnd >> 1;
		cubic_data->max_cwnd = ((uint64_t)cwnd *
		    CUBIC_BETA) >> CUBIC_SHIFT;
	} else {
		ssthresh = ((uint64_t)cwnd *
		    CUBIC_BETA) >> CUBIC_SHIFT;
	}
	CCV(ccv, snd_ssthresh) = max(ssthresh, 2 * CCV(ccv, t_maxseg));
}

DECLARE_CC_MODULE(cubic, &cubic_cc_algo);
MODULE_VERSION(cubic, 1);