// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "quic/core/congestion_control/cubic_bytes.h"

#include <cmath>
#include <cstdint>

#include "quic/platform/api/quic_flags.h"
#include "quic/platform/api/quic_test.h"
#include "quic/test_tools/mock_clock.h"

namespace quic {
namespace test {
namespace {

const float kBeta = 0.7f;          // Default Cubic backoff factor.
const float kBetaLastMax = 0.85f;  // Additional backoff factor for the last
                                   // max congestion window, applied when a
                                   // loss occurs before the window regains
                                   // its previous maximum.
const uint32_t kNumConnections = 2;
const float kNConnectionBeta = (kNumConnections - 1 + kBeta) / kNumConnections;
const float kNConnectionBetaLastMax =
    (kNumConnections - 1 + kBetaLastMax) / kNumConnections;
const float kNConnectionAlpha = 3 * kNumConnections * kNumConnections *
                                (1 - kNConnectionBeta) / (1 + kNConnectionBeta);
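
// For kNumConnections = 2, the constants above evaluate to
// kNConnectionBeta = 0.85, kNConnectionBetaLastMax = 0.925, and
// kNConnectionAlpha = 12 * 0.15 / 1.85 ~= 0.973: the emulated
// two-connection flow backs off less on loss, and grows slightly more
// slowly per ack, than a single standard Reno connection.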

}  // namespace

class CubicBytesTest : public QuicTest {
 protected:
  CubicBytesTest()
      : one_ms_(QuicTime::Delta::FromMilliseconds(1)),
        hundred_ms_(QuicTime::Delta::FromMilliseconds(100)),
        cubic_(&clock_) {}

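  // Returns the expected Reno-equivalent window after a single ack:
  // byte-wise Reno adds MSS * (alpha * MSS) / cwnd per ack, so the window
  // grows by roughly kNConnectionAlpha MSS per window of acks.  For
  // example, with alpha ~= 0.973 and a 10-packet window, each ack adds
  // about 0.0973 * kDefaultTCPMSS bytes.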
  QuicByteCount RenoCwndInBytes(QuicByteCount current_cwnd) {
    QuicByteCount reno_estimated_cwnd =
        current_cwnd +
        kDefaultTCPMSS * (kNConnectionAlpha * kDefaultTCPMSS) / current_cwnd;
    return reno_estimated_cwnd;
  }

  QuicByteCount ConservativeCwndInBytes(QuicByteCount current_cwnd) {
    QuicByteCount conservative_cwnd = current_cwnd + kDefaultTCPMSS / 2;
    return conservative_cwnd;
  }

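  // Computes the expected window in the cubic-convex region,
  // W(t) = C * t^3 + W_initial with C = 0.4, using the same style of
  // fixed-point arithmetic as the sender: |offset| is (elapsed_time + rtt)
  // in seconds scaled by 2^10, and 410 * offset^3 >> 40 is approximately
  // 0.4 * t^3 because 410 / 2^10 ~= 0.4 and (2^10)^3 / 2^40 = 2^-10.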
  QuicByteCount CubicConvexCwndInBytes(QuicByteCount initial_cwnd,
                                       QuicTime::Delta rtt,
                                       QuicTime::Delta elapsed_time) {
    const int64_t offset =
        ((elapsed_time + rtt).ToMicroseconds() << 10) / 1000000;
    const QuicByteCount delta_congestion_window =
        ((410 * offset * offset * offset) * kDefaultTCPMSS >> 40);
    const QuicByteCount cubic_cwnd = initial_cwnd + delta_congestion_window;
    return cubic_cwnd;
  }

  QuicByteCount LastMaxCongestionWindow() {
    return cubic_.last_max_congestion_window();
  }

  QuicTime::Delta MaxCubicTimeInterval() {
    return cubic_.MaxCubicTimeInterval();
  }

  const QuicTime::Delta one_ms_;
  const QuicTime::Delta hundred_ms_;
  MockClock clock_;
  CubicBytes cubic_;
};

// TODO(jokulik): The original "AboveOrigin" test, below, is very
// loose.  It's nearly impossible to make the test tighter without
// deploying the fix for convex mode.  Once cubic convex is deployed,
// replace "AboveOrigin" with this test.
TEST_F(CubicBytesTest, AboveOriginWithTighterBounds) {
  // Convex growth.
  const QuicTime::Delta rtt_min = hundred_ms_;
  int64_t rtt_min_ms = rtt_min.ToMilliseconds();
  float rtt_min_s = rtt_min_ms / 1000.0;
  QuicByteCount current_cwnd = 10 * kDefaultTCPMSS;
  const QuicByteCount initial_cwnd = current_cwnd;

  clock_.AdvanceTime(one_ms_);
  const QuicTime initial_time = clock_.ApproximateNow();
  const QuicByteCount expected_first_cwnd = RenoCwndInBytes(current_cwnd);
  current_cwnd = cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
                                                 rtt_min, initial_time);
  ASSERT_EQ(expected_first_cwnd, current_cwnd);

  // Normal TCP phase.
  // The maximum number of expected Reno RTTs is calculated by
  // finding the point where the cubic curve and the reno curve meet.
  const int max_reno_rtts =
      std::sqrt(kNConnectionAlpha / (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) -
      2;
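  // Derivation: after n RTTs, Reno has added roughly n * alpha MSS while
  // the cubic curve has added 0.4 * (n * rtt)^3 MSS; equating the two and
  // solving for n gives n = sqrt(alpha / (0.4 * rtt^3)).  The trailing -2
  // keeps the loop safely inside the Reno-dominated region.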
  for (int i = 0; i < max_reno_rtts; ++i) {
    // Alternatively, we expect it to increase by one MSS every time we
    // receive current_cwnd/Alpha acks back.  (This is another way of
    // saying we expect cwnd to increase by approximately Alpha once we
    // receive current_cwnd number of acks back.)
    const uint64_t num_acks_this_epoch =
        current_cwnd / kDefaultTCPMSS / kNConnectionAlpha;
    const QuicByteCount initial_cwnd_this_epoch = current_cwnd;
    for (QuicPacketCount n = 0; n < num_acks_this_epoch; ++n) {
      // Call once per ACK.
      const QuicByteCount expected_next_cwnd = RenoCwndInBytes(current_cwnd);
      current_cwnd = cubic_.CongestionWindowAfterAck(
          kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
      ASSERT_EQ(expected_next_cwnd, current_cwnd);
    }
    // Our byte-wise Reno implementation is an estimate.  We expect
    // the cwnd to increase by approximately one MSS every
    // cwnd/kDefaultTCPMSS/Alpha acks, but it may be off by as much as
    // half a packet for smaller values of current_cwnd.
    const QuicByteCount cwnd_change_this_epoch =
        current_cwnd - initial_cwnd_this_epoch;
    ASSERT_NEAR(kDefaultTCPMSS, cwnd_change_this_epoch, kDefaultTCPMSS / 2);
    clock_.AdvanceTime(hundred_ms_);
  }

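  // Cubic phase: spread the acks evenly across each 100 ms round trip and
  // verify that every per-ack update matches the cubic-convex estimate.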
  for (int i = 0; i < 54; ++i) {
    const uint64_t max_acks_this_epoch = current_cwnd / kDefaultTCPMSS;
    const QuicTime::Delta interval = QuicTime::Delta::FromMicroseconds(
        hundred_ms_.ToMicroseconds() / max_acks_this_epoch);
    for (QuicPacketCount n = 0; n < max_acks_this_epoch; ++n) {
      clock_.AdvanceTime(interval);
      current_cwnd = cubic_.CongestionWindowAfterAck(
          kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
      const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
          initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
      // If we allow per-ack updates, every update is a small cubic update.
      ASSERT_EQ(expected_cwnd, current_cwnd);
    }
  }
  const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
      initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  ASSERT_EQ(expected_cwnd, current_cwnd);
}

// TODO(ianswett): This test was disabled when all fixes were enabled, but it
// may be worth fixing.
TEST_F(CubicBytesTest, DISABLED_AboveOrigin) {
  // Convex growth.
  const QuicTime::Delta rtt_min = hundred_ms_;
  QuicByteCount current_cwnd = 10 * kDefaultTCPMSS;
  // Without the signed-integer, cubic-convex fix, we start out in the
  // wrong mode.
  QuicPacketCount expected_cwnd = RenoCwndInBytes(current_cwnd);
  // Initialize the state.
  clock_.AdvanceTime(one_ms_);
  ASSERT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
                                            rtt_min, clock_.ApproximateNow()));
  current_cwnd = expected_cwnd;
  const QuicPacketCount initial_cwnd = expected_cwnd;
  // Normal TCP phase.
  for (int i = 0; i < 48; ++i) {
    for (QuicPacketCount n = 1;
         n < current_cwnd / kDefaultTCPMSS / kNConnectionAlpha; ++n) {
      // Call once per ACK.
      ASSERT_NEAR(
          current_cwnd,
          cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min,
                                          clock_.ApproximateNow()),
          kDefaultTCPMSS);
    }
    clock_.AdvanceTime(hundred_ms_);
    current_cwnd = cubic_.CongestionWindowAfterAck(
        kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
    // When we fix convex mode and the uint64 arithmetic, we
    // increase the expected_cwnd only after the first 100ms,
    // rather than after the initial 1ms.
    expected_cwnd += kDefaultTCPMSS;
    ASSERT_NEAR(expected_cwnd, current_cwnd, kDefaultTCPMSS);
  }
  // Cubic phase.
  for (int i = 0; i < 52; ++i) {
    for (QuicPacketCount n = 1; n < current_cwnd / kDefaultTCPMSS; ++n) {
      // Call once per ACK.
      ASSERT_NEAR(
          current_cwnd,
          cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min,
                                          clock_.ApproximateNow()),
          kDefaultTCPMSS);
    }
    clock_.AdvanceTime(hundred_ms_);
    current_cwnd = cubic_.CongestionWindowAfterAck(
        kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  }
  // Total time elapsed so far; add min_rtt (0.1s) here as well.
  float elapsed_time_s = 10.0f + 0.1f;
  // |expected_cwnd| is the initial cwnd (in packets) + C * t^3, where
  // C = 0.4 (410/1024 in the fixed-point arithmetic).
  expected_cwnd =
      initial_cwnd / kDefaultTCPMSS +
      (elapsed_time_s * elapsed_time_s * elapsed_time_s * 410) / 1024;
  EXPECT_EQ(expected_cwnd, current_cwnd / kDefaultTCPMSS);
}

// Constructs an artificial scenario to ensure that cubic-convex
// increases are truly fine-grained:
//
// - After starting the epoch, this test advances the elapsed time
// sufficiently far that cubic will do small increases at less than
// MaxCubicTimeInterval() intervals.
//
// - Sets an artificially large initial cwnd to keep the Reno-equivalent
// estimate from overtaking the cubic-convex increases on every ack.
TEST_F(CubicBytesTest, AboveOriginFineGrainedCubing) {
  // Start the test with an artificially large cwnd to prevent Reno
  // from over-taking cubic.
  QuicByteCount current_cwnd = 1000 * kDefaultTCPMSS;
  const QuicByteCount initial_cwnd = current_cwnd;
  const QuicTime::Delta rtt_min = hundred_ms_;
  clock_.AdvanceTime(one_ms_);
  QuicTime initial_time = clock_.ApproximateNow();

  // Start the epoch and then artificially advance the time.
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(600));
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());

  // We expect the algorithm to perform only non-zero, fine-grained cubic
  // increases on every ack in this case.
  for (int i = 0; i < 100; ++i) {
    clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
    const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
        initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
    const QuicByteCount next_cwnd = cubic_.CongestionWindowAfterAck(
        kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
    // Make sure we are performing cubic increases.
    ASSERT_EQ(expected_cwnd, next_cwnd);
    // Make sure that these are non-zero, less-than-packet sized
    // increases.
    ASSERT_GT(next_cwnd, current_cwnd);
    const QuicByteCount cwnd_delta = next_cwnd - current_cwnd;
    ASSERT_GT(kDefaultTCPMSS * .1, cwnd_delta);

    current_cwnd = next_cwnd;
  }
}

// Constructs an artificial scenario to show what happens when we
// allow per-ack updates, rather than limiting update frequency.  In
// this scenario, the first two acks of the epoch produce the same
// cwnd.  When we limit per-ack updates, this would cause the
// cessation of cubic updates for 30ms.  When we allow per-ack
// updates, the window continues to grow on every ack.
TEST_F(CubicBytesTest, PerAckUpdates) {
  // Start the test with a large cwnd and RTT, to force the first
  // increase to be a cubic increase.
  QuicPacketCount initial_cwnd_packets = 150;
  QuicByteCount current_cwnd = initial_cwnd_packets * kDefaultTCPMSS;
  const QuicTime::Delta rtt_min = 350 * one_ms_;

  // Initialize the epoch.
  clock_.AdvanceTime(one_ms_);
  // Keep track of the growth of the reno-equivalent cwnd.
  QuicByteCount reno_cwnd = RenoCwndInBytes(current_cwnd);
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  const QuicByteCount initial_cwnd = current_cwnd;

  // Simulate the return of cwnd packets in less than
  // MaxCubicTimeInterval() time.
  const QuicPacketCount max_acks = initial_cwnd_packets / kNConnectionAlpha;
  const QuicTime::Delta interval = QuicTime::Delta::FromMicroseconds(
      MaxCubicTimeInterval().ToMicroseconds() / (max_acks + 1));
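  // With kNConnectionAlpha ~= 0.973, |max_acks| is roughly the number of
  // acks over which the Reno-equivalent window grows by one full packet.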

  // In this scenario, the first increase is dictated by the cubic
  // equation, but it is less than one byte, so the cwnd doesn't
  // change.  Normally, without per-ack increases, any cwnd plateau
  // will cause the cwnd to be pinned for MaxCubicTimeInterval().  If
  // we enable per-ack updates, the cwnd will continue to grow,
  // regardless of the temporary plateau.
  clock_.AdvanceTime(interval);
  reno_cwnd = RenoCwndInBytes(reno_cwnd);
  ASSERT_EQ(current_cwnd,
            cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
                                            rtt_min, clock_.ApproximateNow()));
  for (QuicPacketCount i = 1; i < max_acks; ++i) {
    clock_.AdvanceTime(interval);
    const QuicByteCount next_cwnd = cubic_.CongestionWindowAfterAck(
        kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
    reno_cwnd = RenoCwndInBytes(reno_cwnd);
    // The window should increase on every ack.
    ASSERT_LT(current_cwnd, next_cwnd);
    ASSERT_EQ(reno_cwnd, next_cwnd);
    current_cwnd = next_cwnd;
  }

  // After all the acks are returned from the epoch, we expect the
  // cwnd to have increased by nearly one packet.  (Not exactly one
  // packet, because our byte-wise Reno algorithm is always a slight
  // underestimate.)  Without per-ack updates, the current_cwnd
  // would otherwise be unchanged.
  const QuicByteCount minimum_expected_increase = kDefaultTCPMSS * .9;
  EXPECT_LT(minimum_expected_increase + initial_cwnd, current_cwnd);
}

TEST_F(CubicBytesTest, LossEvents) {
  const QuicTime::Delta rtt_min = hundred_ms_;
  QuicByteCount current_cwnd = 422 * kDefaultTCPMSS;
  // Without the signed-integer, cubic-convex fix, we mistakenly
  // increment cwnd after only one_ms_ and a single ack.
  QuicByteCount expected_cwnd = RenoCwndInBytes(current_cwnd);
  // Initialize the state.
  clock_.AdvanceTime(one_ms_);
  EXPECT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
                                            rtt_min, clock_.ApproximateNow()));

  // On the first loss, the last max congestion window is set to the
  // congestion window before the loss.
  QuicByteCount pre_loss_cwnd = current_cwnd;
  ASSERT_EQ(0u, LastMaxCongestionWindow());
  expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta);
  EXPECT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
  ASSERT_EQ(pre_loss_cwnd, LastMaxCongestionWindow());
  current_cwnd = expected_cwnd;

  // On the second loss, the current congestion window has not yet
  // reached the last max congestion window.  The last max congestion
  // window will be reduced by an additional backoff factor to allow
  // for competition.
  pre_loss_cwnd = current_cwnd;
  expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta);
  ASSERT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
  current_cwnd = expected_cwnd;
  EXPECT_GT(pre_loss_cwnd, LastMaxCongestionWindow());
  QuicByteCount expected_last_max =
      static_cast<QuicByteCount>(pre_loss_cwnd * kNConnectionBetaLastMax);
  EXPECT_EQ(expected_last_max, LastMaxCongestionWindow());
  EXPECT_LT(expected_cwnd, LastMaxCongestionWindow());
  // Simulate an increase, and check that we are below the origin.
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  EXPECT_GT(LastMaxCongestionWindow(), current_cwnd);

  // On the final loss, simulate the condition where the congestion
  // window had a chance to grow nearly to the last congestion window.
  current_cwnd = LastMaxCongestionWindow() - 1;
  pre_loss_cwnd = current_cwnd;
  expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta);
  EXPECT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
  expected_last_max = pre_loss_cwnd;
  ASSERT_EQ(expected_last_max, LastMaxCongestionWindow());
}

TEST_F(CubicBytesTest, BelowOrigin) {
  // Concave growth.
  const QuicTime::Delta rtt_min = hundred_ms_;
  QuicByteCount current_cwnd = 422 * kDefaultTCPMSS;
  // Without the signed-integer, cubic-convex fix, we mistakenly
  // increment cwnd after only one_ms_ and a single ack.
  QuicByteCount expected_cwnd = RenoCwndInBytes(current_cwnd);
  // Initialize the state.
  clock_.AdvanceTime(one_ms_);
  EXPECT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
                                            rtt_min, clock_.ApproximateNow()));
  expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta);
  EXPECT_EQ(expected_cwnd,
            cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
  current_cwnd = expected_cwnd;
  // First update after loss to initialize the epoch.
  current_cwnd = cubic_.CongestionWindowAfterAck(
      kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  // Cubic phase.
  for (int i = 0; i < 40; ++i) {
    clock_.AdvanceTime(hundred_ms_);
    current_cwnd = cubic_.CongestionWindowAfterAck(
        kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
  }
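  // Regression value: after 40 * 100 ms of concave growth, the window has
  // recovered to 553632 bytes (~379 packets of kDefaultTCPMSS = 1460),
  // still below the pre-loss window of 422 * 1460 = 616120 bytes.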
  expected_cwnd = 553632;
  EXPECT_EQ(expected_cwnd, current_cwnd);
}

}  // namespace test
}  // namespace quic