Add QuartcBidiTest cases for aggregation and small, frequent competing bursts.
Small, frequent spikes expose a problem with GoogCC: it does not compete
effectively for bandwidth. It repeatedly yields until it is sending at about
100 kbps. The competing flow (10 KB every 2 seconds) should only take ~40 kbps,
leaving ~260 kbps for the GoogCC flow, but the bursty nature of the competition
seems to crowd out the Quartc/GoogCC flow.
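As a sanity check on those numbers, a minimal sketch of the arithmetic (the
constants mirror the test parameters below; they are not measured values):

  // Expected bandwidth split between the competing burst flow and GoogCC.
  #include <cstdio>

  int main() {
    const double link_kbps = 300.0;           // bottleneck bandwidth
    const double burst_bits = 10 * 1024 * 8;  // 10 KB per burst
    const double interval_s = 2.0;            // one burst every 2 seconds
    const double competing_kbps = burst_bits / interval_s / 1000.0;  // ~41 kbps
    const double remaining_kbps = link_kbps - competing_kbps;        // ~259 kbps
    std::printf("competing ~%.0f kbps, remaining ~%.0f kbps\n",
                competing_kbps, remaining_kbps);
    return 0;
  }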
BBR has a different problem with these spikes: it ends up with very high
end-to-end delay (approximately 3 seconds).
Both algorithms handle aggregation fairly well, though neither handles it
perfectly: both see a slight reduction in throughput and a slight increase in
average/max one-way delay.
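One plausible contributor to the added delay (a rough sketch, assuming the
simulated queues release an aggregate when either the byte threshold or the
timeout is reached): at 300 kbps, the 100 ms timeout fires long before 10 KB
accumulates, so each aggregate can hold packets back by up to ~100 ms.

  // Which aggregation limit dominates at this link rate?
  #include <cstdio>

  int main() {
    const double link_bytes_per_sec = 300.0 * 1000 / 8;  // 300 kbps bottleneck
    const double threshold_bytes = 10 * 1024;            // aggregation threshold
    const double timeout_s = 0.100;                       // aggregation timeout
    // Bytes arriving during one timeout interval (~3750) stay well below the
    // 10 KB threshold, so the timeout is what releases each aggregate.
    const double bytes_per_timeout = link_bytes_per_sec * timeout_s;
    std::printf("bytes per %.0f ms at line rate: %.0f (threshold: %.0f)\n",
                timeout_s * 1000, bytes_per_timeout, threshold_bytes);
    return 0;
  }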
GoogCC: http://sponge/00221ae4-e916-4277-b1c5-e2a0509d8128
BBR: http://sponge/18e0ab84-0005-4cf6-b2a3-b133f40d9cd8
gfe-relnote: n/a (Quartc test only)
PiperOrigin-RevId: 247069192
Change-Id: I2cda25e404e533f89cd8a4b0a44d8b9df142431d
diff --git a/quic/quartc/test/quartc_bidi_test.cc b/quic/quartc/test/quartc_bidi_test.cc
index 87e4cd2..93dee2e 100644
--- a/quic/quartc/test/quartc_bidi_test.cc
+++ b/quic/quartc/test/quartc_bidi_test.cc
@@ -155,6 +155,40 @@
EXPECT_TRUE(runner.RunTest(QuicTime::Delta::FromSeconds(30)));
}
+TEST_F(QuartcBidiTest, 300kbps200ms2PercentLossSmallCompetingSpikes) {
+ QuicBandwidth bandwidth = QuicBandwidth::FromKBitsPerSecond(300);
+ CreateTransports(bandwidth, QuicTime::Delta::FromMilliseconds(200),
+ 10 * quic::kDefaultMaxPacketSize, /*loss_percent=*/2);
+
+ // Competition sends a small amount of data (10 kb) every 2 seconds.
+ SetupCompetingEndpoints(bandwidth, QuicTime::Delta::FromSeconds(2),
+ /*bytes_per_interval=*/10 * 1024);
+
+ quic::test::BidiTestRunner runner(&simulator_, client_transport_.get(),
+ server_transport_.get());
+ runner.set_client_interceptor(client_trace_interceptor_.get());
+ runner.set_server_interceptor(server_trace_interceptor_.get());
+ EXPECT_TRUE(runner.RunTest(QuicTime::Delta::FromSeconds(30)));
+}
+
+TEST_F(QuartcBidiTest, 300kbps200ms2PercentLossAggregation) {
+ QuicBandwidth bandwidth = QuicBandwidth::FromKBitsPerSecond(300);
+ CreateTransports(bandwidth, QuicTime::Delta::FromMilliseconds(200),
+ 10 * quic::kDefaultMaxPacketSize, /*loss_percent=*/2);
+
+ // Set aggregation on the queues at either end of the bottleneck.
+ client_switch_->port_queue(2)->EnableAggregation(
+ 10 * 1024, QuicTime::Delta::FromMilliseconds(100));
+ server_switch_->port_queue(2)->EnableAggregation(
+ 10 * 1024, QuicTime::Delta::FromMilliseconds(100));
+
+ quic::test::BidiTestRunner runner(&simulator_, client_transport_.get(),
+ server_transport_.get());
+ runner.set_client_interceptor(client_trace_interceptor_.get());
+ runner.set_server_interceptor(server_trace_interceptor_.get());
+ EXPECT_TRUE(runner.RunTest(QuicTime::Delta::FromSeconds(30)));
+}
+
} // namespace
} // namespace test
} // namespace quic