Project import generated by Copybara.
PiperOrigin-RevId: 237361882
Change-Id: I109a68f44db867b20f8c6a7732b0ce657133e52a
diff --git a/quic/core/congestion_control/bandwidth_sampler.cc b/quic/core/congestion_control/bandwidth_sampler.cc
new file mode 100644
index 0000000..c60eaa2
--- /dev/null
+++ b/quic/core/congestion_control/bandwidth_sampler.cc
@@ -0,0 +1,183 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/bandwidth_sampler.h"
+
+#include <algorithm>
+
+#include "net/third_party/quiche/src/quic/platform/api/quic_bug_tracker.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flag_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+
+namespace quic {
+BandwidthSampler::BandwidthSampler()
+ : total_bytes_sent_(0),
+ total_bytes_acked_(0),
+ total_bytes_sent_at_last_acked_packet_(0),
+ last_acked_packet_sent_time_(QuicTime::Zero()),
+ last_acked_packet_ack_time_(QuicTime::Zero()),
+ is_app_limited_(false),
+ connection_state_map_() {}
+
+BandwidthSampler::~BandwidthSampler() {}
+
+void BandwidthSampler::OnPacketSent(
+ QuicTime sent_time,
+ QuicPacketNumber packet_number,
+ QuicByteCount bytes,
+ QuicByteCount bytes_in_flight,
+ HasRetransmittableData has_retransmittable_data) {
+ last_sent_packet_ = packet_number;
+
+ if (has_retransmittable_data != HAS_RETRANSMITTABLE_DATA) {
+ return;
+ }
+
+ total_bytes_sent_ += bytes;
+
+ // If there are no packets in flight, the time at which the new transmission
+ // opens can be treated as the A_0 point for the purpose of bandwidth
+ // sampling. This underestimates bandwidth to some extent, and produces some
+  // artificially low samples for most packets in flight, but it provides
+  // samples at important points where we would not have them otherwise, most
+ // importantly at the beginning of the connection.
+ if (bytes_in_flight == 0) {
+ last_acked_packet_ack_time_ = sent_time;
+ total_bytes_sent_at_last_acked_packet_ = total_bytes_sent_;
+
+    // In this situation ack compression is not a concern; set the send rate to
+    // effectively infinite.
+ last_acked_packet_sent_time_ = sent_time;
+ }
+
+ if (!connection_state_map_.IsEmpty() &&
+ packet_number >
+ connection_state_map_.last_packet() + kMaxTrackedPackets) {
+    QUIC_BUG << "BandwidthSampler in-flight packet map has exceeded maximum "
+                "number of tracked packets.";
+ }
+
+ bool success =
+ connection_state_map_.Emplace(packet_number, sent_time, bytes, *this);
+ QUIC_BUG_IF(!success) << "BandwidthSampler failed to insert the packet "
+ "into the map, most likely because it's already "
+ "in it.";
+}
+
+BandwidthSample BandwidthSampler::OnPacketAcknowledged(
+ QuicTime ack_time,
+ QuicPacketNumber packet_number) {
+ ConnectionStateOnSentPacket* sent_packet_pointer =
+ connection_state_map_.GetEntry(packet_number);
+ if (sent_packet_pointer == nullptr) {
+    // See the TODO in OnPacketLost() below.
+ return BandwidthSample();
+ }
+ BandwidthSample sample =
+ OnPacketAcknowledgedInner(ack_time, packet_number, *sent_packet_pointer);
+ connection_state_map_.Remove(packet_number);
+ return sample;
+}
+
+BandwidthSample BandwidthSampler::OnPacketAcknowledgedInner(
+ QuicTime ack_time,
+ QuicPacketNumber packet_number,
+ const ConnectionStateOnSentPacket& sent_packet) {
+ total_bytes_acked_ += sent_packet.size;
+ total_bytes_sent_at_last_acked_packet_ = sent_packet.total_bytes_sent;
+ last_acked_packet_sent_time_ = sent_packet.sent_time;
+ last_acked_packet_ack_time_ = ack_time;
+
+  // Exit the app-limited phase once a packet that was sent after
+  // OnAppLimited() was called gets acknowledged.
+ if (is_app_limited_ && packet_number > end_of_app_limited_phase_) {
+ is_app_limited_ = false;
+ }
+
+ // There might have been no packets acknowledged at the moment when the
+ // current packet was sent. In that case, there is no bandwidth sample to
+ // make.
+ if (sent_packet.last_acked_packet_sent_time == QuicTime::Zero()) {
+ return BandwidthSample();
+ }
+
+ // Infinite rate indicates that the sampler is supposed to discard the
+ // current send rate sample and use only the ack rate.
+ QuicBandwidth send_rate = QuicBandwidth::Infinite();
+ if (sent_packet.sent_time > sent_packet.last_acked_packet_sent_time) {
+ send_rate = QuicBandwidth::FromBytesAndTimeDelta(
+ sent_packet.total_bytes_sent -
+ sent_packet.total_bytes_sent_at_last_acked_packet,
+ sent_packet.sent_time - sent_packet.last_acked_packet_sent_time);
+ }
+
+  // During the slope calculation, ensure that the ack time of the current
+  // packet is always larger than the ack time of the previously acked packet;
+  // otherwise, division by zero or integer underflow can occur.
+ if (ack_time <= sent_packet.last_acked_packet_ack_time) {
+ // TODO(wub): Compare this code count before and after fixing clock jitter
+ // issue.
+ if (sent_packet.last_acked_packet_ack_time == sent_packet.sent_time) {
+      // This is the first packet after quiescence.
+ QUIC_CODE_COUNT_N(quic_prev_ack_time_larger_than_current_ack_time, 1, 2);
+ } else {
+ QUIC_CODE_COUNT_N(quic_prev_ack_time_larger_than_current_ack_time, 2, 2);
+ }
+    QUIC_LOG(ERROR) << "Ack time of the previously acked packet: "
+                    << sent_packet.last_acked_packet_ack_time.ToDebuggingValue()
+                    << " is larger than the ack time of the current packet: "
+                    << ack_time.ToDebuggingValue();
+ return BandwidthSample();
+ }
+ QuicBandwidth ack_rate = QuicBandwidth::FromBytesAndTimeDelta(
+ total_bytes_acked_ -
+ sent_packet.total_bytes_acked_at_the_last_acked_packet,
+ ack_time - sent_packet.last_acked_packet_ack_time);
+
+ BandwidthSample sample;
+ sample.bandwidth = std::min(send_rate, ack_rate);
+ // Note: this sample does not account for delayed acknowledgement time. This
+ // means that the RTT measurements here can be artificially high, especially
+ // on low bandwidth connections.
+ sample.rtt = ack_time - sent_packet.sent_time;
+ // A sample is app-limited if the packet was sent during the app-limited
+ // phase.
+ sample.is_app_limited = sent_packet.is_app_limited;
+ return sample;
+}
+
+void BandwidthSampler::OnPacketLost(QuicPacketNumber packet_number) {
+ // TODO(vasilvv): see the comment for the case of missing packets in
+ // BandwidthSampler::OnPacketAcknowledged on why this does not raise a
+ // QUIC_BUG when removal fails.
+ connection_state_map_.Remove(packet_number);
+}
+
+void BandwidthSampler::OnAppLimited() {
+ is_app_limited_ = true;
+ end_of_app_limited_phase_ = last_sent_packet_;
+}
+
+void BandwidthSampler::RemoveObsoletePackets(QuicPacketNumber least_unacked) {
+ while (!connection_state_map_.IsEmpty() &&
+ connection_state_map_.first_packet() < least_unacked) {
+ connection_state_map_.Remove(connection_state_map_.first_packet());
+ }
+}
+
+QuicByteCount BandwidthSampler::total_bytes_acked() const {
+ return total_bytes_acked_;
+}
+
+bool BandwidthSampler::is_app_limited() const {
+ return is_app_limited_;
+}
+
+QuicPacketNumber BandwidthSampler::end_of_app_limited_phase() const {
+ return end_of_app_limited_phase_;
+}
+
+} // namespace quic
diff --git a/quic/core/congestion_control/bandwidth_sampler.h b/quic/core/congestion_control/bandwidth_sampler.h
new file mode 100644
index 0000000..69aaae7
--- /dev/null
+++ b/quic/core/congestion_control/bandwidth_sampler.h
@@ -0,0 +1,294 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_BANDWIDTH_SAMPLER_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_BANDWIDTH_SAMPLER_H_
+
+#include "net/third_party/quiche/src/quic/core/packet_number_indexed_queue.h"
+#include "net/third_party/quiche/src/quic/core/quic_bandwidth.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
+
+namespace quic {
+
+namespace test {
+class BandwidthSamplerPeer;
+} // namespace test
+
+struct QUIC_EXPORT_PRIVATE BandwidthSample {
+ // The bandwidth at that particular sample. Zero if no valid bandwidth sample
+ // is available.
+ QuicBandwidth bandwidth;
+
+ // The RTT measurement at this particular sample. Zero if no RTT sample is
+ // available. Does not correct for delayed ack time.
+ QuicTime::Delta rtt;
+
+ // Indicates whether the sample might be artificially low because the sender
+ // did not have enough data to send in order to saturate the link.
+ bool is_app_limited;
+
+ BandwidthSample()
+ : bandwidth(QuicBandwidth::Zero()),
+ rtt(QuicTime::Delta::Zero()),
+ is_app_limited(false) {}
+};
+
+// An interface common to any class that can provide bandwidth samples from the
+// information per individual acknowledged packet.
+class QUIC_EXPORT_PRIVATE BandwidthSamplerInterface {
+ public:
+ virtual ~BandwidthSamplerInterface() {}
+
+ // Inputs the sent packet information into the sampler. Assumes that all
+ // packets are sent in order. The information about the packet will not be
+// released from the sampler until the packet is either acknowledged or
+ // declared lost.
+ virtual void OnPacketSent(
+ QuicTime sent_time,
+ QuicPacketNumber packet_number,
+ QuicByteCount bytes,
+ QuicByteCount bytes_in_flight,
+ HasRetransmittableData has_retransmittable_data) = 0;
+
+ // Notifies the sampler that the |packet_number| is acknowledged. Returns a
+  // bandwidth sample. If no bandwidth sample is available, the returned
+  // sample has a bandwidth of QuicBandwidth::Zero().
+ virtual BandwidthSample OnPacketAcknowledged(
+ QuicTime ack_time,
+ QuicPacketNumber packet_number) = 0;
+
+ // Informs the sampler that a packet is considered lost and it should no
+ // longer keep track of it.
+ virtual void OnPacketLost(QuicPacketNumber packet_number) = 0;
+
+ // Informs the sampler that the connection is currently app-limited, causing
+ // the sampler to enter the app-limited phase. The phase will expire by
+ // itself.
+ virtual void OnAppLimited() = 0;
+
+  // Removes all the packets lower than the specified packet number.
+ virtual void RemoveObsoletePackets(QuicPacketNumber least_unacked) = 0;
+
+ // Total number of bytes currently acknowledged by the receiver.
+ virtual QuicByteCount total_bytes_acked() const = 0;
+
+ // Application-limited information exported for debugging.
+ virtual bool is_app_limited() const = 0;
+ virtual QuicPacketNumber end_of_app_limited_phase() const = 0;
+};
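+
+// A typical call sequence for an implementation of this interface, as seen
+// from a congestion controller (an illustrative sketch, not a requirement of
+// the interface): OnPacketSent() is called for every congestion-controlled
+// packet sent; OnPacketAcknowledged() or OnPacketLost() is called once the
+// fate of the packet is known, and the returned samples are fed into a
+// maximum filter; OnAppLimited() is called whenever the connection runs out
+// of data to send; RemoveObsoletePackets() is called periodically to bound
+// the amount of tracked state.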
+
+// BandwidthSampler keeps track of sent and acknowledged packets and outputs a
+// bandwidth sample for every packet acknowledged. The samples are taken for
+// individual packets, and are not filtered; the consumer has to filter the
+// bandwidth samples itself. In certain cases, the sampler will severely
+// underestimate the bandwidth for short periods of time; hence, a maximum
+// filter with a window of at least one RTT is recommended.
+//
+// This class bases its samples on the slope of two curves: the number of bytes
+// sent over time, and the number of bytes acknowledged as received over time.
+// It produces a sample of both slopes for every packet that gets acknowledged,
+// based on a slope between two points on each of the corresponding curves. Note
+// that due to packet loss, the two curves can drift further and further apart,
+// meaning that it is not feasible to compare byte values coming from different
+// curves with each other.
+//
+// The obvious points for measuring a slope sample are the ones corresponding to
+// the packet that was just acknowledged. Let us denote them as S_1 (point at
+// which the current packet was sent) and A_1 (point at which the current packet
+// was acknowledged). However, taking a slope requires two points on each line,
+// so estimating bandwidth requires picking a packet in the past with respect to
+// which the slope is measured.
+//
+// For that purpose, BandwidthSampler always keeps track of the most recently
+// acknowledged packet, and records it together with every outgoing packet.
+// When a packet gets acknowledged (A_1), it has not only information about when
+// it itself was sent (S_1), but also the information about the latest
+// acknowledged packet right before it was sent (S_0 and A_0).
+//
+// Based on that data, send and ack rate are estimated as:
+// send_rate = (bytes(S_1) - bytes(S_0)) / (time(S_1) - time(S_0))
+// ack_rate = (bytes(A_1) - bytes(A_0)) / (time(A_1) - time(A_0))
+//
+// Here, the ack rate is intuitively the rate we want to treat as bandwidth.
+// However, in certain cases (e.g. ack compression) the ack rate at a point may
+// end up higher than the rate at which the data was originally sent, which is
+// not indicative of the real bandwidth. Hence, we use the send rate as an upper
+// bound, and the sample value is
+// rate_sample = min(send_rate, ack_rate)
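+//
+// As a purely illustrative example: suppose the previously acked packet had
+// bytes(S_0) = 100 kB, time(S_0) = 1.00 s, bytes(A_0) = 80 kB and
+// time(A_0) = 1.05 s, while the current packet has bytes(S_1) = 150 kB,
+// time(S_1) = 1.40 s, bytes(A_1) = 120 kB and time(A_1) = 1.45 s. Then
+// send_rate = 50 kB / 0.40 s = 125 kB/s, ack_rate = 40 kB / 0.40 s = 100 kB/s,
+// and the resulting sample is min(125, 100) = 100 kB/s.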
+//
+// An important edge case handled by the sampler is tracking the app-limited
+// samples. There are multiple meanings of "app-limited" used interchangeably,
+// so it is important to understand and to be able to distinguish between
+// them.
+//
+// Meaning 1: connection state. The connection is said to be app-limited when
+// there is no outstanding data to send. This means that certain bandwidth
+// samples in the future would not be an accurate indication of the link
+// capacity, and it is important to inform the consumer about that. Whenever
+// the connection becomes app-limited, the sampler is notified via the
+// OnAppLimited() method.
+//
+// Meaning 2: a phase in the bandwidth sampler. As soon as the bandwidth
+// sampler is notified that the connection is app-limited, it enters the
+// app-limited phase. In that phase, all *sent* packets are marked as
+// app-limited. Note that the connection itself does not have to be
+// app-limited during the app-limited phase, and in fact it will not be
+// (otherwise how would it send packets?). The boolean flag below indicates
+// whether the sampler is in that phase.
+//
+// Meaning 3: a flag on the sent packet and on the sample. If a sent packet is
+// sent during the app-limited phase, the resulting sample related to the
+// packet will be marked as app-limited.
+//
+// With the terminology issue out of the way, let us consider the question of
+// what kind of situation it addresses.
+//
+// Consider a scenario where we first send packets 1 to 20 at a regular
+// bandwidth, and then immediately run out of data. After a few seconds, we send
+// packets 21 to 60, and only receive ack for 21 between sending packets 40 and
+// 41. In this case, when we sample bandwidth for packets 21 to 40, the S_0/A_0
+// we use to compute the slope is going to be packet 20, a few seconds apart
+// from the current packet, hence the resulting estimate would be extremely low
+// and not indicative of anything. Only at packet 41 will S_0/A_0 become 21,
+// meaning that the bandwidth sample finally excludes the quiescence.
+//
+// Based on the analysis of that scenario, we implement the following rule: once
+// OnAppLimited() is called, all sent packets will produce app-limited samples
+// up until an ack for a packet that was sent after OnAppLimited() was called.
+// Note that while the scenario above is not the only scenario when the
+// connection is app-limited, the approach works in other cases too.
+class QUIC_EXPORT_PRIVATE BandwidthSampler : public BandwidthSamplerInterface {
+ public:
+ BandwidthSampler();
+ ~BandwidthSampler() override;
+
+ void OnPacketSent(QuicTime sent_time,
+ QuicPacketNumber packet_number,
+ QuicByteCount bytes,
+ QuicByteCount bytes_in_flight,
+ HasRetransmittableData has_retransmittable_data) override;
+ BandwidthSample OnPacketAcknowledged(QuicTime ack_time,
+ QuicPacketNumber packet_number) override;
+ void OnPacketLost(QuicPacketNumber packet_number) override;
+
+ void OnAppLimited() override;
+
+ void RemoveObsoletePackets(QuicPacketNumber least_unacked) override;
+
+ QuicByteCount total_bytes_acked() const override;
+ bool is_app_limited() const override;
+ QuicPacketNumber end_of_app_limited_phase() const override;
+
+ private:
+ friend class test::BandwidthSamplerPeer;
+
+ // ConnectionStateOnSentPacket represents the information about a sent packet
+ // and the state of the connection at the moment the packet was sent,
+ // specifically the information about the most recently acknowledged packet at
+ // that moment.
+ struct ConnectionStateOnSentPacket {
+ // Time at which the packet is sent.
+ QuicTime sent_time;
+
+ // Size of the packet.
+ QuicByteCount size;
+
+ // The value of |total_bytes_sent_| at the time the packet was sent.
+ // Includes the packet itself.
+ QuicByteCount total_bytes_sent;
+
+ // The value of |total_bytes_sent_at_last_acked_packet_| at the time the
+ // packet was sent.
+ QuicByteCount total_bytes_sent_at_last_acked_packet;
+
+ // The value of |last_acked_packet_sent_time_| at the time the packet was
+ // sent.
+ QuicTime last_acked_packet_sent_time;
+
+ // The value of |last_acked_packet_ack_time_| at the time the packet was
+ // sent.
+ QuicTime last_acked_packet_ack_time;
+
+ // The value of |total_bytes_acked_| at the time the packet was
+ // sent.
+ QuicByteCount total_bytes_acked_at_the_last_acked_packet;
+
+ // The value of |is_app_limited_| at the time the packet was
+ // sent.
+ bool is_app_limited;
+
+ // Snapshot constructor. Records the current state of the bandwidth
+ // sampler.
+ ConnectionStateOnSentPacket(QuicTime sent_time,
+ QuicByteCount size,
+ const BandwidthSampler& sampler)
+ : sent_time(sent_time),
+ size(size),
+ total_bytes_sent(sampler.total_bytes_sent_),
+ total_bytes_sent_at_last_acked_packet(
+ sampler.total_bytes_sent_at_last_acked_packet_),
+ last_acked_packet_sent_time(sampler.last_acked_packet_sent_time_),
+ last_acked_packet_ack_time(sampler.last_acked_packet_ack_time_),
+ total_bytes_acked_at_the_last_acked_packet(
+ sampler.total_bytes_acked_),
+ is_app_limited(sampler.is_app_limited_) {}
+
+ // Default constructor. Required to put this structure into
+ // PacketNumberIndexedQueue.
+ ConnectionStateOnSentPacket()
+ : sent_time(QuicTime::Zero()),
+ size(0),
+ total_bytes_sent(0),
+ total_bytes_sent_at_last_acked_packet(0),
+ last_acked_packet_sent_time(QuicTime::Zero()),
+ last_acked_packet_ack_time(QuicTime::Zero()),
+ total_bytes_acked_at_the_last_acked_packet(0),
+ is_app_limited(false) {}
+ };
+
+ // The total number of congestion controlled bytes sent during the connection.
+ QuicByteCount total_bytes_sent_;
+
+ // The total number of congestion controlled bytes which were acknowledged.
+ QuicByteCount total_bytes_acked_;
+
+ // The value of |total_bytes_sent_| at the time the last acknowledged packet
+ // was sent. Valid only when |last_acked_packet_sent_time_| is valid.
+ QuicByteCount total_bytes_sent_at_last_acked_packet_;
+
+ // The time at which the last acknowledged packet was sent. Set to
+ // QuicTime::Zero() if no valid timestamp is available.
+ QuicTime last_acked_packet_sent_time_;
+
+ // The time at which the most recent packet was acknowledged.
+ QuicTime last_acked_packet_ack_time_;
+
+ // The most recently sent packet.
+ QuicPacketNumber last_sent_packet_;
+
+ // Indicates whether the bandwidth sampler is currently in an app-limited
+ // phase.
+ bool is_app_limited_;
+
+  // The last packet sent before the current app-limited phase started. An
+  // acknowledgement for any packet sent after this one causes the sampler to
+  // exit the app-limited phase.
+ QuicPacketNumber end_of_app_limited_phase_;
+
+ // Record of the connection state at the point where each packet in flight was
+ // sent, indexed by the packet number.
+ PacketNumberIndexedQueue<ConnectionStateOnSentPacket> connection_state_map_;
+
+ // Handles the actual bandwidth calculations, whereas the outer method handles
+ // retrieving and removing |sent_packet|.
+ BandwidthSample OnPacketAcknowledgedInner(
+ QuicTime ack_time,
+ QuicPacketNumber packet_number,
+ const ConnectionStateOnSentPacket& sent_packet);
+};
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_BANDWIDTH_SAMPLER_H_
diff --git a/quic/core/congestion_control/bandwidth_sampler_test.cc b/quic/core/congestion_control/bandwidth_sampler_test.cc
new file mode 100644
index 0000000..e9b74c7
--- /dev/null
+++ b/quic/core/congestion_control/bandwidth_sampler_test.cc
@@ -0,0 +1,398 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/bandwidth_sampler.h"
+
+#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+#include "net/third_party/quiche/src/quic/test_tools/mock_clock.h"
+
+namespace quic {
+namespace test {
+
+class BandwidthSamplerPeer {
+ public:
+ static size_t GetNumberOfTrackedPackets(const BandwidthSampler& sampler) {
+ return sampler.connection_state_map_.number_of_present_entries();
+ }
+
+ static QuicByteCount GetPacketSize(const BandwidthSampler& sampler,
+ QuicPacketNumber packet_number) {
+ return sampler.connection_state_map_.GetEntry(packet_number)->size;
+ }
+};
+
+const QuicByteCount kRegularPacketSize = 1280;
+// Enforce divisibility for some of the tests.
+static_assert((kRegularPacketSize & 31) == 0,
+              "kRegularPacketSize has to be divisible by 32");
+
+// A test fixture with utility methods for BandwidthSampler tests.
+class BandwidthSamplerTest : public QuicTest {
+ protected:
+ BandwidthSamplerTest() : bytes_in_flight_(0) {
+ // Ensure that the clock does not start at zero.
+ clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
+ }
+
+ MockClock clock_;
+ BandwidthSampler sampler_;
+ QuicByteCount bytes_in_flight_;
+
+ void SendPacketInner(uint64_t packet_number,
+ QuicByteCount bytes,
+ HasRetransmittableData has_retransmittable_data) {
+ sampler_.OnPacketSent(clock_.Now(), QuicPacketNumber(packet_number), bytes,
+ bytes_in_flight_, has_retransmittable_data);
+ if (has_retransmittable_data == HAS_RETRANSMITTABLE_DATA) {
+ bytes_in_flight_ += bytes;
+ }
+ }
+
+ void SendPacket(uint64_t packet_number) {
+ SendPacketInner(packet_number, kRegularPacketSize,
+ HAS_RETRANSMITTABLE_DATA);
+ }
+
+ BandwidthSample AckPacketInner(uint64_t packet_number) {
+ QuicByteCount size = BandwidthSamplerPeer::GetPacketSize(
+ sampler_, QuicPacketNumber(packet_number));
+ bytes_in_flight_ -= size;
+ return sampler_.OnPacketAcknowledged(clock_.Now(),
+ QuicPacketNumber(packet_number));
+ }
+
+  // Acknowledge receipt of a packet and expect the resulting sample not to be
+  // app-limited.
+ QuicBandwidth AckPacket(uint64_t packet_number) {
+ BandwidthSample sample = AckPacketInner(packet_number);
+ EXPECT_FALSE(sample.is_app_limited);
+ return sample.bandwidth;
+ }
+
+ void LosePacket(uint64_t packet_number) {
+ QuicByteCount size = BandwidthSamplerPeer::GetPacketSize(
+ sampler_, QuicPacketNumber(packet_number));
+ bytes_in_flight_ -= size;
+ sampler_.OnPacketLost(QuicPacketNumber(packet_number));
+ }
+
+  // Sends 20 packets at a constant inter-packet time, then sends another 20
+  // packets while acknowledging the first 20.
+ void Send40PacketsAndAckFirst20(QuicTime::Delta time_between_packets) {
+ // Send 20 packets at a constant inter-packet time.
+ for (int i = 1; i <= 20; i++) {
+ SendPacket(i);
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+ // Ack packets 1 to 20, while sending new packets at the same rate as
+ // before.
+ for (int i = 1; i <= 20; i++) {
+ AckPacket(i);
+ SendPacket(i + 20);
+ clock_.AdvanceTime(time_between_packets);
+ }
+ }
+};
+
+// Test the sampler in a simple stop-and-wait sender setting.
+TEST_F(BandwidthSamplerTest, SendAndWait) {
+ QuicTime::Delta time_between_packets = QuicTime::Delta::FromMilliseconds(10);
+ QuicBandwidth expected_bandwidth =
+ QuicBandwidth::FromBytesPerSecond(kRegularPacketSize * 100);
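+  // One kRegularPacketSize packet every 10 ms is 100 packets per second, hence
+  // the expected bandwidth of kRegularPacketSize * 100 bytes per second.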
+
+  // Send packets at a constant bandwidth.
+ for (int i = 1; i < 20; i++) {
+ SendPacket(i);
+ clock_.AdvanceTime(time_between_packets);
+ QuicBandwidth current_sample = AckPacket(i);
+ EXPECT_EQ(expected_bandwidth, current_sample);
+ }
+
+  // Send packets at exponentially decreasing bandwidth.
+ for (int i = 20; i < 25; i++) {
+ time_between_packets = time_between_packets * 2;
+ expected_bandwidth = expected_bandwidth * 0.5;
+
+ SendPacket(i);
+ clock_.AdvanceTime(time_between_packets);
+ QuicBandwidth current_sample = AckPacket(i);
+ EXPECT_EQ(expected_bandwidth, current_sample);
+ }
+ EXPECT_EQ(0u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+ EXPECT_EQ(0u, bytes_in_flight_);
+}
+
+// Test the sampler during regular windowed sender scenario with fixed
+// CWND of 20.
+TEST_F(BandwidthSamplerTest, SendPaced) {
+ const QuicTime::Delta time_between_packets =
+ QuicTime::Delta::FromMilliseconds(1);
+ QuicBandwidth expected_bandwidth =
+ QuicBandwidth::FromKBytesPerSecond(kRegularPacketSize);
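+  // One kRegularPacketSize packet per millisecond equals kRegularPacketSize
+  // kilobytes per second, hence FromKBytesPerSecond(kRegularPacketSize).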
+
+ Send40PacketsAndAckFirst20(time_between_packets);
+
+ // Ack the packets 21 to 40, arriving at the correct bandwidth.
+ QuicBandwidth last_bandwidth = QuicBandwidth::Zero();
+ for (int i = 21; i <= 40; i++) {
+ last_bandwidth = AckPacket(i);
+ EXPECT_EQ(expected_bandwidth, last_bandwidth);
+ clock_.AdvanceTime(time_between_packets);
+ }
+ EXPECT_EQ(0u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+ EXPECT_EQ(0u, bytes_in_flight_);
+}
+
+// Test the sampler in a scenario where 50% of packets are consistently lost.
+TEST_F(BandwidthSamplerTest, SendWithLosses) {
+ const QuicTime::Delta time_between_packets =
+ QuicTime::Delta::FromMilliseconds(1);
+ QuicBandwidth expected_bandwidth =
+ QuicBandwidth::FromKBytesPerSecond(kRegularPacketSize) * 0.5;
+
+ // Send 20 packets, each 1 ms apart.
+ for (int i = 1; i <= 20; i++) {
+ SendPacket(i);
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+  // Ack the even-numbered packets among 1 to 20 and lose the odd-numbered
+  // ones, while sending new packets at the same rate as before.
+ for (int i = 1; i <= 20; i++) {
+ if (i % 2 == 0) {
+ AckPacket(i);
+ } else {
+ LosePacket(i);
+ }
+ SendPacket(i + 20);
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+ // Ack the packets 21 to 40 with the same loss pattern.
+ QuicBandwidth last_bandwidth = QuicBandwidth::Zero();
+ for (int i = 21; i <= 40; i++) {
+ if (i % 2 == 0) {
+ last_bandwidth = AckPacket(i);
+ EXPECT_EQ(expected_bandwidth, last_bandwidth);
+ } else {
+ LosePacket(i);
+ }
+ clock_.AdvanceTime(time_between_packets);
+ }
+ EXPECT_EQ(0u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+ EXPECT_EQ(0u, bytes_in_flight_);
+}
+
+// Test the sampler in a scenario where 50% of packets are not
+// congestion controlled (specifically, non-retransmittable data is not
+// congestion controlled). Should be functionally consistent in behavior with
+// the SendWithLosses test.
+TEST_F(BandwidthSamplerTest, NotCongestionControlled) {
+ const QuicTime::Delta time_between_packets =
+ QuicTime::Delta::FromMilliseconds(1);
+ QuicBandwidth expected_bandwidth =
+ QuicBandwidth::FromKBytesPerSecond(kRegularPacketSize) * 0.5;
+
+  // Send 20 packets, each 1 ms apart. Every odd-numbered packet is not
+  // congestion controlled.
+ for (int i = 1; i <= 20; i++) {
+ SendPacketInner(
+ i, kRegularPacketSize,
+ i % 2 == 0 ? HAS_RETRANSMITTABLE_DATA : NO_RETRANSMITTABLE_DATA);
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+ // Ensure only congestion controlled packets are tracked.
+ EXPECT_EQ(10u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+
+  // Ack the even-numbered packets among 1 to 20, ignoring the odd-numbered
+  // ones, while sending new packets at the same rate and pattern as before.
+ for (int i = 1; i <= 20; i++) {
+ if (i % 2 == 0) {
+ AckPacket(i);
+ }
+ SendPacketInner(
+ i + 20, kRegularPacketSize,
+ i % 2 == 0 ? HAS_RETRANSMITTABLE_DATA : NO_RETRANSMITTABLE_DATA);
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+  // Ack packets 21 to 40 with the same congestion-controlled pattern, i.e.
+  // only the even-numbered ones.
+ QuicBandwidth last_bandwidth = QuicBandwidth::Zero();
+ for (int i = 21; i <= 40; i++) {
+ if (i % 2 == 0) {
+ last_bandwidth = AckPacket(i);
+ EXPECT_EQ(expected_bandwidth, last_bandwidth);
+ }
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+ // Since only congestion controlled packets are entered into the map, it has
+ // to be empty at this point.
+ EXPECT_EQ(0u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+ EXPECT_EQ(0u, bytes_in_flight_);
+}
+
+// Simulate a situation where ACKs arrive in a burst and earlier than usual, thus
+// producing an ACK rate which is higher than the original send rate.
+TEST_F(BandwidthSamplerTest, CompressedAck) {
+ const QuicTime::Delta time_between_packets =
+ QuicTime::Delta::FromMilliseconds(1);
+ QuicBandwidth expected_bandwidth =
+ QuicBandwidth::FromKBytesPerSecond(kRegularPacketSize);
+
+ Send40PacketsAndAckFirst20(time_between_packets);
+
+ // Simulate an RTT somewhat lower than the one for 1-to-21 transmission.
+ clock_.AdvanceTime(time_between_packets * 15);
+
+  // Ack packets 21 to 40 almost at once.
+ QuicBandwidth last_bandwidth = QuicBandwidth::Zero();
+ QuicTime::Delta ridiculously_small_time_delta =
+ QuicTime::Delta::FromMicroseconds(20);
+ for (int i = 21; i <= 40; i++) {
+ last_bandwidth = AckPacket(i);
+ clock_.AdvanceTime(ridiculously_small_time_delta);
+ }
+ EXPECT_EQ(expected_bandwidth, last_bandwidth);
+ EXPECT_EQ(0u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+ EXPECT_EQ(0u, bytes_in_flight_);
+}
+
+// Tests receiving ACK packets in the reverse order.
+TEST_F(BandwidthSamplerTest, ReorderedAck) {
+ const QuicTime::Delta time_between_packets =
+ QuicTime::Delta::FromMilliseconds(1);
+ QuicBandwidth expected_bandwidth =
+ QuicBandwidth::FromKBytesPerSecond(kRegularPacketSize);
+
+ Send40PacketsAndAckFirst20(time_between_packets);
+
+ // Ack the packets 21 to 40 in the reverse order, while sending packets 41 to
+ // 60.
+ QuicBandwidth last_bandwidth = QuicBandwidth::Zero();
+ for (int i = 0; i < 20; i++) {
+ last_bandwidth = AckPacket(40 - i);
+ EXPECT_EQ(expected_bandwidth, last_bandwidth);
+ SendPacket(41 + i);
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+ // Ack the packets 41 to 60, now in the regular order.
+ for (int i = 41; i <= 60; i++) {
+ last_bandwidth = AckPacket(i);
+ EXPECT_EQ(expected_bandwidth, last_bandwidth);
+ clock_.AdvanceTime(time_between_packets);
+ }
+ EXPECT_EQ(0u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+ EXPECT_EQ(0u, bytes_in_flight_);
+}
+
+// Test the app-limited logic.
+TEST_F(BandwidthSamplerTest, AppLimited) {
+ const QuicTime::Delta time_between_packets =
+ QuicTime::Delta::FromMilliseconds(1);
+ QuicBandwidth expected_bandwidth =
+ QuicBandwidth::FromKBytesPerSecond(kRegularPacketSize);
+
+ Send40PacketsAndAckFirst20(time_between_packets);
+
+ // We are now app-limited. Ack 21 to 40 as usual, but do not send anything for
+ // now.
+ sampler_.OnAppLimited();
+ for (int i = 21; i <= 40; i++) {
+ QuicBandwidth current_sample = AckPacket(i);
+ EXPECT_EQ(expected_bandwidth, current_sample);
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+ // Enter quiescence.
+ clock_.AdvanceTime(QuicTime::Delta::FromSeconds(1));
+
+  // Send packets 41 to 60, all of which will be marked as app-limited.
+ for (int i = 41; i <= 60; i++) {
+ SendPacket(i);
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+  // Ack packets 41 to 60, while sending packets 61 to 80. The samples for 41
+  // to 60 should be app-limited and therefore underestimate the bandwidth.
+ for (int i = 41; i <= 60; i++) {
+ BandwidthSample sample = AckPacketInner(i);
+ EXPECT_TRUE(sample.is_app_limited);
+ EXPECT_LT(sample.bandwidth, 0.7f * expected_bandwidth);
+
+ SendPacket(i + 20);
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+  // Run out of packets, and then ack packets 61 to 80, all of which should
+  // have correct non-app-limited samples.
+ for (int i = 61; i <= 80; i++) {
+ QuicBandwidth last_bandwidth = AckPacket(i);
+ EXPECT_EQ(expected_bandwidth, last_bandwidth);
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+ EXPECT_EQ(0u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+ EXPECT_EQ(0u, bytes_in_flight_);
+}
+
+// Test the samples taken at the first flight of packets sent.
+TEST_F(BandwidthSamplerTest, FirstRoundTrip) {
+ const QuicTime::Delta time_between_packets =
+ QuicTime::Delta::FromMilliseconds(1);
+ const QuicTime::Delta rtt = QuicTime::Delta::FromMilliseconds(800);
+ const int num_packets = 10;
+ const QuicByteCount num_bytes = kRegularPacketSize * num_packets;
+ const QuicBandwidth real_bandwidth =
+ QuicBandwidth::FromBytesAndTimeDelta(num_bytes, rtt);
+
+ for (int i = 1; i <= 10; i++) {
+ SendPacket(i);
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+ clock_.AdvanceTime(rtt - num_packets * time_between_packets);
+
+ QuicBandwidth last_sample = QuicBandwidth::Zero();
+ for (int i = 1; i <= 10; i++) {
+ QuicBandwidth sample = AckPacket(i);
+ EXPECT_GT(sample, last_sample);
+ last_sample = sample;
+ clock_.AdvanceTime(time_between_packets);
+ }
+
+  // The final measured sample for the first flight of packets is expected to
+  // be smaller than the real bandwidth, but it should not be off by more than
+  // 10%. The
+ // specific value of the error depends on the difference between the RTT and
+ // the time it takes to exhaust the congestion window (i.e. in the limit when
+ // all packets are sent simultaneously, last sample would indicate the real
+ // bandwidth).
+ EXPECT_LT(last_sample, real_bandwidth);
+ EXPECT_GT(last_sample, 0.9f * real_bandwidth);
+}
+
+// Test sampler's ability to remove obsolete packets.
+TEST_F(BandwidthSamplerTest, RemoveObsoletePackets) {
+ SendPacket(1);
+ SendPacket(2);
+ SendPacket(3);
+ SendPacket(4);
+ SendPacket(5);
+
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
+
+ EXPECT_EQ(5u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+ sampler_.RemoveObsoletePackets(QuicPacketNumber(4));
+ EXPECT_EQ(2u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+ sampler_.OnPacketLost(QuicPacketNumber(4));
+ EXPECT_EQ(1u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+ AckPacket(5);
+ EXPECT_EQ(0u, BandwidthSamplerPeer::GetNumberOfTrackedPackets(sampler_));
+}
+
+} // namespace test
+} // namespace quic
diff --git a/quic/core/congestion_control/bbr_sender.cc b/quic/core/congestion_control/bbr_sender.cc
new file mode 100644
index 0000000..6a7fe2b
--- /dev/null
+++ b/quic/core/congestion_control/bbr_sender.cc
@@ -0,0 +1,923 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/bbr_sender.h"
+
+#include <algorithm>
+#include <sstream>
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
+#include "net/third_party/quiche/src/quic/core/crypto/crypto_protocol.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_bug_tracker.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_fallthrough.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flag_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_string.h"
+
+namespace quic {
+
+namespace {
+// Constants based on TCP defaults.
+// The minimum CWND to ensure delayed acks don't reduce bandwidth measurements.
+// Does not inflate the pacing rate.
+const QuicByteCount kDefaultMinimumCongestionWindow = 4 * kMaxSegmentSize;
+
+// The gain used for STARTUP, equal to 2/ln(2).
+const float kDefaultHighGain = 2.885f;
+// The newly derived gain for STARTUP, equal to 4 * ln(2)
+const float kDerivedHighGain = 2.773f;
+// The newly derived CWND gain for STARTUP, equal to 4 * ln(2).
+const float kDerivedHighCWNDGain = 2.773f;
+// The gain used in STARTUP after loss has been detected.
+// 1.5 is enough to allow for 25% exogenous loss and still observe a 25% growth
+// in measured bandwidth.
+const float kStartupAfterLossGain = 1.5f;
+// The cycle of gains used during the PROBE_BW stage.
+const float kPacingGain[] = {1.25, 0.75, 1, 1, 1, 1, 1, 1};
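+// The cycle consists of one phase that probes for more bandwidth (gain 1.25),
+// one phase that drains the resulting queue (gain 0.75), and six phases with
+// gain 1. EnterProbeBandwidthMode() picks a random starting offset (excluding
+// the draining phase), and UpdateGainCyclePhase() then advances through the
+// offsets, roughly once per min_rtt.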
+
+// The length of the gain cycle.
+const size_t kGainCycleLength = sizeof(kPacingGain) / sizeof(kPacingGain[0]);
+// The size of the bandwidth filter window, in round-trips.
+const QuicRoundTripCount kBandwidthWindowSize = kGainCycleLength + 2;
+
+// The time after which the current min_rtt value expires.
+const QuicTime::Delta kMinRttExpiry = QuicTime::Delta::FromSeconds(10);
+// The minimum time the connection can spend in PROBE_RTT mode.
+const QuicTime::Delta kProbeRttTime = QuicTime::Delta::FromMilliseconds(200);
+// If the bandwidth does not increase by a factor of |kStartupGrowthTarget|
+// within |kRoundTripsWithoutGrowthBeforeExitingStartup| rounds, the connection
+// will exit the STARTUP mode.
+const float kStartupGrowthTarget = 1.25;
+const QuicRoundTripCount kRoundTripsWithoutGrowthBeforeExitingStartup = 3;
+// Coefficient of target congestion window to use when basing PROBE_RTT on BDP.
+const float kModerateProbeRttMultiplier = 0.75;
+// Coefficient to determine if a new RTT is sufficiently similar to min_rtt that
+// we don't need to enter PROBE_RTT.
+const float kSimilarMinRttThreshold = 1.125;
+
+} // namespace
+
+BbrSender::DebugState::DebugState(const BbrSender& sender)
+ : mode(sender.mode_),
+ max_bandwidth(sender.max_bandwidth_.GetBest()),
+ round_trip_count(sender.round_trip_count_),
+ gain_cycle_index(sender.cycle_current_offset_),
+ congestion_window(sender.congestion_window_),
+ is_at_full_bandwidth(sender.is_at_full_bandwidth_),
+ bandwidth_at_last_round(sender.bandwidth_at_last_round_),
+ rounds_without_bandwidth_gain(sender.rounds_without_bandwidth_gain_),
+ min_rtt(sender.min_rtt_),
+ min_rtt_timestamp(sender.min_rtt_timestamp_),
+ recovery_state(sender.recovery_state_),
+ recovery_window(sender.recovery_window_),
+ last_sample_is_app_limited(sender.last_sample_is_app_limited_),
+ end_of_app_limited_phase(sender.sampler_.end_of_app_limited_phase()) {}
+
+BbrSender::DebugState::DebugState(const DebugState& state) = default;
+
+BbrSender::BbrSender(const RttStats* rtt_stats,
+ const QuicUnackedPacketMap* unacked_packets,
+ QuicPacketCount initial_tcp_congestion_window,
+ QuicPacketCount max_tcp_congestion_window,
+ QuicRandom* random)
+ : rtt_stats_(rtt_stats),
+ unacked_packets_(unacked_packets),
+ random_(random),
+ mode_(STARTUP),
+ round_trip_count_(0),
+ max_bandwidth_(kBandwidthWindowSize, QuicBandwidth::Zero(), 0),
+ max_ack_height_(kBandwidthWindowSize, 0, 0),
+ aggregation_epoch_start_time_(QuicTime::Zero()),
+ aggregation_epoch_bytes_(0),
+ min_rtt_(QuicTime::Delta::Zero()),
+ min_rtt_timestamp_(QuicTime::Zero()),
+ congestion_window_(initial_tcp_congestion_window * kDefaultTCPMSS),
+ initial_congestion_window_(initial_tcp_congestion_window *
+ kDefaultTCPMSS),
+ max_congestion_window_(max_tcp_congestion_window * kDefaultTCPMSS),
+ min_congestion_window_(kDefaultMinimumCongestionWindow),
+ high_gain_(kDefaultHighGain),
+ high_cwnd_gain_(kDefaultHighGain),
+ drain_gain_(1.f / kDefaultHighGain),
+ pacing_rate_(QuicBandwidth::Zero()),
+ pacing_gain_(1),
+ congestion_window_gain_(1),
+ congestion_window_gain_constant_(
+ static_cast<float>(FLAGS_quic_bbr_cwnd_gain)),
+ num_startup_rtts_(kRoundTripsWithoutGrowthBeforeExitingStartup),
+ exit_startup_on_loss_(false),
+ cycle_current_offset_(0),
+ last_cycle_start_(QuicTime::Zero()),
+ is_at_full_bandwidth_(false),
+ rounds_without_bandwidth_gain_(0),
+ bandwidth_at_last_round_(QuicBandwidth::Zero()),
+ exiting_quiescence_(false),
+ exit_probe_rtt_at_(QuicTime::Zero()),
+ probe_rtt_round_passed_(false),
+ last_sample_is_app_limited_(false),
+ has_non_app_limited_sample_(false),
+ flexible_app_limited_(false),
+ recovery_state_(NOT_IN_RECOVERY),
+ recovery_window_(max_congestion_window_),
+ is_app_limited_recovery_(false),
+ slower_startup_(false),
+ rate_based_startup_(false),
+ startup_rate_reduction_multiplier_(0),
+ startup_bytes_lost_(0),
+ enable_ack_aggregation_during_startup_(false),
+ expire_ack_aggregation_in_startup_(false),
+ drain_to_target_(false),
+ probe_rtt_based_on_bdp_(false),
+ probe_rtt_skipped_if_similar_rtt_(false),
+ probe_rtt_disabled_if_app_limited_(false),
+ app_limited_since_last_probe_rtt_(false),
+ min_rtt_since_last_probe_rtt_(QuicTime::Delta::Infinite()) {
+ EnterStartupMode();
+}
+
+BbrSender::~BbrSender() {}
+
+void BbrSender::SetInitialCongestionWindowInPackets(
+ QuicPacketCount congestion_window) {
+ if (mode_ == STARTUP) {
+ initial_congestion_window_ = congestion_window * kDefaultTCPMSS;
+ congestion_window_ = congestion_window * kDefaultTCPMSS;
+ }
+}
+
+bool BbrSender::InSlowStart() const {
+ return mode_ == STARTUP;
+}
+
+void BbrSender::OnPacketSent(QuicTime sent_time,
+ QuicByteCount bytes_in_flight,
+ QuicPacketNumber packet_number,
+ QuicByteCount bytes,
+ HasRetransmittableData is_retransmittable) {
+ last_sent_packet_ = packet_number;
+
+ if (bytes_in_flight == 0 && sampler_.is_app_limited()) {
+ exiting_quiescence_ = true;
+ }
+
+ if (!aggregation_epoch_start_time_.IsInitialized()) {
+ aggregation_epoch_start_time_ = sent_time;
+ }
+
+ sampler_.OnPacketSent(sent_time, packet_number, bytes, bytes_in_flight,
+ is_retransmittable);
+}
+
+bool BbrSender::CanSend(QuicByteCount bytes_in_flight) {
+ return bytes_in_flight < GetCongestionWindow();
+}
+
+QuicBandwidth BbrSender::PacingRate(QuicByteCount bytes_in_flight) const {
+ if (pacing_rate_.IsZero()) {
+ return high_gain_ * QuicBandwidth::FromBytesAndTimeDelta(
+ initial_congestion_window_, GetMinRtt());
+ }
+ return pacing_rate_;
+}
+
+QuicBandwidth BbrSender::BandwidthEstimate() const {
+ return max_bandwidth_.GetBest();
+}
+
+QuicByteCount BbrSender::GetCongestionWindow() const {
+ if (mode_ == PROBE_RTT) {
+ return ProbeRttCongestionWindow();
+ }
+
+ if (InRecovery() && !(rate_based_startup_ && mode_ == STARTUP)) {
+ return std::min(congestion_window_, recovery_window_);
+ }
+
+ return congestion_window_;
+}
+
+QuicByteCount BbrSender::GetSlowStartThreshold() const {
+ return 0;
+}
+
+bool BbrSender::InRecovery() const {
+ return recovery_state_ != NOT_IN_RECOVERY;
+}
+
+bool BbrSender::ShouldSendProbingPacket() const {
+ if (pacing_gain_ <= 1) {
+ return false;
+ }
+
+ // TODO(b/77975811): If the pipe is highly under-utilized, consider not
+ // sending a probing transmission, because the extra bandwidth is not needed.
+ // If flexible_app_limited is enabled, check if the pipe is sufficiently full.
+ if (flexible_app_limited_) {
+ return !IsPipeSufficientlyFull();
+ } else {
+ return true;
+ }
+}
+
+bool BbrSender::IsPipeSufficientlyFull() const {
+ // See if we need more bytes in flight to see more bandwidth.
+ if (mode_ == STARTUP) {
+ // STARTUP exits if it doesn't observe a 25% bandwidth increase, so the CWND
+ // must be more than 25% above the target.
+ return unacked_packets_->bytes_in_flight() >=
+ GetTargetCongestionWindow(1.5);
+ }
+ if (pacing_gain_ > 1) {
+ // Super-unity PROBE_BW doesn't exit until 1.25 * BDP is achieved.
+ return unacked_packets_->bytes_in_flight() >=
+ GetTargetCongestionWindow(pacing_gain_);
+ }
+ // If bytes_in_flight are above the target congestion window, it should be
+ // possible to observe the same or more bandwidth if it's available.
+ return unacked_packets_->bytes_in_flight() >= GetTargetCongestionWindow(1.1);
+}
+
+void BbrSender::SetFromConfig(const QuicConfig& config,
+ Perspective perspective) {
+ if (config.HasClientRequestedIndependentOption(kLRTT, perspective)) {
+ exit_startup_on_loss_ = true;
+ }
+ if (config.HasClientRequestedIndependentOption(k1RTT, perspective)) {
+ num_startup_rtts_ = 1;
+ }
+ if (config.HasClientRequestedIndependentOption(k2RTT, perspective)) {
+ num_startup_rtts_ = 2;
+ }
+ if (config.HasClientRequestedIndependentOption(kBBRS, perspective)) {
+ slower_startup_ = true;
+ }
+ if (config.HasClientRequestedIndependentOption(kBBR3, perspective)) {
+ drain_to_target_ = true;
+ }
+ if (config.HasClientRequestedIndependentOption(kBBS1, perspective)) {
+ rate_based_startup_ = true;
+ }
+ if (GetQuicReloadableFlag(quic_bbr_startup_rate_reduction) &&
+ config.HasClientRequestedIndependentOption(kBBS4, perspective)) {
+ rate_based_startup_ = true;
+ // Hits 1.25x pacing multiplier when ~2/3 CWND is lost.
+ startup_rate_reduction_multiplier_ = 1;
+ }
+ if (GetQuicReloadableFlag(quic_bbr_startup_rate_reduction) &&
+ config.HasClientRequestedIndependentOption(kBBS5, perspective)) {
+ rate_based_startup_ = true;
+ // Hits 1.25x pacing multiplier when ~1/3 CWND is lost.
+ startup_rate_reduction_multiplier_ = 2;
+ }
+ if (config.HasClientRequestedIndependentOption(kBBR4, perspective)) {
+ max_ack_height_.SetWindowLength(2 * kBandwidthWindowSize);
+ }
+ if (config.HasClientRequestedIndependentOption(kBBR5, perspective)) {
+ max_ack_height_.SetWindowLength(4 * kBandwidthWindowSize);
+ }
+ if (GetQuicReloadableFlag(quic_bbr_less_probe_rtt) &&
+ config.HasClientRequestedIndependentOption(kBBR6, perspective)) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_bbr_less_probe_rtt, 1, 3);
+ probe_rtt_based_on_bdp_ = true;
+ }
+ if (GetQuicReloadableFlag(quic_bbr_less_probe_rtt) &&
+ config.HasClientRequestedIndependentOption(kBBR7, perspective)) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_bbr_less_probe_rtt, 2, 3);
+ probe_rtt_skipped_if_similar_rtt_ = true;
+ }
+ if (GetQuicReloadableFlag(quic_bbr_less_probe_rtt) &&
+ config.HasClientRequestedIndependentOption(kBBR8, perspective)) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_bbr_less_probe_rtt, 3, 3);
+ probe_rtt_disabled_if_app_limited_ = true;
+ }
+ if (GetQuicReloadableFlag(quic_bbr_flexible_app_limited) &&
+ config.HasClientRequestedIndependentOption(kBBR9, perspective)) {
+ QUIC_RELOADABLE_FLAG_COUNT(quic_bbr_flexible_app_limited);
+ flexible_app_limited_ = true;
+ }
+ if (GetQuicReloadableFlag(quic_bbr_slower_startup3) &&
+ config.HasClientRequestedIndependentOption(kBBQ1, perspective)) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_bbr_slower_startup3, 1, 4);
+ set_high_gain(kDerivedHighGain);
+ set_high_cwnd_gain(kDerivedHighGain);
+ set_drain_gain(1.f / kDerivedHighGain);
+ }
+ if (GetQuicReloadableFlag(quic_bbr_slower_startup3) &&
+ config.HasClientRequestedIndependentOption(kBBQ2, perspective)) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_bbr_slower_startup3, 2, 4);
+ set_high_cwnd_gain(kDerivedHighCWNDGain);
+ }
+ if (GetQuicReloadableFlag(quic_bbr_slower_startup3) &&
+ config.HasClientRequestedIndependentOption(kBBQ3, perspective)) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_bbr_slower_startup3, 3, 4);
+ enable_ack_aggregation_during_startup_ = true;
+ }
+ if (GetQuicReloadableFlag(quic_bbr_slower_startup3) &&
+ config.HasClientRequestedIndependentOption(kBBQ4, perspective)) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_bbr_slower_startup3, 4, 4);
+ set_drain_gain(kModerateProbeRttMultiplier);
+ }
+ if (GetQuicReloadableFlag(quic_bbr_slower_startup4) &&
+ config.HasClientRequestedIndependentOption(kBBQ5, perspective)) {
+ QUIC_RELOADABLE_FLAG_COUNT(quic_bbr_slower_startup4);
+ expire_ack_aggregation_in_startup_ = true;
+ }
+ if (config.HasClientRequestedIndependentOption(kMIN1, perspective)) {
+ min_congestion_window_ = kMaxSegmentSize;
+ }
+}
+
+void BbrSender::AdjustNetworkParameters(QuicBandwidth bandwidth,
+ QuicTime::Delta rtt) {
+ if (!bandwidth.IsZero()) {
+ max_bandwidth_.Update(bandwidth, round_trip_count_);
+ }
+ if (!rtt.IsZero() && (min_rtt_ > rtt || min_rtt_.IsZero())) {
+ min_rtt_ = rtt;
+ }
+}
+
+void BbrSender::OnCongestionEvent(bool /*rtt_updated*/,
+ QuicByteCount prior_in_flight,
+ QuicTime event_time,
+ const AckedPacketVector& acked_packets,
+ const LostPacketVector& lost_packets) {
+ const QuicByteCount total_bytes_acked_before = sampler_.total_bytes_acked();
+
+ bool is_round_start = false;
+ bool min_rtt_expired = false;
+
+ DiscardLostPackets(lost_packets);
+
+ // Input the new data into the BBR model of the connection.
+ QuicByteCount excess_acked = 0;
+ if (!acked_packets.empty()) {
+ QuicPacketNumber last_acked_packet = acked_packets.rbegin()->packet_number;
+ is_round_start = UpdateRoundTripCounter(last_acked_packet);
+ min_rtt_expired = UpdateBandwidthAndMinRtt(event_time, acked_packets);
+ UpdateRecoveryState(last_acked_packet, !lost_packets.empty(),
+ is_round_start);
+
+ const QuicByteCount bytes_acked =
+ sampler_.total_bytes_acked() - total_bytes_acked_before;
+
+ excess_acked = UpdateAckAggregationBytes(event_time, bytes_acked);
+ }
+
+ // Handle logic specific to PROBE_BW mode.
+ if (mode_ == PROBE_BW) {
+ UpdateGainCyclePhase(event_time, prior_in_flight, !lost_packets.empty());
+ }
+
+ // Handle logic specific to STARTUP and DRAIN modes.
+ if (is_round_start && !is_at_full_bandwidth_) {
+ CheckIfFullBandwidthReached();
+ }
+ MaybeExitStartupOrDrain(event_time);
+
+ // Handle logic specific to PROBE_RTT.
+ MaybeEnterOrExitProbeRtt(event_time, is_round_start, min_rtt_expired);
+
+  // Calculate the number of bytes acked and lost.
+ QuicByteCount bytes_acked =
+ sampler_.total_bytes_acked() - total_bytes_acked_before;
+ QuicByteCount bytes_lost = 0;
+ for (const auto& packet : lost_packets) {
+ bytes_lost += packet.bytes_lost;
+ }
+
+ // After the model is updated, recalculate the pacing rate and congestion
+ // window.
+ CalculatePacingRate();
+ CalculateCongestionWindow(bytes_acked, excess_acked);
+ CalculateRecoveryWindow(bytes_acked, bytes_lost);
+
+ // Cleanup internal state.
+ sampler_.RemoveObsoletePackets(unacked_packets_->GetLeastUnacked());
+}
+
+CongestionControlType BbrSender::GetCongestionControlType() const {
+ return kBBR;
+}
+
+QuicTime::Delta BbrSender::GetMinRtt() const {
+ return !min_rtt_.IsZero() ? min_rtt_ : rtt_stats_->initial_rtt();
+}
+
+QuicByteCount BbrSender::GetTargetCongestionWindow(float gain) const {
+ QuicByteCount bdp = GetMinRtt() * BandwidthEstimate();
+ QuicByteCount congestion_window = gain * bdp;
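+  // Illustrative numbers only: with a min_rtt of 100 ms and a bandwidth
+  // estimate of 1.25 MB/s, the BDP is 125 kB, and a gain of 2 yields a
+  // 250 kB target window.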
+
+ // BDP estimate will be zero if no bandwidth samples are available yet.
+ if (congestion_window == 0) {
+ congestion_window = gain * initial_congestion_window_;
+ }
+
+ return std::max(congestion_window, min_congestion_window_);
+}
+
+QuicByteCount BbrSender::ProbeRttCongestionWindow() const {
+ if (probe_rtt_based_on_bdp_) {
+ return GetTargetCongestionWindow(kModerateProbeRttMultiplier);
+ }
+ return min_congestion_window_;
+}
+
+void BbrSender::EnterStartupMode() {
+ mode_ = STARTUP;
+ pacing_gain_ = high_gain_;
+ congestion_window_gain_ = high_cwnd_gain_;
+}
+
+void BbrSender::EnterProbeBandwidthMode(QuicTime now) {
+ mode_ = PROBE_BW;
+ congestion_window_gain_ = congestion_window_gain_constant_;
+
+ // Pick a random offset for the gain cycle out of {0, 2..7} range. 1 is
+ // excluded because in that case increased gain and decreased gain would not
+ // follow each other.
+ cycle_current_offset_ = random_->RandUint64() % (kGainCycleLength - 1);
+ if (cycle_current_offset_ >= 1) {
+ cycle_current_offset_ += 1;
+ }
+
+ last_cycle_start_ = now;
+ pacing_gain_ = kPacingGain[cycle_current_offset_];
+}
+
+void BbrSender::DiscardLostPackets(const LostPacketVector& lost_packets) {
+ for (const LostPacket& packet : lost_packets) {
+ sampler_.OnPacketLost(packet.packet_number);
+ if (startup_rate_reduction_multiplier_ != 0 && mode_ == STARTUP) {
+ startup_bytes_lost_ += packet.bytes_lost;
+ }
+ }
+}
+
+bool BbrSender::UpdateRoundTripCounter(QuicPacketNumber last_acked_packet) {
+ if (!current_round_trip_end_.IsInitialized() ||
+ last_acked_packet > current_round_trip_end_) {
+ round_trip_count_++;
+ current_round_trip_end_ = last_sent_packet_;
+ return true;
+ }
+
+ return false;
+}
+
+bool BbrSender::UpdateBandwidthAndMinRtt(
+ QuicTime now,
+ const AckedPacketVector& acked_packets) {
+ QuicTime::Delta sample_min_rtt = QuicTime::Delta::Infinite();
+ for (const auto& packet : acked_packets) {
+ if (packet.bytes_acked == 0) {
+ // Skip acked packets with 0 in flight bytes when updating bandwidth.
+ continue;
+ }
+ BandwidthSample bandwidth_sample =
+ sampler_.OnPacketAcknowledged(now, packet.packet_number);
+ last_sample_is_app_limited_ = bandwidth_sample.is_app_limited;
+ has_non_app_limited_sample_ |= !bandwidth_sample.is_app_limited;
+ if (!bandwidth_sample.rtt.IsZero()) {
+ sample_min_rtt = std::min(sample_min_rtt, bandwidth_sample.rtt);
+ }
+
+ if (!bandwidth_sample.is_app_limited ||
+ bandwidth_sample.bandwidth > BandwidthEstimate()) {
+ max_bandwidth_.Update(bandwidth_sample.bandwidth, round_trip_count_);
+ }
+ }
+
+ // If none of the RTT samples are valid, return immediately.
+ if (sample_min_rtt.IsInfinite()) {
+ return false;
+ }
+ min_rtt_since_last_probe_rtt_ =
+ std::min(min_rtt_since_last_probe_rtt_, sample_min_rtt);
+
+ // Do not expire min_rtt if none was ever available.
+ bool min_rtt_expired =
+ !min_rtt_.IsZero() && (now > (min_rtt_timestamp_ + kMinRttExpiry));
+
+ if (min_rtt_expired || sample_min_rtt < min_rtt_ || min_rtt_.IsZero()) {
+ QUIC_DVLOG(2) << "Min RTT updated, old value: " << min_rtt_
+ << ", new value: " << sample_min_rtt
+ << ", current time: " << now.ToDebuggingValue();
+
+ if (min_rtt_expired && ShouldExtendMinRttExpiry()) {
+ min_rtt_expired = false;
+ } else {
+ min_rtt_ = sample_min_rtt;
+ }
+ min_rtt_timestamp_ = now;
+ // Reset since_last_probe_rtt fields.
+ min_rtt_since_last_probe_rtt_ = QuicTime::Delta::Infinite();
+ app_limited_since_last_probe_rtt_ = false;
+ }
+ DCHECK(!min_rtt_.IsZero());
+
+ return min_rtt_expired;
+}
+
+bool BbrSender::ShouldExtendMinRttExpiry() const {
+ if (probe_rtt_disabled_if_app_limited_ && app_limited_since_last_probe_rtt_) {
+ // Extend the current min_rtt if we've been app limited recently.
+ return true;
+ }
+ const bool min_rtt_increased_since_last_probe =
+ min_rtt_since_last_probe_rtt_ > min_rtt_ * kSimilarMinRttThreshold;
+ if (probe_rtt_skipped_if_similar_rtt_ && app_limited_since_last_probe_rtt_ &&
+ !min_rtt_increased_since_last_probe) {
+ // Extend the current min_rtt if we've been app limited recently and an rtt
+ // has been measured in that time that's less than 12.5% more than the
+ // current min_rtt.
+ return true;
+ }
+ return false;
+}
+
+void BbrSender::UpdateGainCyclePhase(QuicTime now,
+ QuicByteCount prior_in_flight,
+ bool has_losses) {
+ const QuicByteCount bytes_in_flight = unacked_packets_->bytes_in_flight();
+ // In most cases, the cycle is advanced after an RTT passes.
+ bool should_advance_gain_cycling = now - last_cycle_start_ > GetMinRtt();
+
+ // If the pacing gain is above 1.0, the connection is trying to probe the
+ // bandwidth by increasing the number of bytes in flight to at least
+ // pacing_gain * BDP. Make sure that it actually reaches the target, as long
+ // as there are no losses suggesting that the buffers are not able to hold
+ // that much.
+ if (pacing_gain_ > 1.0 && !has_losses &&
+ prior_in_flight < GetTargetCongestionWindow(pacing_gain_)) {
+ should_advance_gain_cycling = false;
+ }
+
+  // If the pacing gain is below 1.0, the connection is trying to drain the
+  // extra queue built up by the preceding probing phase. If the number of
+  // bytes in flight falls to the estimated BDP value early, conclude that the
+  // queue has been successfully drained and exit this cycle early.
+ if (pacing_gain_ < 1.0 && bytes_in_flight <= GetTargetCongestionWindow(1)) {
+ should_advance_gain_cycling = true;
+ }
+
+ if (should_advance_gain_cycling) {
+ cycle_current_offset_ = (cycle_current_offset_ + 1) % kGainCycleLength;
+ last_cycle_start_ = now;
+    // Stay in the low-gain mode until the number of bytes in flight drops to
+    // the target BDP; the mode is exited as soon as that happens.
+ if (drain_to_target_ && pacing_gain_ < 1 &&
+ kPacingGain[cycle_current_offset_] == 1 &&
+ bytes_in_flight > GetTargetCongestionWindow(1)) {
+ return;
+ }
+ pacing_gain_ = kPacingGain[cycle_current_offset_];
+ }
+}
+
+void BbrSender::CheckIfFullBandwidthReached() {
+ if (last_sample_is_app_limited_) {
+ return;
+ }
+
+ QuicBandwidth target = bandwidth_at_last_round_ * kStartupGrowthTarget;
+ if (BandwidthEstimate() >= target) {
+ bandwidth_at_last_round_ = BandwidthEstimate();
+ rounds_without_bandwidth_gain_ = 0;
+ if (expire_ack_aggregation_in_startup_) {
+ // Expire old excess delivery measurements now that bandwidth increased.
+ max_ack_height_.Reset(0, round_trip_count_);
+ }
+ return;
+ }
+
+ rounds_without_bandwidth_gain_++;
+ if ((rounds_without_bandwidth_gain_ >= num_startup_rtts_) ||
+ (exit_startup_on_loss_ && InRecovery())) {
+ DCHECK(has_non_app_limited_sample_);
+ is_at_full_bandwidth_ = true;
+ }
+}
+
+void BbrSender::MaybeExitStartupOrDrain(QuicTime now) {
+ if (mode_ == STARTUP && is_at_full_bandwidth_) {
+ mode_ = DRAIN;
+ pacing_gain_ = drain_gain_;
+ congestion_window_gain_ = high_cwnd_gain_;
+ }
+ if (mode_ == DRAIN &&
+ unacked_packets_->bytes_in_flight() <= GetTargetCongestionWindow(1)) {
+ EnterProbeBandwidthMode(now);
+ }
+}
+
+void BbrSender::MaybeEnterOrExitProbeRtt(QuicTime now,
+ bool is_round_start,
+ bool min_rtt_expired) {
+ if (min_rtt_expired && !exiting_quiescence_ && mode_ != PROBE_RTT) {
+ mode_ = PROBE_RTT;
+ pacing_gain_ = 1;
+ // Do not decide on the time to exit PROBE_RTT until the |bytes_in_flight|
+ // is at the target small value.
+ exit_probe_rtt_at_ = QuicTime::Zero();
+ }
+
+ if (mode_ == PROBE_RTT) {
+ sampler_.OnAppLimited();
+
+ if (exit_probe_rtt_at_ == QuicTime::Zero()) {
+      // If the window has reached the appropriate size, schedule exiting
+      // PROBE_RTT. The CWND during PROBE_RTT is ProbeRttCongestionWindow()
+      // (kMinimumCongestionWindow unless the BDP-based PROBE_RTT window is
+      // enabled), but we allow an extra packet since QUIC checks CWND before
+      // sending a packet.
+ if (unacked_packets_->bytes_in_flight() <
+ ProbeRttCongestionWindow() + kMaxPacketSize) {
+ exit_probe_rtt_at_ = now + kProbeRttTime;
+ probe_rtt_round_passed_ = false;
+ }
+ } else {
+ if (is_round_start) {
+ probe_rtt_round_passed_ = true;
+ }
+ if (now >= exit_probe_rtt_at_ && probe_rtt_round_passed_) {
+ min_rtt_timestamp_ = now;
+ if (!is_at_full_bandwidth_) {
+ EnterStartupMode();
+ } else {
+ EnterProbeBandwidthMode(now);
+ }
+ }
+ }
+ }
+
+ exiting_quiescence_ = false;
+}
+
+void BbrSender::UpdateRecoveryState(QuicPacketNumber last_acked_packet,
+ bool has_losses,
+ bool is_round_start) {
+ // Exit recovery when there are no losses for a round.
+ if (has_losses) {
+ end_recovery_at_ = last_sent_packet_;
+ }
+
+ switch (recovery_state_) {
+ case NOT_IN_RECOVERY:
+ // Enter conservation on the first loss.
+ if (has_losses) {
+ recovery_state_ = CONSERVATION;
+ // This will cause the |recovery_window_| to be set to the correct
+ // value in CalculateRecoveryWindow().
+ recovery_window_ = 0;
+        // Since the conservation phase is meant to last for a whole round,
+        // extend the current round as if it had started right now.
+ current_round_trip_end_ = last_sent_packet_;
+ if (GetQuicReloadableFlag(quic_bbr_app_limited_recovery) &&
+ last_sample_is_app_limited_) {
+ QUIC_RELOADABLE_FLAG_COUNT(quic_bbr_app_limited_recovery);
+ is_app_limited_recovery_ = true;
+ }
+ }
+ break;
+
+ case CONSERVATION:
+ if (is_round_start) {
+ recovery_state_ = GROWTH;
+ }
+ QUIC_FALLTHROUGH_INTENDED;
+
+ case GROWTH:
+ // Exit recovery if appropriate.
+ if (!has_losses && last_acked_packet > end_recovery_at_) {
+ recovery_state_ = NOT_IN_RECOVERY;
+ is_app_limited_recovery_ = false;
+ }
+
+ break;
+ }
+ if (recovery_state_ != NOT_IN_RECOVERY && is_app_limited_recovery_) {
+ sampler_.OnAppLimited();
+ }
+}
+
+// TODO(ianswett): Move this logic into BandwidthSampler.
+QuicByteCount BbrSender::UpdateAckAggregationBytes(
+ QuicTime ack_time,
+ QuicByteCount newly_acked_bytes) {
+ // Compute how many bytes are expected to be delivered, assuming max bandwidth
+ // is correct.
+ QuicByteCount expected_bytes_acked =
+ max_bandwidth_.GetBest() * (ack_time - aggregation_epoch_start_time_);
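+  // For example (illustration only): at a max bandwidth of 100 Mbps and 10 ms
+  // since the epoch start, roughly 125 KB are expected to have been acked;
+  // anything acked beyond that counts as aggregation.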
+ // Reset the current aggregation epoch as soon as the ack arrival rate is less
+ // than or equal to the max bandwidth.
+ if (aggregation_epoch_bytes_ <= expected_bytes_acked) {
+ // Reset to start measuring a new aggregation epoch.
+ aggregation_epoch_bytes_ = newly_acked_bytes;
+ aggregation_epoch_start_time_ = ack_time;
+ return 0;
+ }
+
+ // Compute how many extra bytes were delivered vs max bandwidth.
+ // Include the bytes most recently acknowledged to account for stretch acks.
+ aggregation_epoch_bytes_ += newly_acked_bytes;
+ max_ack_height_.Update(aggregation_epoch_bytes_ - expected_bytes_acked,
+ round_trip_count_);
+ return aggregation_epoch_bytes_ - expected_bytes_acked;
+}
+
+void BbrSender::CalculatePacingRate() {
+ if (BandwidthEstimate().IsZero()) {
+ return;
+ }
+
+ QuicBandwidth target_rate = pacing_gain_ * BandwidthEstimate();
+ if (is_at_full_bandwidth_) {
+ pacing_rate_ = target_rate;
+ return;
+ }
+
+ // Pace at the rate of initial_window / RTT as soon as RTT measurements are
+ // available.
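+  // For example (illustration only): a 14,600-byte initial window and a 50 ms
+  // min RTT give an initial pacing rate of roughly 292 KB/s.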
+ if (pacing_rate_.IsZero() && !rtt_stats_->min_rtt().IsZero()) {
+ pacing_rate_ = QuicBandwidth::FromBytesAndTimeDelta(
+ initial_congestion_window_, rtt_stats_->min_rtt());
+ return;
+ }
+ // Slow the pacing rate in STARTUP once loss has ever been detected.
+ const bool has_ever_detected_loss = end_recovery_at_.IsInitialized();
+ if (slower_startup_ && has_ever_detected_loss &&
+ has_non_app_limited_sample_) {
+ pacing_rate_ = kStartupAfterLossGain * BandwidthEstimate();
+ return;
+ }
+
+ // Slow the pacing rate in STARTUP by the bytes_lost / CWND.
+ if (startup_rate_reduction_multiplier_ != 0 && has_ever_detected_loss &&
+ has_non_app_limited_sample_) {
+ pacing_rate_ =
+ (1 - (startup_bytes_lost_ * startup_rate_reduction_multiplier_ * 1.0f /
+ congestion_window_)) *
+ target_rate;
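+    // For example (illustration only): with a multiplier of 2, 5,000 bytes
+    // lost in STARTUP and a 100,000-byte CWND, the target rate is reduced by
+    // 10% before the lower bound below is applied.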
+ // Ensure the pacing rate doesn't drop below the startup growth target times
+ // the bandwidth estimate.
+ pacing_rate_ =
+ std::max(pacing_rate_, kStartupGrowthTarget * BandwidthEstimate());
+ return;
+ }
+
+ // Do not decrease the pacing rate during startup.
+ pacing_rate_ = std::max(pacing_rate_, target_rate);
+}
+
+void BbrSender::CalculateCongestionWindow(QuicByteCount bytes_acked,
+ QuicByteCount excess_acked) {
+ if (mode_ == PROBE_RTT) {
+ return;
+ }
+
+ QuicByteCount target_window =
+ GetTargetCongestionWindow(congestion_window_gain_);
+ if (is_at_full_bandwidth_) {
+ // Add the max recently measured ack aggregation to CWND.
+ target_window += max_ack_height_.GetBest();
+ } else if (enable_ack_aggregation_during_startup_) {
+ // Add the most recent excess acked. Because CWND never decreases in
+ // STARTUP, this will automatically create a very localized max filter.
+ target_window += excess_acked;
+ }
+
+ // Instead of immediately setting the target CWND as the new one, BBR grows
+ // the CWND towards |target_window| by only increasing it |bytes_acked| at a
+ // time.
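+  // For example (illustration only): with a 30,000-byte CWND, a 50,000-byte
+  // target and 3,000 newly acked bytes, the window grows to 33,000 bytes on
+  // this ack once the connection is at full bandwidth.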
+ const bool add_bytes_acked =
+ !GetQuicReloadableFlag(quic_bbr_no_bytes_acked_in_startup_recovery) ||
+ !InRecovery();
+ if (is_at_full_bandwidth_) {
+ congestion_window_ =
+ std::min(target_window, congestion_window_ + bytes_acked);
+ } else if (add_bytes_acked &&
+ (congestion_window_ < target_window ||
+ sampler_.total_bytes_acked() < initial_congestion_window_)) {
+ // If the connection is not yet out of startup phase, do not decrease the
+ // window.
+ congestion_window_ = congestion_window_ + bytes_acked;
+ }
+
+ // Enforce the limits on the congestion window.
+ congestion_window_ = std::max(congestion_window_, min_congestion_window_);
+ congestion_window_ = std::min(congestion_window_, max_congestion_window_);
+}
+
+void BbrSender::CalculateRecoveryWindow(QuicByteCount bytes_acked,
+ QuicByteCount bytes_lost) {
+ if (rate_based_startup_ && mode_ == STARTUP) {
+ return;
+ }
+
+ if (recovery_state_ == NOT_IN_RECOVERY) {
+ return;
+ }
+
+ // Set up the initial recovery window.
+ if (recovery_window_ == 0) {
+ recovery_window_ = unacked_packets_->bytes_in_flight() + bytes_acked;
+ recovery_window_ = std::max(min_congestion_window_, recovery_window_);
+ return;
+ }
+
+ // Remove losses from the recovery window, while accounting for a potential
+ // integer underflow.
+ recovery_window_ = recovery_window_ >= bytes_lost
+ ? recovery_window_ - bytes_lost
+ : kMaxSegmentSize;
+
+ // In CONSERVATION mode, just subtracting losses is sufficient. In GROWTH,
+ // release additional |bytes_acked| to achieve a slow-start-like behavior.
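+  // For example (illustration only): in GROWTH with 2,000 bytes lost and
+  // 3,000 bytes acked, the window shrinks by 2,000 and then grows by 3,000
+  // bytes, a net increase of 1,000 bytes before the sanity checks below.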
+ if (recovery_state_ == GROWTH) {
+ recovery_window_ += bytes_acked;
+ }
+
+  // Sanity checks. Ensure that we always allow sending at least an MSS or
+  // |bytes_acked| in response, whichever is larger.
+ recovery_window_ = std::max(
+ recovery_window_, unacked_packets_->bytes_in_flight() + bytes_acked);
+ if (GetQuicReloadableFlag(quic_bbr_one_mss_conservation)) {
+ recovery_window_ =
+ std::max(recovery_window_,
+ unacked_packets_->bytes_in_flight() + kMaxSegmentSize);
+ }
+ recovery_window_ = std::max(min_congestion_window_, recovery_window_);
+}
+
+QuicString BbrSender::GetDebugState() const {
+ std::ostringstream stream;
+ stream << ExportDebugState();
+ return stream.str();
+}
+
+void BbrSender::OnApplicationLimited(QuicByteCount bytes_in_flight) {
+ if (bytes_in_flight >= GetCongestionWindow()) {
+ return;
+ }
+ if (flexible_app_limited_ && IsPipeSufficientlyFull()) {
+ return;
+ }
+
+ app_limited_since_last_probe_rtt_ = true;
+ sampler_.OnAppLimited();
+ QUIC_DVLOG(2) << "Becoming application limited. Last sent packet: "
+ << last_sent_packet_ << ", CWND: " << GetCongestionWindow();
+}
+
+BbrSender::DebugState BbrSender::ExportDebugState() const {
+ return DebugState(*this);
+}
+
+static QuicString ModeToString(BbrSender::Mode mode) {
+ switch (mode) {
+ case BbrSender::STARTUP:
+ return "STARTUP";
+ case BbrSender::DRAIN:
+ return "DRAIN";
+ case BbrSender::PROBE_BW:
+ return "PROBE_BW";
+ case BbrSender::PROBE_RTT:
+ return "PROBE_RTT";
+ }
+ return "???";
+}
+
+std::ostream& operator<<(std::ostream& os, const BbrSender::Mode& mode) {
+ os << ModeToString(mode);
+ return os;
+}
+
+std::ostream& operator<<(std::ostream& os, const BbrSender::DebugState& state) {
+ os << "Mode: " << ModeToString(state.mode) << std::endl;
+ os << "Maximum bandwidth: " << state.max_bandwidth << std::endl;
+ os << "Round trip counter: " << state.round_trip_count << std::endl;
+ os << "Gain cycle index: " << static_cast<int>(state.gain_cycle_index)
+ << std::endl;
+ os << "Congestion window: " << state.congestion_window << " bytes"
+ << std::endl;
+
+ if (state.mode == BbrSender::STARTUP) {
+ os << "(startup) Bandwidth at last round: " << state.bandwidth_at_last_round
+ << std::endl;
+ os << "(startup) Rounds without gain: "
+ << state.rounds_without_bandwidth_gain << std::endl;
+ }
+
+ os << "Minimum RTT: " << state.min_rtt << std::endl;
+ os << "Minimum RTT timestamp: " << state.min_rtt_timestamp.ToDebuggingValue()
+ << std::endl;
+
+ os << "Last sample is app-limited: "
+ << (state.last_sample_is_app_limited ? "yes" : "no");
+
+ return os;
+}
+
+} // namespace quic
diff --git a/quic/core/congestion_control/bbr_sender.h b/quic/core/congestion_control/bbr_sender.h
new file mode 100644
index 0000000..37e2695
--- /dev/null
+++ b/quic/core/congestion_control/bbr_sender.h
@@ -0,0 +1,408 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// BBR (Bottleneck Bandwidth and RTT) congestion control algorithm.
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_BBR_SENDER_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_BBR_SENDER_H_
+
+#include <cstdint>
+#include <ostream>
+
+#include "base/macros.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/bandwidth_sampler.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/send_algorithm_interface.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/windowed_filter.h"
+#include "net/third_party/quiche/src/quic/core/crypto/quic_random.h"
+#include "net/third_party/quiche/src/quic/core/quic_bandwidth.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+#include "net/third_party/quiche/src/quic/core/quic_unacked_packet_map.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_string.h"
+
+namespace quic {
+
+class RttStats;
+
+typedef uint64_t QuicRoundTripCount;
+
+// BbrSender implements BBR congestion control algorithm. BBR aims to estimate
+// the current available Bottleneck Bandwidth and RTT (hence the name), and
+// regulates the pacing rate and the size of the congestion window based on
+// those signals.
+//
+// BBR relies on pacing in order to function properly. Do not use BBR when
+// pacing is disabled.
+//
+// TODO(vasilvv): implement traffic policer (long-term sampling) mode.
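+//
+// A rough usage sketch (the surrounding object names are illustrative, not
+// part of this header): the sender is constructed with pointers to the
+// connection's RttStats and QuicUnackedPacketMap, e.g.
+//
+//   BbrSender* bbr = new BbrSender(rtt_stats, unacked_packet_map,
+//                                  /*initial_tcp_congestion_window=*/10,
+//                                  /*max_tcp_congestion_window=*/2000,
+//                                  QuicRandom::GetInstance());
+//
+// and is then driven through the SendAlgorithmInterface methods
+// (OnPacketSent(), OnCongestionEvent(), CanSend(), PacingRate()).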
+class QUIC_EXPORT_PRIVATE BbrSender : public SendAlgorithmInterface {
+ public:
+ enum Mode {
+ // Startup phase of the connection.
+ STARTUP,
+ // After achieving the highest possible bandwidth during the startup, lower
+ // the pacing rate in order to drain the queue.
+ DRAIN,
+ // Cruising mode.
+ PROBE_BW,
+ // Temporarily slow down sending in order to empty the buffer and measure
+ // the real minimum RTT.
+ PROBE_RTT,
+ };
+
+ // Indicates how the congestion control limits the amount of bytes in flight.
+ enum RecoveryState {
+ // Do not limit.
+ NOT_IN_RECOVERY,
+ // Allow an extra outstanding byte for each byte acknowledged.
+ CONSERVATION,
+ // Allow two extra outstanding bytes for each byte acknowledged (slow
+ // start).
+ GROWTH
+ };
+
+ // Debug state can be exported in order to troubleshoot potential congestion
+ // control issues.
+ struct DebugState {
+ explicit DebugState(const BbrSender& sender);
+ DebugState(const DebugState& state);
+
+ Mode mode;
+ QuicBandwidth max_bandwidth;
+ QuicRoundTripCount round_trip_count;
+ int gain_cycle_index;
+ QuicByteCount congestion_window;
+
+ bool is_at_full_bandwidth;
+ QuicBandwidth bandwidth_at_last_round;
+ QuicRoundTripCount rounds_without_bandwidth_gain;
+
+ QuicTime::Delta min_rtt;
+ QuicTime min_rtt_timestamp;
+
+ RecoveryState recovery_state;
+ QuicByteCount recovery_window;
+
+ bool last_sample_is_app_limited;
+ QuicPacketNumber end_of_app_limited_phase;
+ };
+
+ BbrSender(const RttStats* rtt_stats,
+ const QuicUnackedPacketMap* unacked_packets,
+ QuicPacketCount initial_tcp_congestion_window,
+ QuicPacketCount max_tcp_congestion_window,
+ QuicRandom* random);
+ BbrSender(const BbrSender&) = delete;
+ BbrSender& operator=(const BbrSender&) = delete;
+ ~BbrSender() override;
+
+ // Start implementation of SendAlgorithmInterface.
+ bool InSlowStart() const override;
+ bool InRecovery() const override;
+ bool ShouldSendProbingPacket() const override;
+
+ void SetFromConfig(const QuicConfig& config,
+ Perspective perspective) override;
+
+ void AdjustNetworkParameters(QuicBandwidth bandwidth,
+ QuicTime::Delta rtt) override;
+ void SetNumEmulatedConnections(int num_connections) override {}
+ void SetInitialCongestionWindowInPackets(
+ QuicPacketCount congestion_window) override;
+ void OnCongestionEvent(bool rtt_updated,
+ QuicByteCount prior_in_flight,
+ QuicTime event_time,
+ const AckedPacketVector& acked_packets,
+ const LostPacketVector& lost_packets) override;
+ void OnPacketSent(QuicTime sent_time,
+ QuicByteCount bytes_in_flight,
+ QuicPacketNumber packet_number,
+ QuicByteCount bytes,
+ HasRetransmittableData is_retransmittable) override;
+ void OnRetransmissionTimeout(bool packets_retransmitted) override {}
+ void OnConnectionMigration() override {}
+ bool CanSend(QuicByteCount bytes_in_flight) override;
+ QuicBandwidth PacingRate(QuicByteCount bytes_in_flight) const override;
+ QuicBandwidth BandwidthEstimate() const override;
+ QuicByteCount GetCongestionWindow() const override;
+ QuicByteCount GetSlowStartThreshold() const override;
+ CongestionControlType GetCongestionControlType() const override;
+ QuicString GetDebugState() const override;
+ void OnApplicationLimited(QuicByteCount bytes_in_flight) override;
+ // End implementation of SendAlgorithmInterface.
+
+ // Gets the number of RTTs BBR remains in STARTUP phase.
+ QuicRoundTripCount num_startup_rtts() const { return num_startup_rtts_; }
+ bool has_non_app_limited_sample() const {
+ return has_non_app_limited_sample_;
+ }
+
+ // Sets the pacing gain used in STARTUP. Must be greater than 1.
+ void set_high_gain(float high_gain) {
+ DCHECK_LT(1.0f, high_gain);
+ high_gain_ = high_gain;
+ if (mode_ == STARTUP) {
+ pacing_gain_ = high_gain;
+ }
+ }
+
+ // Sets the CWND gain used in STARTUP. Must be greater than 1.
+ void set_high_cwnd_gain(float high_cwnd_gain) {
+ DCHECK_LT(1.0f, high_cwnd_gain);
+ high_cwnd_gain_ = high_cwnd_gain;
+ if (mode_ == STARTUP) {
+ congestion_window_gain_ = high_cwnd_gain;
+ }
+ }
+
+ // Sets the gain used in DRAIN. Must be less than 1.
+ void set_drain_gain(float drain_gain) {
+ DCHECK_GT(1.0f, drain_gain);
+ drain_gain_ = drain_gain;
+ }
+
+ DebugState ExportDebugState() const;
+
+ private:
+ typedef WindowedFilter<QuicBandwidth,
+ MaxFilter<QuicBandwidth>,
+ QuicRoundTripCount,
+ QuicRoundTripCount>
+ MaxBandwidthFilter;
+
+ typedef WindowedFilter<QuicByteCount,
+ MaxFilter<QuicByteCount>,
+ QuicRoundTripCount,
+ QuicRoundTripCount>
+ MaxAckHeightFilter;
+
+ // Returns the current estimate of the RTT of the connection. Outside of the
+ // edge cases, this is minimum RTT.
+ QuicTime::Delta GetMinRtt() const;
+ // Returns whether the connection has achieved full bandwidth required to exit
+ // the slow start.
+ bool IsAtFullBandwidth() const;
+ // Computes the target congestion window using the specified gain.
+ QuicByteCount GetTargetCongestionWindow(float gain) const;
+ // The target congestion window during PROBE_RTT.
+ QuicByteCount ProbeRttCongestionWindow() const;
+ // Returns true if the current min_rtt should be kept and we should not enter
+ // PROBE_RTT immediately.
+ bool ShouldExtendMinRttExpiry() const;
+
+ // Enters the STARTUP mode.
+ void EnterStartupMode();
+ // Enters the PROBE_BW mode.
+ void EnterProbeBandwidthMode(QuicTime now);
+
+ // Discards the lost packets from BandwidthSampler state.
+ void DiscardLostPackets(const LostPacketVector& lost_packets);
+ // Updates the round-trip counter if a round-trip has passed. Returns true if
+ // the counter has been advanced.
+ bool UpdateRoundTripCounter(QuicPacketNumber last_acked_packet);
+ // Updates the current bandwidth and min_rtt estimate based on the samples for
+ // the received acknowledgements. Returns true if min_rtt has expired.
+ bool UpdateBandwidthAndMinRtt(QuicTime now,
+ const AckedPacketVector& acked_packets);
+ // Updates the current gain used in PROBE_BW mode.
+ void UpdateGainCyclePhase(QuicTime now,
+ QuicByteCount prior_in_flight,
+ bool has_losses);
+ // Tracks for how many round-trips the bandwidth has not increased
+ // significantly.
+ void CheckIfFullBandwidthReached();
+ // Transitions from STARTUP to DRAIN and from DRAIN to PROBE_BW if
+ // appropriate.
+ void MaybeExitStartupOrDrain(QuicTime now);
+ // Decides whether to enter or exit PROBE_RTT.
+ void MaybeEnterOrExitProbeRtt(QuicTime now,
+ bool is_round_start,
+ bool min_rtt_expired);
+ // Determines whether BBR needs to enter, exit or advance state of the
+ // recovery.
+ void UpdateRecoveryState(QuicPacketNumber last_acked_packet,
+ bool has_losses,
+ bool is_round_start);
+
+ // Updates the ack aggregation max filter in bytes.
+  // Returns the most recent addition to the filter, or zero if the current
+  // aggregation epoch was just reset.
+ QuicByteCount UpdateAckAggregationBytes(QuicTime ack_time,
+ QuicByteCount newly_acked_bytes);
+
+ // Determines the appropriate pacing rate for the connection.
+ void CalculatePacingRate();
+ // Determines the appropriate congestion window for the connection.
+ void CalculateCongestionWindow(QuicByteCount bytes_acked,
+ QuicByteCount excess_acked);
+  // Determines the appropriate window that constrains the in-flight during
+ // recovery.
+ void CalculateRecoveryWindow(QuicByteCount bytes_acked,
+ QuicByteCount bytes_lost);
+
+ // Returns true if there are enough bytes in flight to ensure more bandwidth
+ // will be observed if present.
+ bool IsPipeSufficientlyFull() const;
+
+ const RttStats* rtt_stats_;
+ const QuicUnackedPacketMap* unacked_packets_;
+ QuicRandom* random_;
+
+ Mode mode_;
+
+ // Bandwidth sampler provides BBR with the bandwidth measurements at
+ // individual points.
+ BandwidthSampler sampler_;
+
+ // The number of the round trips that have occurred during the connection.
+ QuicRoundTripCount round_trip_count_;
+
+ // The packet number of the most recently sent packet.
+ QuicPacketNumber last_sent_packet_;
+ // Acknowledgement of any packet after |current_round_trip_end_| will cause
+ // the round trip counter to advance.
+ QuicPacketNumber current_round_trip_end_;
+
+ // The filter that tracks the maximum bandwidth over the multiple recent
+ // round-trips.
+ MaxBandwidthFilter max_bandwidth_;
+
+ // Tracks the maximum number of bytes acked faster than the sending rate.
+ MaxAckHeightFilter max_ack_height_;
+
+ // The time this aggregation started and the number of bytes acked during it.
+ QuicTime aggregation_epoch_start_time_;
+ QuicByteCount aggregation_epoch_bytes_;
+
+ // Minimum RTT estimate. Automatically expires within 10 seconds (and
+ // triggers PROBE_RTT mode) if no new value is sampled during that period.
+ QuicTime::Delta min_rtt_;
+ // The time at which the current value of |min_rtt_| was assigned.
+ QuicTime min_rtt_timestamp_;
+
+ // The maximum allowed number of bytes in flight.
+ QuicByteCount congestion_window_;
+
+ // The initial value of the |congestion_window_|.
+ QuicByteCount initial_congestion_window_;
+
+ // The largest value the |congestion_window_| can achieve.
+ QuicByteCount max_congestion_window_;
+
+ // The smallest value the |congestion_window_| can achieve.
+ QuicByteCount min_congestion_window_;
+
+ // The pacing gain applied during the STARTUP phase.
+ float high_gain_;
+
+ // The CWND gain applied during the STARTUP phase.
+ float high_cwnd_gain_;
+
+ // The pacing gain applied during the DRAIN phase.
+ float drain_gain_;
+
+ // The current pacing rate of the connection.
+ QuicBandwidth pacing_rate_;
+
+ // The gain currently applied to the pacing rate.
+ float pacing_gain_;
+ // The gain currently applied to the congestion window.
+ float congestion_window_gain_;
+
+ // The gain used for the congestion window during PROBE_BW. Latched from
+ // quic_bbr_cwnd_gain flag.
+ const float congestion_window_gain_constant_;
+ // The number of RTTs to stay in STARTUP mode. Defaults to 3.
+ QuicRoundTripCount num_startup_rtts_;
+ // If true, exit startup if 1RTT has passed with no bandwidth increase and
+ // the connection is in recovery.
+ bool exit_startup_on_loss_;
+
+ // Number of round-trips in PROBE_BW mode, used for determining the current
+ // pacing gain cycle.
+ int cycle_current_offset_;
+ // The time at which the last pacing gain cycle was started.
+ QuicTime last_cycle_start_;
+
+ // Indicates whether the connection has reached the full bandwidth mode.
+ bool is_at_full_bandwidth_;
+ // Number of rounds during which there was no significant bandwidth increase.
+ QuicRoundTripCount rounds_without_bandwidth_gain_;
+ // The bandwidth compared to which the increase is measured.
+ QuicBandwidth bandwidth_at_last_round_;
+
+ // Set to true upon exiting quiescence.
+ bool exiting_quiescence_;
+
+ // Time at which PROBE_RTT has to be exited. Setting it to zero indicates
+ // that the time is yet unknown as the number of packets in flight has not
+ // reached the required value.
+ QuicTime exit_probe_rtt_at_;
+ // Indicates whether a round-trip has passed since PROBE_RTT became active.
+ bool probe_rtt_round_passed_;
+
+ // Indicates whether the most recent bandwidth sample was marked as
+ // app-limited.
+ bool last_sample_is_app_limited_;
+ // Indicates whether any non app-limited samples have been recorded.
+ bool has_non_app_limited_sample_;
+  // Indicates that app-limited calls should be ignored as long as there's
+  // enough data in flight to see more bandwidth when necessary.
+ bool flexible_app_limited_;
+
+ // Current state of recovery.
+ RecoveryState recovery_state_;
+ // Receiving acknowledgement of a packet after |end_recovery_at_| will cause
+ // BBR to exit the recovery mode. A value above zero indicates at least one
+ // loss has been detected, so it must not be set back to zero.
+ QuicPacketNumber end_recovery_at_;
+ // A window used to limit the number of bytes in flight during loss recovery.
+ QuicByteCount recovery_window_;
+ // If true, consider all samples in recovery app-limited.
+ bool is_app_limited_recovery_;
+
+ // When true, pace at 1.5x and disable packet conservation in STARTUP.
+ bool slower_startup_;
+ // When true, disables packet conservation in STARTUP.
+ bool rate_based_startup_;
+ // When non-zero, decreases the rate in STARTUP by the total number of bytes
+ // lost in STARTUP divided by CWND.
+ uint8_t startup_rate_reduction_multiplier_;
+ // Sum of bytes lost in STARTUP.
+ QuicByteCount startup_bytes_lost_;
+
+ // When true, add the most recent ack aggregation measurement during STARTUP.
+ bool enable_ack_aggregation_during_startup_;
+ // When true, expire the windowed ack aggregation values in STARTUP when
+ // bandwidth increases more than 25%.
+ bool expire_ack_aggregation_in_startup_;
+
+ // If true, will not exit low gain mode until bytes_in_flight drops below BDP
+ // or it's time for high gain mode.
+ bool drain_to_target_;
+
+ // If true, use a CWND of 0.75*BDP during probe_rtt instead of 4 packets.
+ bool probe_rtt_based_on_bdp_;
+ // If true, skip probe_rtt and update the timestamp of the existing min_rtt to
+ // now if min_rtt over the last cycle is within 12.5% of the current min_rtt.
+ // Even if the min_rtt is 12.5% too low, the 25% gain cycling and 2x CWND gain
+ // should overcome an overly small min_rtt.
+ bool probe_rtt_skipped_if_similar_rtt_;
+ // If true, disable PROBE_RTT entirely as long as the connection was recently
+ // app limited.
+ bool probe_rtt_disabled_if_app_limited_;
+ bool app_limited_since_last_probe_rtt_;
+ QuicTime::Delta min_rtt_since_last_probe_rtt_;
+};
+
+QUIC_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os,
+ const BbrSender::Mode& mode);
+QUIC_EXPORT_PRIVATE std::ostream& operator<<(
+ std::ostream& os,
+ const BbrSender::DebugState& state);
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_BBR_SENDER_H_
diff --git a/quic/core/congestion_control/bbr_sender_test.cc b/quic/core/congestion_control/bbr_sender_test.cc
new file mode 100644
index 0000000..205cf65
--- /dev/null
+++ b/quic/core/congestion_control/bbr_sender_test.cc
@@ -0,0 +1,1314 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/bbr_sender.h"
+
+#include <algorithm>
+#include <map>
+#include <memory>
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/core/quic_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_ptr_util.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+#include "net/third_party/quiche/src/quic/test_tools/mock_clock.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_config_peer.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_connection_peer.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_sent_packet_manager_peer.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_test_utils.h"
+#include "net/third_party/quiche/src/quic/test_tools/simulator/quic_endpoint.h"
+#include "net/third_party/quiche/src/quic/test_tools/simulator/simulator.h"
+#include "net/third_party/quiche/src/quic/test_tools/simulator/switch.h"
+
+namespace quic {
+namespace test {
+
+// Use the initial CWND of 10, as 32 is too much for the test network.
+const uint32_t kInitialCongestionWindowPackets = 10;
+const uint32_t kDefaultWindowTCP =
+ kInitialCongestionWindowPackets * kDefaultTCPMSS;
+
+// Test network parameters. Here, the topology of the network is:
+//
+// BBR sender
+// |
+// | <-- local link (10 Mbps, 2 ms delay)
+// |
+// Network switch
+// * <-- the bottleneck queue in the direction
+// | of the receiver
+// |
+// | <-- test link (4 Mbps, 30 ms delay)
+// |
+// |
+// Receiver
+//
+// The bandwidths are chosen to be relatively low because the connection
+// simulator uses QuicTime as its internal clock, which has a granularity of
+// 1us; at bandwidths above 20 Mbps, packets can start to land on the same
+// timestamp.
+const QuicBandwidth kTestLinkBandwidth =
+ QuicBandwidth::FromKBitsPerSecond(4000);
+const QuicBandwidth kLocalLinkBandwidth =
+ QuicBandwidth::FromKBitsPerSecond(10000);
+const QuicTime::Delta kTestPropagationDelay =
+ QuicTime::Delta::FromMilliseconds(30);
+const QuicTime::Delta kLocalPropagationDelay =
+ QuicTime::Delta::FromMilliseconds(2);
+const QuicTime::Delta kTestTransferTime =
+ kTestLinkBandwidth.TransferTime(kMaxPacketSize) +
+ kLocalLinkBandwidth.TransferTime(kMaxPacketSize);
+const QuicTime::Delta kTestRtt =
+ (kTestPropagationDelay + kLocalPropagationDelay + kTestTransferTime) * 2;
+const QuicByteCount kTestBdp = kTestRtt * kTestLinkBandwidth;
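+// With these parameters, kTestRtt works out to roughly 70 ms and kTestBdp to
+// a few tens of kilobytes (the exact values depend on kMaxPacketSize).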
+
+class BbrSenderTest : public QuicTest {
+ protected:
+ BbrSenderTest()
+ : simulator_(),
+ bbr_sender_(&simulator_,
+ "BBR sender",
+ "Receiver",
+ Perspective::IS_CLIENT,
+ /*connection_id=*/TestConnectionId(42)),
+ competing_sender_(&simulator_,
+ "Competing sender",
+ "Competing receiver",
+ Perspective::IS_CLIENT,
+ /*connection_id=*/TestConnectionId(43)),
+ receiver_(&simulator_,
+ "Receiver",
+ "BBR sender",
+ Perspective::IS_SERVER,
+ /*connection_id=*/TestConnectionId(42)),
+ competing_receiver_(&simulator_,
+ "Competing receiver",
+ "Competing sender",
+ Perspective::IS_SERVER,
+ /*connection_id=*/TestConnectionId(43)),
+ receiver_multiplexer_("Receiver multiplexer",
+ {&receiver_, &competing_receiver_}) {
+ rtt_stats_ = bbr_sender_.connection()->sent_packet_manager().GetRttStats();
+ sender_ = SetupBbrSender(&bbr_sender_);
+
+ clock_ = simulator_.GetClock();
+ simulator_.set_random_generator(&random_);
+
+ uint64_t seed = QuicRandom::GetInstance()->RandUint64();
+ random_.set_seed(seed);
+ QUIC_LOG(INFO) << "BbrSenderTest simulator set up. Seed: " << seed;
+ }
+
+ simulator::Simulator simulator_;
+ simulator::QuicEndpoint bbr_sender_;
+ simulator::QuicEndpoint competing_sender_;
+ simulator::QuicEndpoint receiver_;
+ simulator::QuicEndpoint competing_receiver_;
+ simulator::QuicEndpointMultiplexer receiver_multiplexer_;
+ std::unique_ptr<simulator::Switch> switch_;
+ std::unique_ptr<simulator::SymmetricLink> bbr_sender_link_;
+ std::unique_ptr<simulator::SymmetricLink> competing_sender_link_;
+ std::unique_ptr<simulator::SymmetricLink> receiver_link_;
+
+ SimpleRandom random_;
+
+ // Owned by different components of the connection.
+ const QuicClock* clock_;
+ const RttStats* rtt_stats_;
+ BbrSender* sender_;
+
+ // Enables BBR on |endpoint| and returns the associated BBR congestion
+ // controller.
+ BbrSender* SetupBbrSender(simulator::QuicEndpoint* endpoint) {
+ const RttStats* rtt_stats =
+ endpoint->connection()->sent_packet_manager().GetRttStats();
+    // Ownership of the sender is taken over by the endpoint.
+ BbrSender* sender = new BbrSender(
+ rtt_stats,
+ QuicSentPacketManagerPeer::GetUnackedPacketMap(
+ QuicConnectionPeer::GetSentPacketManager(endpoint->connection())),
+ kInitialCongestionWindowPackets, kDefaultMaxCongestionWindowPackets,
+ &random_);
+ QuicConnectionPeer::SetSendAlgorithm(endpoint->connection(), sender);
+ endpoint->RecordTrace();
+ return sender;
+ }
+
+  // Creates a default setup, which is a network with a bottleneck between the
+  // receiver and the switch. The switch has buffers twice the size of the
+  // bottleneck BDP, which should guarantee a lack of losses.
+ void CreateDefaultSetup() {
+ switch_ = QuicMakeUnique<simulator::Switch>(&simulator_, "Switch", 8,
+ 2 * kTestBdp);
+ bbr_sender_link_ = QuicMakeUnique<simulator::SymmetricLink>(
+ &bbr_sender_, switch_->port(1), kLocalLinkBandwidth,
+ kLocalPropagationDelay);
+ receiver_link_ = QuicMakeUnique<simulator::SymmetricLink>(
+ &receiver_, switch_->port(2), kTestLinkBandwidth,
+ kTestPropagationDelay);
+ }
+
+  // Same as the default setup, except the buffer is now half of the BDP.
+ void CreateSmallBufferSetup() {
+ switch_ = QuicMakeUnique<simulator::Switch>(&simulator_, "Switch", 8,
+ 0.5 * kTestBdp);
+ bbr_sender_link_ = QuicMakeUnique<simulator::SymmetricLink>(
+ &bbr_sender_, switch_->port(1), kLocalLinkBandwidth,
+ kTestPropagationDelay);
+ receiver_link_ = QuicMakeUnique<simulator::SymmetricLink>(
+ &receiver_, switch_->port(2), kTestLinkBandwidth,
+ kTestPropagationDelay);
+ }
+
+ // Creates the variation of the default setup in which there is another sender
+ // that competes for the same bottleneck link.
+ void CreateCompetitionSetup() {
+ switch_ = QuicMakeUnique<simulator::Switch>(&simulator_, "Switch", 8,
+ 2 * kTestBdp);
+
+ // Add a small offset to the competing link in order to avoid
+ // synchronization effects.
+ const QuicTime::Delta small_offset = QuicTime::Delta::FromMicroseconds(3);
+ bbr_sender_link_ = QuicMakeUnique<simulator::SymmetricLink>(
+ &bbr_sender_, switch_->port(1), kLocalLinkBandwidth,
+ kLocalPropagationDelay);
+ competing_sender_link_ = QuicMakeUnique<simulator::SymmetricLink>(
+ &competing_sender_, switch_->port(3), kLocalLinkBandwidth,
+ kLocalPropagationDelay + small_offset);
+ receiver_link_ = QuicMakeUnique<simulator::SymmetricLink>(
+ &receiver_multiplexer_, switch_->port(2), kTestLinkBandwidth,
+ kTestPropagationDelay);
+ }
+
+ // Creates a BBR vs BBR competition setup.
+ void CreateBbrVsBbrSetup() {
+ SetupBbrSender(&competing_sender_);
+ CreateCompetitionSetup();
+ }
+
+ void EnableAggregation(QuicByteCount aggregation_bytes,
+ QuicTime::Delta aggregation_timeout) {
+ // Enable aggregation on the path from the receiver to the sender.
+ switch_->port_queue(1)->EnableAggregation(aggregation_bytes,
+ aggregation_timeout);
+ }
+
+ void DoSimpleTransfer(QuicByteCount transfer_size, QuicTime::Delta deadline) {
+ bbr_sender_.AddBytesToTransfer(transfer_size);
+ // TODO(vasilvv): consider rewriting this to run until the receiver actually
+ // receives the intended amount of bytes.
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return bbr_sender_.bytes_to_transfer() == 0; }, deadline);
+ EXPECT_TRUE(simulator_result)
+ << "Simple transfer failed. Bytes remaining: "
+ << bbr_sender_.bytes_to_transfer();
+ QUIC_LOG(INFO) << "Simple transfer state: " << sender_->ExportDebugState();
+ }
+
+ // Drive the simulator by sending enough data to enter PROBE_BW.
+ void DriveOutOfStartup() {
+ ASSERT_FALSE(sender_->ExportDebugState().is_at_full_bandwidth);
+ DoSimpleTransfer(1024 * 1024, QuicTime::Delta::FromSeconds(15));
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ ExpectApproxEq(kTestLinkBandwidth,
+ sender_->ExportDebugState().max_bandwidth, 0.02f);
+ }
+
+ // Send |bytes|-sized bursts of data |number_of_bursts| times, waiting for
+ // |wait_time| between each burst.
+ void SendBursts(size_t number_of_bursts,
+ QuicByteCount bytes,
+ QuicTime::Delta wait_time) {
+ ASSERT_EQ(0u, bbr_sender_.bytes_to_transfer());
+ for (size_t i = 0; i < number_of_bursts; i++) {
+ bbr_sender_.AddBytesToTransfer(bytes);
+
+      // Transfer data and wait for |wait_time| between bursts.
+ simulator_.RunFor(wait_time);
+
+ // Ensure the connection did not time out.
+ ASSERT_TRUE(bbr_sender_.connection()->connected());
+ ASSERT_TRUE(receiver_.connection()->connected());
+ }
+
+ simulator_.RunFor(wait_time + kTestRtt);
+ ASSERT_EQ(0u, bbr_sender_.bytes_to_transfer());
+ }
+
+ void SetConnectionOption(QuicTag option) {
+ QuicConfig config;
+ QuicTagVector options;
+ options.push_back(option);
+ QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
+ sender_->SetFromConfig(config, Perspective::IS_SERVER);
+ }
+};
+
+TEST_F(BbrSenderTest, SetInitialCongestionWindow) {
+ EXPECT_NE(3u * kDefaultTCPMSS, sender_->GetCongestionWindow());
+ sender_->SetInitialCongestionWindowInPackets(3);
+ EXPECT_EQ(3u * kDefaultTCPMSS, sender_->GetCongestionWindow());
+}
+
+// Test a simple long data transfer in the default setup.
+TEST_F(BbrSenderTest, SimpleTransfer) {
+ // Disable Ack Decimation on the receiver, because it can increase srtt.
+ QuicConnectionPeer::SetAckMode(receiver_.connection(), AckMode::TCP_ACKING);
+ CreateDefaultSetup();
+
+ // At startup make sure we are at the default.
+ EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
+ // At startup make sure we can send.
+ EXPECT_TRUE(sender_->CanSend(0));
+ // And that window is un-affected.
+ EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
+
+ // Verify that Sender is in slow start.
+ EXPECT_TRUE(sender_->InSlowStart());
+
+ // Verify that pacing rate is based on the initial RTT.
+ QuicBandwidth expected_pacing_rate = QuicBandwidth::FromBytesAndTimeDelta(
+ 2.885 * kDefaultWindowTCP, rtt_stats_->initial_rtt());
+ ExpectApproxEq(expected_pacing_rate.ToBitsPerSecond(),
+ sender_->PacingRate(0).ToBitsPerSecond(), 0.01f);
+
+ ASSERT_GE(kTestBdp, kDefaultWindowTCP + kDefaultTCPMSS);
+
+ DoSimpleTransfer(12 * 1024 * 1024, QuicTime::Delta::FromSeconds(30));
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ EXPECT_EQ(0u, bbr_sender_.connection()->GetStats().packets_lost);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+
+  // The margin here is quite high, since there is a possibility that the
+  // connection has just exited the high gain cycle.
+ ExpectApproxEq(kTestRtt, rtt_stats_->smoothed_rtt(), 0.2f);
+}
+
+// Test a simple transfer in a situation when the buffer is less than BDP.
+TEST_F(BbrSenderTest, SimpleTransferSmallBuffer) {
+ CreateSmallBufferSetup();
+
+ DoSimpleTransfer(12 * 1024 * 1024, QuicTime::Delta::FromSeconds(30));
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ ExpectApproxEq(kTestLinkBandwidth, sender_->ExportDebugState().max_bandwidth,
+ 0.01f);
+ EXPECT_GE(bbr_sender_.connection()->GetStats().packets_lost, 0u);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+}
+
+TEST_F(BbrSenderTest, SimpleTransferEarlyPacketLoss) {
+ SetQuicReloadableFlag(quic_bbr_no_bytes_acked_in_startup_recovery, true);
+ // Enable rate based startup so the recovery window doesn't hide the true
+ // congestion_window_ in GetCongestionWindow().
+ SetConnectionOption(kBBS1);
+ // Disable Ack Decimation on the receiver, because it can increase srtt.
+ QuicConnectionPeer::SetAckMode(receiver_.connection(), AckMode::TCP_ACKING);
+ CreateDefaultSetup();
+
+ // At startup make sure we are at the default.
+ EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
+ // Verify that Sender is in slow start.
+ EXPECT_TRUE(sender_->InSlowStart());
+ // At startup make sure we can send.
+ EXPECT_TRUE(sender_->CanSend(0));
+ // And that window is un-affected.
+ EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
+
+ // Transfer 12MB.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ // Drop the first packet.
+ receiver_.DropNextIncomingPacket();
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ if (sender_->InRecovery()) {
+ // Two packets are acked before the first is declared lost.
+ EXPECT_LE(sender_->GetCongestionWindow(),
+ (kDefaultWindowTCP + 2 * kDefaultTCPMSS));
+ }
+ return bbr_sender_.bytes_to_transfer() == 0 || !sender_->InSlowStart();
+ },
+ QuicTime::Delta::FromSeconds(30));
+ EXPECT_TRUE(simulator_result) << "Simple transfer failed. Bytes remaining: "
+ << bbr_sender_.bytes_to_transfer();
+ EXPECT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ EXPECT_EQ(1u, bbr_sender_.connection()->GetStats().packets_lost);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+}
+
+// Test a simple long data transfer with 2 rtts of aggregation.
+TEST_F(BbrSenderTest, SimpleTransfer2RTTAggregationBytes) {
+ CreateDefaultSetup();
+ // 2 RTTs of aggregation, with a max of 10kb.
+ EnableAggregation(10 * 1024, 2 * kTestRtt);
+
+ // Transfer 12MB.
+ DoSimpleTransfer(12 * 1024 * 1024, QuicTime::Delta::FromSeconds(35));
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ // It's possible to read a bandwidth as much as 50% too high with aggregation.
+ EXPECT_LE(kTestLinkBandwidth * 0.99f,
+ sender_->ExportDebugState().max_bandwidth);
+ // TODO(ianswett): Tighten this bound once we understand why BBR is
+ // overestimating bandwidth with aggregation. b/36022633
+ EXPECT_GE(kTestLinkBandwidth * 1.5f,
+ sender_->ExportDebugState().max_bandwidth);
+ // TODO(ianswett): Expect 0 packets are lost once BBR no longer measures
+ // bandwidth higher than the link rate.
+ // The margin here is high, because the aggregation greatly increases
+ // smoothed rtt.
+ EXPECT_GE(kTestRtt * 4, rtt_stats_->smoothed_rtt());
+ ExpectApproxEq(kTestRtt, rtt_stats_->min_rtt(), 0.2f);
+}
+
+// Test a simple long data transfer with ack decimation enabled on the
+// receiver.
+TEST_F(BbrSenderTest, SimpleTransferAckDecimation) {
+ // Decrease the CWND gain so extra CWND is required with stretch acks.
+ FLAGS_quic_bbr_cwnd_gain = 1.0;
+ sender_ = new BbrSender(
+ rtt_stats_,
+ QuicSentPacketManagerPeer::GetUnackedPacketMap(
+ QuicConnectionPeer::GetSentPacketManager(bbr_sender_.connection())),
+ kInitialCongestionWindowPackets, kDefaultMaxCongestionWindowPackets,
+ &random_);
+ QuicConnectionPeer::SetSendAlgorithm(bbr_sender_.connection(), sender_);
+ // Enable Ack Decimation on the receiver.
+ QuicConnectionPeer::SetAckMode(receiver_.connection(),
+ AckMode::ACK_DECIMATION);
+ CreateDefaultSetup();
+
+ // Transfer 12MB.
+ DoSimpleTransfer(12 * 1024 * 1024, QuicTime::Delta::FromSeconds(35));
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ // It's possible to read a bandwidth as much as 50% too high with aggregation.
+ EXPECT_LE(kTestLinkBandwidth * 0.99f,
+ sender_->ExportDebugState().max_bandwidth);
+ // TODO(ianswett): Tighten this bound once we understand why BBR is
+ // overestimating bandwidth with aggregation. b/36022633
+ EXPECT_GE(kTestLinkBandwidth * 1.5f,
+ sender_->ExportDebugState().max_bandwidth);
+ // TODO(ianswett): Expect 0 packets are lost once BBR no longer measures
+ // bandwidth higher than the link rate.
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+ // The margin here is high, because the aggregation greatly increases
+ // smoothed rtt.
+ EXPECT_GE(kTestRtt * 2, rtt_stats_->smoothed_rtt());
+ ExpectApproxEq(kTestRtt, rtt_stats_->min_rtt(), 0.1f);
+}
+
+// Test a simple long data transfer with 2 rtts of aggregation.
+TEST_F(BbrSenderTest, SimpleTransfer2RTTAggregationBytes20RTTWindow) {
+ // Disable Ack Decimation on the receiver, because it can increase srtt.
+ QuicConnectionPeer::SetAckMode(receiver_.connection(), AckMode::TCP_ACKING);
+ CreateDefaultSetup();
+ SetConnectionOption(kBBR4);
+ // 2 RTTs of aggregation, with a max of 10kb.
+ EnableAggregation(10 * 1024, 2 * kTestRtt);
+
+ // Transfer 12MB.
+ DoSimpleTransfer(12 * 1024 * 1024, QuicTime::Delta::FromSeconds(35));
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ // It's possible to read a bandwidth as much as 50% too high with aggregation.
+ EXPECT_LE(kTestLinkBandwidth * 0.99f,
+ sender_->ExportDebugState().max_bandwidth);
+ // TODO(ianswett): Tighten this bound once we understand why BBR is
+ // overestimating bandwidth with aggregation. b/36022633
+ EXPECT_GE(kTestLinkBandwidth * 1.5f,
+ sender_->ExportDebugState().max_bandwidth);
+ // TODO(ianswett): Expect 0 packets are lost once BBR no longer measures
+ // bandwidth higher than the link rate.
+ // The margin here is high, because the aggregation greatly increases
+ // smoothed rtt.
+ EXPECT_GE(kTestRtt * 4, rtt_stats_->smoothed_rtt());
+ ExpectApproxEq(kTestRtt, rtt_stats_->min_rtt(), 0.12f);
+}
+
+// Test a simple long data transfer with 2 rtts of aggregation.
+TEST_F(BbrSenderTest, SimpleTransfer2RTTAggregationBytes40RTTWindow) {
+ // Disable Ack Decimation on the receiver, because it can increase srtt.
+ QuicConnectionPeer::SetAckMode(receiver_.connection(), AckMode::TCP_ACKING);
+ CreateDefaultSetup();
+ SetConnectionOption(kBBR5);
+ // 2 RTTs of aggregation, with a max of 10kb.
+ EnableAggregation(10 * 1024, 2 * kTestRtt);
+
+ // Transfer 12MB.
+ DoSimpleTransfer(12 * 1024 * 1024, QuicTime::Delta::FromSeconds(35));
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ // It's possible to read a bandwidth as much as 50% too high with aggregation.
+ EXPECT_LE(kTestLinkBandwidth * 0.99f,
+ sender_->ExportDebugState().max_bandwidth);
+ // TODO(ianswett): Tighten this bound once we understand why BBR is
+ // overestimating bandwidth with aggregation. b/36022633
+ EXPECT_GE(kTestLinkBandwidth * 1.5f,
+ sender_->ExportDebugState().max_bandwidth);
+ // TODO(ianswett): Expect 0 packets are lost once BBR no longer measures
+ // bandwidth higher than the link rate.
+ // The margin here is high, because the aggregation greatly increases
+ // smoothed rtt.
+ EXPECT_GE(kTestRtt * 4, rtt_stats_->smoothed_rtt());
+ ExpectApproxEq(kTestRtt, rtt_stats_->min_rtt(), 0.12f);
+}
+
+// Test the number of losses incurred by the startup phase in a situation when
+// the buffer is less than BDP.
+TEST_F(BbrSenderTest, PacketLossOnSmallBufferStartup) {
+ CreateSmallBufferSetup();
+
+ DriveOutOfStartup();
+ float loss_rate =
+ static_cast<float>(bbr_sender_.connection()->GetStats().packets_lost) /
+ bbr_sender_.connection()->GetStats().packets_sent;
+ EXPECT_LE(loss_rate, 0.31);
+}
+
+// Ensures the code transitions loss recovery states correctly (NOT_IN_RECOVERY
+// -> CONSERVATION -> GROWTH -> NOT_IN_RECOVERY).
+TEST_F(BbrSenderTest, RecoveryStates) {
+  // Set the seed to a position where the gain cycling causes the sender to go
+  // into conservation upon entering PROBE_BW.
+ //
+ // TODO(vasilvv): there should be a better way to test this.
+ random_.set_seed(UINT64_C(14719894707049085006));
+
+ const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(10);
+ bool simulator_result;
+ CreateSmallBufferSetup();
+
+ bbr_sender_.AddBytesToTransfer(100 * 1024 * 1024);
+ ASSERT_EQ(BbrSender::NOT_IN_RECOVERY,
+ sender_->ExportDebugState().recovery_state);
+
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return sender_->ExportDebugState().recovery_state !=
+ BbrSender::NOT_IN_RECOVERY;
+ },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+ ASSERT_EQ(BbrSender::CONSERVATION,
+ sender_->ExportDebugState().recovery_state);
+
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return sender_->ExportDebugState().recovery_state !=
+ BbrSender::CONSERVATION;
+ },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+ ASSERT_EQ(BbrSender::GROWTH, sender_->ExportDebugState().recovery_state);
+
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return sender_->ExportDebugState().recovery_state != BbrSender::GROWTH;
+ },
+ timeout);
+
+ ASSERT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ ASSERT_EQ(BbrSender::NOT_IN_RECOVERY,
+ sender_->ExportDebugState().recovery_state);
+ ASSERT_TRUE(simulator_result);
+}
+
+// Verify the behavior of the algorithm in the case when the connection sends
+// small bursts of data after sending continuously for a while.
+TEST_F(BbrSenderTest, ApplicationLimitedBursts) {
+ CreateDefaultSetup();
+
+ DriveOutOfStartup();
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+
+ SendBursts(20, 512, QuicTime::Delta::FromSeconds(3));
+ EXPECT_TRUE(sender_->ExportDebugState().last_sample_is_app_limited);
+ ExpectApproxEq(kTestLinkBandwidth, sender_->ExportDebugState().max_bandwidth,
+ 0.01f);
+}
+
+// Verify the behavior of the algorithm in the case when the connection sends
+// small bursts of data and then starts sending continuously.
+TEST_F(BbrSenderTest, ApplicationLimitedBurstsWithoutPrior) {
+ CreateDefaultSetup();
+
+ SendBursts(40, 512, QuicTime::Delta::FromSeconds(3));
+ EXPECT_TRUE(sender_->ExportDebugState().last_sample_is_app_limited);
+
+ DriveOutOfStartup();
+ ExpectApproxEq(kTestLinkBandwidth, sender_->ExportDebugState().max_bandwidth,
+ 0.01f);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+}
+
+// Verify that the DRAIN phase works correctly.
+TEST_F(BbrSenderTest, Drain) {
+ // Disable Ack Decimation on the receiver, because it can increase srtt.
+ QuicConnectionPeer::SetAckMode(receiver_.connection(), AckMode::TCP_ACKING);
+ CreateDefaultSetup();
+ const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(10);
+ // Get the queue at the bottleneck, which is the outgoing queue at the port to
+ // which the receiver is connected.
+ const simulator::Queue* queue = switch_->port_queue(2);
+ bool simulator_result;
+
+ // We have no intention of ever finishing this transfer.
+ bbr_sender_.AddBytesToTransfer(100 * 1024 * 1024);
+
+ // Run the startup, and verify that it fills up the queue.
+ ASSERT_EQ(BbrSender::STARTUP, sender_->ExportDebugState().mode);
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return sender_->ExportDebugState().mode != BbrSender::STARTUP;
+ },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+ ASSERT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ ExpectApproxEq(sender_->BandwidthEstimate() * (1 / 2.885f),
+ sender_->PacingRate(0), 0.01f);
+ // BBR uses CWND gain of 2.88 during STARTUP, hence it will fill the buffer
+ // with approximately 1.88 BDPs. Here, we use 1.5 to give some margin for
+ // error.
+ EXPECT_GE(queue->bytes_queued(), 1.5 * kTestBdp);
+
+ // Observe increased RTT due to bufferbloat.
+ const QuicTime::Delta queueing_delay =
+ kTestLinkBandwidth.TransferTime(queue->bytes_queued());
+ ExpectApproxEq(kTestRtt + queueing_delay, rtt_stats_->latest_rtt(), 0.1f);
+
+ // Transition to the drain phase and verify that it makes the queue
+ // have at most a BDP worth of packets.
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return sender_->ExportDebugState().mode != BbrSender::DRAIN; },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+ ASSERT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ EXPECT_LE(queue->bytes_queued(), kTestBdp);
+
+  // Wait for a few round trips and ensure we're in the appropriate phase of
+  // gain cycling before taking an RTT measurement.
+ const QuicRoundTripCount start_round_trip =
+ sender_->ExportDebugState().round_trip_count;
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this, start_round_trip]() {
+ QuicRoundTripCount rounds_passed =
+ sender_->ExportDebugState().round_trip_count - start_round_trip;
+ return rounds_passed >= 4 &&
+ sender_->ExportDebugState().gain_cycle_index == 7;
+ },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+
+ // Observe the bufferbloat go away.
+ ExpectApproxEq(kTestRtt, rtt_stats_->smoothed_rtt(), 0.1f);
+}
+
+// Verify that the DRAIN phase works correctly.
+TEST_F(BbrSenderTest, ShallowDrain) {
+ SetQuicReloadableFlag(quic_bbr_slower_startup3, true);
+ // Disable Ack Decimation on the receiver, because it can increase srtt.
+ QuicConnectionPeer::SetAckMode(receiver_.connection(), AckMode::TCP_ACKING);
+
+ CreateDefaultSetup();
+ // BBQ4 increases the pacing gain in DRAIN to 0.75
+ SetConnectionOption(kBBQ4);
+ const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(10);
+ // Get the queue at the bottleneck, which is the outgoing queue at the port to
+ // which the receiver is connected.
+ const simulator::Queue* queue = switch_->port_queue(2);
+ bool simulator_result;
+
+ // We have no intention of ever finishing this transfer.
+ bbr_sender_.AddBytesToTransfer(100 * 1024 * 1024);
+
+ // Run the startup, and verify that it fills up the queue.
+ ASSERT_EQ(BbrSender::STARTUP, sender_->ExportDebugState().mode);
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return sender_->ExportDebugState().mode != BbrSender::STARTUP;
+ },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+ ASSERT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ EXPECT_EQ(0.75 * sender_->BandwidthEstimate(), sender_->PacingRate(0));
+ // BBR uses CWND gain of 2.88 during STARTUP, hence it will fill the buffer
+ // with approximately 1.88 BDPs. Here, we use 1.5 to give some margin for
+ // error.
+ EXPECT_GE(queue->bytes_queued(), 1.5 * kTestBdp);
+
+ // Observe increased RTT due to bufferbloat.
+ const QuicTime::Delta queueing_delay =
+ kTestLinkBandwidth.TransferTime(queue->bytes_queued());
+ ExpectApproxEq(kTestRtt + queueing_delay, rtt_stats_->latest_rtt(), 0.1f);
+
+ // Transition to the drain phase and verify that it makes the queue
+ // have at most a BDP worth of packets.
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return sender_->ExportDebugState().mode != BbrSender::DRAIN; },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+ ASSERT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ EXPECT_LE(queue->bytes_queued(), kTestBdp);
+
+  // Wait for a few round trips and ensure we're in the appropriate phase of
+  // gain cycling before taking an RTT measurement.
+ const QuicRoundTripCount start_round_trip =
+ sender_->ExportDebugState().round_trip_count;
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this, start_round_trip]() {
+ QuicRoundTripCount rounds_passed =
+ sender_->ExportDebugState().round_trip_count - start_round_trip;
+ return rounds_passed >= 4 &&
+ sender_->ExportDebugState().gain_cycle_index == 7;
+ },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+
+ // Observe the bufferbloat go away.
+ ExpectApproxEq(kTestRtt, rtt_stats_->smoothed_rtt(), 0.1f);
+}
+
+// Verify that the connection enters and exits PROBE_RTT correctly.
+TEST_F(BbrSenderTest, ProbeRtt) {
+ CreateDefaultSetup();
+ DriveOutOfStartup();
+
+ // We have no intention of ever finishing this transfer.
+ bbr_sender_.AddBytesToTransfer(100 * 1024 * 1024);
+
+ // Wait until the connection enters PROBE_RTT.
+ const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(12);
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return sender_->ExportDebugState().mode == BbrSender::PROBE_RTT;
+ },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+ ASSERT_EQ(BbrSender::PROBE_RTT, sender_->ExportDebugState().mode);
+
+ // Exit PROBE_RTT.
+ const QuicTime probe_rtt_start = clock_->Now();
+ const QuicTime::Delta time_to_exit_probe_rtt =
+ kTestRtt + QuicTime::Delta::FromMilliseconds(200);
+ simulator_.RunFor(1.5 * time_to_exit_probe_rtt);
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ EXPECT_GE(sender_->ExportDebugState().min_rtt_timestamp, probe_rtt_start);
+}
+
+// Verify that losing packets while in PROBE_RTT, which puts the connection
+// into app-limited recovery, does not decrease the bandwidth estimate.
+TEST_F(BbrSenderTest, AppLimitedRecoveryNoBandwidthDecrease) {
+ SetQuicReloadableFlag(quic_bbr_app_limited_recovery, true);
+ CreateDefaultSetup();
+ DriveOutOfStartup();
+
+ // We have no intention of ever finishing this transfer.
+ bbr_sender_.AddBytesToTransfer(100 * 1024 * 1024);
+
+ // Wait until the connection enters PROBE_RTT.
+ const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(12);
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return sender_->ExportDebugState().mode == BbrSender::PROBE_RTT;
+ },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+ ASSERT_EQ(BbrSender::PROBE_RTT, sender_->ExportDebugState().mode);
+
+ const QuicBandwidth beginning_bw = sender_->BandwidthEstimate();
+
+ // Run for most of PROBE_RTT.
+ const QuicTime probe_rtt_start = clock_->Now();
+ const QuicTime::Delta time_to_exit_probe_rtt =
+ kTestRtt + QuicTime::Delta::FromMilliseconds(200);
+ simulator_.RunFor(0.60 * time_to_exit_probe_rtt);
+ EXPECT_EQ(BbrSender::PROBE_RTT, sender_->ExportDebugState().mode);
+  // Lose a packet before exiting PROBE_RTT, which puts us in packet
+  // conservation; continue there for a while and ensure the bandwidth estimate
+  // doesn't decrease.
+ for (int i = 0; i < 20; ++i) {
+ receiver_.DropNextIncomingPacket();
+ simulator_.RunFor(0.9 * kTestRtt);
+ // Ensure the bandwidth didn't decrease and the samples are app limited.
+ EXPECT_LE(beginning_bw, sender_->BandwidthEstimate());
+ EXPECT_TRUE(sender_->ExportDebugState().last_sample_is_app_limited);
+ }
+ EXPECT_GE(sender_->ExportDebugState().min_rtt_timestamp, probe_rtt_start);
+}
+
+// Verify that the connection enters and exits PROBE_RTT correctly.
+TEST_F(BbrSenderTest, ProbeRttBDPBasedCWNDTarget) {
+ CreateDefaultSetup();
+ SetQuicReloadableFlag(quic_bbr_less_probe_rtt, true);
+ SetConnectionOption(kBBR6);
+ DriveOutOfStartup();
+
+ // We have no intention of ever finishing this transfer.
+ bbr_sender_.AddBytesToTransfer(100 * 1024 * 1024);
+
+ // Wait until the connection enters PROBE_RTT.
+ const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(12);
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return sender_->ExportDebugState().mode == BbrSender::PROBE_RTT;
+ },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+ ASSERT_EQ(BbrSender::PROBE_RTT, sender_->ExportDebugState().mode);
+
+ // Exit PROBE_RTT.
+ const QuicTime probe_rtt_start = clock_->Now();
+ const QuicTime::Delta time_to_exit_probe_rtt =
+ kTestRtt + QuicTime::Delta::FromMilliseconds(200);
+ simulator_.RunFor(1.5 * time_to_exit_probe_rtt);
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ EXPECT_GE(sender_->ExportDebugState().min_rtt_timestamp, probe_rtt_start);
+}
+
+// Verify that the connection does not enter PROBE_RTT.
+TEST_F(BbrSenderTest, ProbeRttSkippedAfterAppLimitedAndStableRtt) {
+ CreateDefaultSetup();
+ SetQuicReloadableFlag(quic_bbr_less_probe_rtt, true);
+ SetConnectionOption(kBBR7);
+ DriveOutOfStartup();
+
+ // We have no intention of ever finishing this transfer.
+ bbr_sender_.AddBytesToTransfer(100 * 1024 * 1024);
+
+ // Wait until the connection enters PROBE_RTT.
+ const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(12);
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return sender_->ExportDebugState().mode == BbrSender::PROBE_RTT;
+ },
+ timeout);
+ ASSERT_FALSE(simulator_result);
+ ASSERT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+}
+
+// Verify that the connection does not enter PROBE_RTT.
+TEST_F(BbrSenderTest, ProbeRttSkippedAfterAppLimited) {
+ CreateDefaultSetup();
+ SetQuicReloadableFlag(quic_bbr_less_probe_rtt, true);
+ SetConnectionOption(kBBR8);
+ DriveOutOfStartup();
+
+ // We have no intention of ever finishing this transfer.
+ bbr_sender_.AddBytesToTransfer(100 * 1024 * 1024);
+
+ // Wait until the connection enters PROBE_RTT.
+ const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(12);
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return sender_->ExportDebugState().mode == BbrSender::PROBE_RTT;
+ },
+ timeout);
+ ASSERT_FALSE(simulator_result);
+ ASSERT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+}
+
+// Ensure that an app-limited connection sending at a sufficiently low rate
+// does not exit the high gain phase, and that it exits the low gain phase
+// early when the number of bytes in flight is low.
+TEST_F(BbrSenderTest, InFlightAwareGainCycling) {
+ // Disable Ack Decimation on the receiver, because it can increase srtt.
+ QuicConnectionPeer::SetAckMode(receiver_.connection(), AckMode::TCP_ACKING);
+ CreateDefaultSetup();
+ DriveOutOfStartup();
+
+ const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(5);
+ bool simulator_result;
+
+ // Start a few cycles prior to the high gain one.
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return sender_->ExportDebugState().gain_cycle_index == 6; },
+ timeout);
+
+ // Send at 10% of available rate. Run for 3 seconds, checking in the middle
+ // and at the end. The pacing gain should be high throughout.
+ QuicBandwidth target_bandwidth = 0.1f * kTestLinkBandwidth;
+ QuicTime::Delta burst_interval = QuicTime::Delta::FromMilliseconds(300);
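+  // Each burst below carries target_bandwidth * burst_interval bytes and the
+  // bursts are spaced one burst_interval apart, so the average send rate is
+  // 10% of the link bandwidth; two loop iterations of five bursts cover the
+  // 3 seconds mentioned above.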
+ for (int i = 0; i < 2; i++) {
+ SendBursts(5, target_bandwidth * burst_interval, burst_interval);
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ EXPECT_EQ(0, sender_->ExportDebugState().gain_cycle_index);
+ ExpectApproxEq(kTestLinkBandwidth,
+ sender_->ExportDebugState().max_bandwidth, 0.01f);
+ }
+
+  // Now that in-flight is almost zero and the pacing gain is still above 1,
+  // send approximately 1.3 BDPs worth of data.  This should cause PROBE_BW to
+  // enter the low gain cycle and then exit it earlier than one min_rtt due to
+  // running out of data to send.
+ bbr_sender_.AddBytesToTransfer(1.3 * kTestBdp);
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return sender_->ExportDebugState().gain_cycle_index == 1; },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+ simulator_.RunFor(0.75 * sender_->ExportDebugState().min_rtt);
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ EXPECT_EQ(2, sender_->ExportDebugState().gain_cycle_index);
+}
+
+// Ensure that the pacing rate does not drop at startup.
+TEST_F(BbrSenderTest, NoBandwidthDropOnStartup) {
+ CreateDefaultSetup();
+
+ const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(5);
+ bool simulator_result;
+
+ QuicBandwidth initial_rate = QuicBandwidth::FromBytesAndTimeDelta(
+ kInitialCongestionWindowPackets * kDefaultTCPMSS,
+ rtt_stats_->initial_rtt());
+ EXPECT_GE(sender_->PacingRate(0), initial_rate);
+
+ // Send a packet.
+ bbr_sender_.AddBytesToTransfer(1000);
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return receiver_.bytes_received() == 1000; }, timeout);
+ ASSERT_TRUE(simulator_result);
+ EXPECT_GE(sender_->PacingRate(0), initial_rate);
+
+ // Wait for a while.
+ simulator_.RunFor(QuicTime::Delta::FromSeconds(2));
+ EXPECT_GE(sender_->PacingRate(0), initial_rate);
+
+ // Send another packet.
+ bbr_sender_.AddBytesToTransfer(1000);
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return receiver_.bytes_received() == 2000; }, timeout);
+ ASSERT_TRUE(simulator_result);
+ EXPECT_GE(sender_->PacingRate(0), initial_rate);
+}
+
+// Test exiting STARTUP earlier due to the 1RTT connection option.
+TEST_F(BbrSenderTest, SimpleTransfer1RTTStartup) {
+ CreateDefaultSetup();
+
+ SetConnectionOption(k1RTT);
+ EXPECT_EQ(1u, sender_->num_startup_rtts());
+
+ // Run until the full bandwidth is reached and check how many rounds it was.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ QuicRoundTripCount max_bw_round = 0;
+ QuicBandwidth max_bw(QuicBandwidth::Zero());
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this, &max_bw, &max_bw_round]() {
+ if (max_bw < sender_->ExportDebugState().max_bandwidth) {
+ max_bw = sender_->ExportDebugState().max_bandwidth;
+ max_bw_round = sender_->ExportDebugState().round_trip_count;
+ }
+ return sender_->ExportDebugState().is_at_full_bandwidth;
+ },
+ QuicTime::Delta::FromSeconds(5));
+ ASSERT_TRUE(simulator_result);
+ EXPECT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ EXPECT_EQ(1u, sender_->ExportDebugState().round_trip_count - max_bw_round);
+ EXPECT_EQ(1u, sender_->ExportDebugState().rounds_without_bandwidth_gain);
+ EXPECT_EQ(0u, bbr_sender_.connection()->GetStats().packets_lost);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+}
+
+// Test exiting STARTUP earlier due to the 2RTT connection option.
+TEST_F(BbrSenderTest, SimpleTransfer2RTTStartup) {
+ CreateDefaultSetup();
+
+ SetConnectionOption(k2RTT);
+ EXPECT_EQ(2u, sender_->num_startup_rtts());
+
+ // Run until the full bandwidth is reached and check how many rounds it was.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ QuicRoundTripCount max_bw_round = 0;
+ QuicBandwidth max_bw(QuicBandwidth::Zero());
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this, &max_bw, &max_bw_round]() {
+ if (max_bw < sender_->ExportDebugState().max_bandwidth) {
+ max_bw = sender_->ExportDebugState().max_bandwidth;
+ max_bw_round = sender_->ExportDebugState().round_trip_count;
+ }
+ return sender_->ExportDebugState().is_at_full_bandwidth;
+ },
+ QuicTime::Delta::FromSeconds(5));
+ ASSERT_TRUE(simulator_result);
+ EXPECT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ EXPECT_EQ(2u, sender_->ExportDebugState().round_trip_count - max_bw_round);
+ EXPECT_EQ(2u, sender_->ExportDebugState().rounds_without_bandwidth_gain);
+ EXPECT_EQ(0u, bbr_sender_.connection()->GetStats().packets_lost);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+}
+
+// Test exiting STARTUP earlier upon loss due to the LRTT connection option.
+TEST_F(BbrSenderTest, SimpleTransferLRTTStartup) {
+ CreateDefaultSetup();
+
+ SetConnectionOption(kLRTT);
+ EXPECT_EQ(3u, sender_->num_startup_rtts());
+
+ // Run until the full bandwidth is reached and check how many rounds it was.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ QuicRoundTripCount max_bw_round = 0;
+ QuicBandwidth max_bw(QuicBandwidth::Zero());
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this, &max_bw, &max_bw_round]() {
+ if (max_bw < sender_->ExportDebugState().max_bandwidth) {
+ max_bw = sender_->ExportDebugState().max_bandwidth;
+ max_bw_round = sender_->ExportDebugState().round_trip_count;
+ }
+ return sender_->ExportDebugState().is_at_full_bandwidth;
+ },
+ QuicTime::Delta::FromSeconds(5));
+ ASSERT_TRUE(simulator_result);
+ EXPECT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ EXPECT_EQ(3u, sender_->ExportDebugState().round_trip_count - max_bw_round);
+ EXPECT_EQ(3u, sender_->ExportDebugState().rounds_without_bandwidth_gain);
+ EXPECT_EQ(0u, bbr_sender_.connection()->GetStats().packets_lost);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+}
+
+// Test exiting STARTUP earlier upon loss due to the LRTT connection option.
+TEST_F(BbrSenderTest, SimpleTransferLRTTStartupSmallBuffer) {
+ CreateSmallBufferSetup();
+
+ SetConnectionOption(kLRTT);
+ EXPECT_EQ(3u, sender_->num_startup_rtts());
+
+ // Run until the full bandwidth is reached and check how many rounds it was.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ QuicRoundTripCount max_bw_round = 0;
+ QuicBandwidth max_bw(QuicBandwidth::Zero());
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this, &max_bw, &max_bw_round]() {
+ if (max_bw < sender_->ExportDebugState().max_bandwidth) {
+ max_bw = sender_->ExportDebugState().max_bandwidth;
+ max_bw_round = sender_->ExportDebugState().round_trip_count;
+ }
+ return sender_->ExportDebugState().is_at_full_bandwidth;
+ },
+ QuicTime::Delta::FromSeconds(5));
+ ASSERT_TRUE(simulator_result);
+ EXPECT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ EXPECT_GE(2u, sender_->ExportDebugState().round_trip_count - max_bw_round);
+ EXPECT_EQ(1u, sender_->ExportDebugState().rounds_without_bandwidth_gain);
+ EXPECT_NE(0u, bbr_sender_.connection()->GetStats().packets_lost);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+}
+
+// Test slower pacing after loss in STARTUP due to the BBRS connection option.
+TEST_F(BbrSenderTest, SimpleTransferSlowerStartup) {
+ CreateSmallBufferSetup();
+
+ SetConnectionOption(kBBRS);
+ EXPECT_EQ(3u, sender_->num_startup_rtts());
+
+ // Run until the full bandwidth is reached and check how many rounds it was.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ QuicRoundTripCount max_bw_round = 0;
+ QuicBandwidth max_bw(QuicBandwidth::Zero());
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this, &max_bw, &max_bw_round]() {
+ if (max_bw < sender_->ExportDebugState().max_bandwidth) {
+ max_bw = sender_->ExportDebugState().max_bandwidth;
+ max_bw_round = sender_->ExportDebugState().round_trip_count;
+ }
+        // Expect the pacing rate in STARTUP to decrease once packet loss
+        // is observed, but the CWND to remain unchanged.
+ if (bbr_sender_.connection()->GetStats().packets_lost > 0 &&
+ !sender_->ExportDebugState().is_at_full_bandwidth &&
+ sender_->has_non_app_limited_sample()) {
+ EXPECT_EQ(1.5f * max_bw, sender_->PacingRate(0));
+ }
+ return sender_->ExportDebugState().is_at_full_bandwidth;
+ },
+ QuicTime::Delta::FromSeconds(5));
+ ASSERT_TRUE(simulator_result);
+ EXPECT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ EXPECT_GE(3u, sender_->ExportDebugState().round_trip_count - max_bw_round);
+ EXPECT_EQ(3u, sender_->ExportDebugState().rounds_without_bandwidth_gain);
+ EXPECT_NE(0u, bbr_sender_.connection()->GetStats().packets_lost);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+}
+
+// Ensures no change in congestion window in STARTUP after loss.
+TEST_F(BbrSenderTest, SimpleTransferNoConservationInStartup) {
+ CreateSmallBufferSetup();
+
+ SetConnectionOption(kBBS1);
+
+ // Run until the full bandwidth is reached and check how many rounds it was.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ bool used_conservation_cwnd = false;
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this, &used_conservation_cwnd]() {
+ if (!sender_->ExportDebugState().is_at_full_bandwidth &&
+ sender_->GetCongestionWindow() <
+ sender_->ExportDebugState().congestion_window) {
+ used_conservation_cwnd = true;
+ }
+ return sender_->ExportDebugState().is_at_full_bandwidth;
+ },
+ QuicTime::Delta::FromSeconds(5));
+ ASSERT_TRUE(simulator_result);
+ EXPECT_FALSE(used_conservation_cwnd);
+ EXPECT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ EXPECT_EQ(3u, sender_->ExportDebugState().rounds_without_bandwidth_gain);
+ EXPECT_NE(0u, bbr_sender_.connection()->GetStats().packets_lost);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+}
+
+// Ensures no change in congestion window in STARTUP after loss, but that the
+// rate decreases.
+TEST_F(BbrSenderTest, SimpleTransferStartupRateReduction) {
+ SetQuicReloadableFlag(quic_bbr_startup_rate_reduction, true);
+ CreateSmallBufferSetup();
+
+ SetConnectionOption(kBBS4);
+
+ // Run until the full bandwidth is reached and check how many rounds it was.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ bool used_conservation_cwnd = false;
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this, &used_conservation_cwnd]() {
+ if (!sender_->ExportDebugState().is_at_full_bandwidth &&
+ sender_->GetCongestionWindow() <
+ sender_->ExportDebugState().congestion_window) {
+ used_conservation_cwnd = true;
+ }
+ // Exit once a loss is hit.
+ return bbr_sender_.connection()->GetStats().packets_lost > 0 ||
+ sender_->ExportDebugState().is_at_full_bandwidth;
+ },
+ QuicTime::Delta::FromSeconds(5));
+ ASSERT_TRUE(simulator_result);
+ EXPECT_TRUE(sender_->InRecovery());
+ EXPECT_FALSE(used_conservation_cwnd);
+ EXPECT_EQ(BbrSender::STARTUP, sender_->ExportDebugState().mode);
+ EXPECT_NE(0u, bbr_sender_.connection()->GetStats().packets_lost);
+
+ // Lose each outstanding packet and the pacing rate decreases.
+ const QuicBandwidth original_pacing_rate = sender_->PacingRate(0);
+ QuicBandwidth pacing_rate = original_pacing_rate;
+ const QuicByteCount original_cwnd = sender_->GetCongestionWindow();
+ LostPacketVector lost_packets;
+ lost_packets.push_back(LostPacket(QuicPacketNumber(), kMaxPacketSize));
+ QuicPacketNumber largest_sent =
+ bbr_sender_.connection()->sent_packet_manager().GetLargestSentPacket();
+ for (QuicPacketNumber packet_number =
+ bbr_sender_.connection()->sent_packet_manager().GetLeastUnacked();
+ packet_number <= largest_sent; ++packet_number) {
+ lost_packets[0].packet_number = packet_number;
+ sender_->OnCongestionEvent(false, 0, clock_->Now(), {}, lost_packets);
+ EXPECT_EQ(original_cwnd, sender_->GetCongestionWindow());
+ EXPECT_GT(original_pacing_rate, sender_->PacingRate(0));
+ EXPECT_GE(pacing_rate, sender_->PacingRate(0));
+ EXPECT_LE(1.25 * sender_->BandwidthEstimate(), sender_->PacingRate(0));
+ pacing_rate = sender_->PacingRate(0);
+ }
+}
+
+// Ensures no change in congestion window in STARTUP after loss, but that the
+// rate decreases twice as fast as BBS4.
+TEST_F(BbrSenderTest, SimpleTransferDoubleStartupRateReduction) {
+ SetQuicReloadableFlag(quic_bbr_startup_rate_reduction, true);
+ CreateSmallBufferSetup();
+
+ SetConnectionOption(kBBS5);
+
+ // Run until the full bandwidth is reached and check how many rounds it was.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ bool used_conservation_cwnd = false;
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this, &used_conservation_cwnd]() {
+ if (!sender_->ExportDebugState().is_at_full_bandwidth &&
+ sender_->GetCongestionWindow() <
+ sender_->ExportDebugState().congestion_window) {
+ used_conservation_cwnd = true;
+ }
+ // Exit once a loss is hit.
+ return bbr_sender_.connection()->GetStats().packets_lost > 0 ||
+ sender_->ExportDebugState().is_at_full_bandwidth;
+ },
+ QuicTime::Delta::FromSeconds(5));
+ ASSERT_TRUE(simulator_result);
+ EXPECT_TRUE(sender_->InRecovery());
+ EXPECT_FALSE(used_conservation_cwnd);
+ EXPECT_EQ(BbrSender::STARTUP, sender_->ExportDebugState().mode);
+ EXPECT_NE(0u, bbr_sender_.connection()->GetStats().packets_lost);
+
+ // Lose each outstanding packet and the pacing rate decreases.
+ const QuicBandwidth original_pacing_rate = sender_->PacingRate(0);
+ QuicBandwidth pacing_rate = original_pacing_rate;
+ const QuicByteCount original_cwnd = sender_->GetCongestionWindow();
+ LostPacketVector lost_packets;
+ lost_packets.push_back(LostPacket(QuicPacketNumber(), kMaxPacketSize));
+ QuicPacketNumber largest_sent =
+ bbr_sender_.connection()->sent_packet_manager().GetLargestSentPacket();
+ for (QuicPacketNumber packet_number =
+ bbr_sender_.connection()->sent_packet_manager().GetLeastUnacked();
+ packet_number <= largest_sent; ++packet_number) {
+ lost_packets[0].packet_number = packet_number;
+ sender_->OnCongestionEvent(false, 0, clock_->Now(), {}, lost_packets);
+ EXPECT_EQ(original_cwnd, sender_->GetCongestionWindow());
+ EXPECT_GT(original_pacing_rate, sender_->PacingRate(0));
+ EXPECT_GE(pacing_rate, sender_->PacingRate(0));
+ EXPECT_LE(1.25 * sender_->BandwidthEstimate(), sender_->PacingRate(0));
+ pacing_rate = sender_->PacingRate(0);
+ }
+}
+
+TEST_F(BbrSenderTest, DerivedPacingGainStartup) {
+ SetQuicReloadableFlag(quic_bbr_slower_startup3, true);
+ CreateDefaultSetup();
+
+ SetConnectionOption(kBBQ1);
+ EXPECT_EQ(3u, sender_->num_startup_rtts());
+ // Verify that Sender is in slow start.
+ EXPECT_TRUE(sender_->InSlowStart());
+ // Verify that pacing rate is based on the initial RTT.
+ QuicBandwidth expected_pacing_rate = QuicBandwidth::FromBytesAndTimeDelta(
+ 2.773 * kDefaultWindowTCP, rtt_stats_->initial_rtt());
+ ExpectApproxEq(expected_pacing_rate.ToBitsPerSecond(),
+ sender_->PacingRate(0).ToBitsPerSecond(), 0.01f);
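+  // The 2.773 gain used with BBQ1 is numerically close to 4 * ln(2); the
+  // 2.885 gain expected in the following tests approximates 2 / ln(2), the
+  // default high gain.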
+
+ // Run until the full bandwidth is reached and check how many rounds it was.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return sender_->ExportDebugState().is_at_full_bandwidth; },
+ QuicTime::Delta::FromSeconds(5));
+ ASSERT_TRUE(simulator_result);
+ EXPECT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ EXPECT_EQ(3u, sender_->ExportDebugState().rounds_without_bandwidth_gain);
+ ExpectApproxEq(kTestLinkBandwidth, sender_->ExportDebugState().max_bandwidth,
+ 0.01f);
+ EXPECT_EQ(0u, bbr_sender_.connection()->GetStats().packets_lost);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+}
+
+TEST_F(BbrSenderTest, DerivedCWNDGainStartup) {
+ SetQuicReloadableFlag(quic_bbr_slower_startup3, true);
+ CreateDefaultSetup();
+
+ SetConnectionOption(kBBQ2);
+ EXPECT_EQ(3u, sender_->num_startup_rtts());
+ // Verify that Sender is in slow start.
+ EXPECT_TRUE(sender_->InSlowStart());
+ // Verify that pacing rate is based on the initial RTT.
+ QuicBandwidth expected_pacing_rate = QuicBandwidth::FromBytesAndTimeDelta(
+ 2.885 * kDefaultWindowTCP, rtt_stats_->initial_rtt());
+ ExpectApproxEq(expected_pacing_rate.ToBitsPerSecond(),
+ sender_->PacingRate(0).ToBitsPerSecond(), 0.01f);
+
+ // Run until the full bandwidth is reached and check how many rounds it was.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return sender_->ExportDebugState().is_at_full_bandwidth; },
+ QuicTime::Delta::FromSeconds(5));
+ ASSERT_TRUE(simulator_result);
+ EXPECT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ EXPECT_EQ(3u, sender_->ExportDebugState().rounds_without_bandwidth_gain);
+ ExpectApproxEq(kTestLinkBandwidth, sender_->ExportDebugState().max_bandwidth,
+ 0.01f);
+ EXPECT_EQ(0u, bbr_sender_.connection()->GetStats().packets_lost);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+ // Expect an SRTT less than 2.7 * Min RTT on exit from STARTUP.
+ EXPECT_GT(kTestRtt * 2.7, rtt_stats_->smoothed_rtt());
+}
+
+TEST_F(BbrSenderTest, AckAggregationInStartup) {
+ SetQuicReloadableFlag(quic_bbr_slower_startup3, true);
+ // Disable Ack Decimation on the receiver to avoid loss and make results
+ // consistent.
+ QuicConnectionPeer::SetAckMode(receiver_.connection(), AckMode::TCP_ACKING);
+ CreateDefaultSetup();
+
+ SetConnectionOption(kBBQ3);
+ EXPECT_EQ(3u, sender_->num_startup_rtts());
+ // Verify that Sender is in slow start.
+ EXPECT_TRUE(sender_->InSlowStart());
+ // Verify that pacing rate is based on the initial RTT.
+ QuicBandwidth expected_pacing_rate = QuicBandwidth::FromBytesAndTimeDelta(
+ 2.885 * kDefaultWindowTCP, rtt_stats_->initial_rtt());
+ ExpectApproxEq(expected_pacing_rate.ToBitsPerSecond(),
+ sender_->PacingRate(0).ToBitsPerSecond(), 0.01f);
+
+ // Run until the full bandwidth is reached and check how many rounds it was.
+ bbr_sender_.AddBytesToTransfer(12 * 1024 * 1024);
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return sender_->ExportDebugState().is_at_full_bandwidth; },
+ QuicTime::Delta::FromSeconds(5));
+ ASSERT_TRUE(simulator_result);
+ EXPECT_EQ(BbrSender::DRAIN, sender_->ExportDebugState().mode);
+ EXPECT_EQ(3u, sender_->ExportDebugState().rounds_without_bandwidth_gain);
+ ExpectApproxEq(kTestLinkBandwidth, sender_->ExportDebugState().max_bandwidth,
+ 0.01f);
+ EXPECT_EQ(0u, bbr_sender_.connection()->GetStats().packets_lost);
+ EXPECT_FALSE(sender_->ExportDebugState().last_sample_is_app_limited);
+}
+
+// Test that two BBR flows started slightly apart from each other both
+// complete their transfers.
+TEST_F(BbrSenderTest, SimpleCompetition) {
+ const QuicByteCount transfer_size = 10 * 1024 * 1024;
+ const QuicTime::Delta transfer_time =
+ kTestLinkBandwidth.TransferTime(transfer_size);
+ CreateBbrVsBbrSetup();
+
+ // Transfer 10% of data in first transfer.
+ bbr_sender_.AddBytesToTransfer(transfer_size);
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return receiver_.bytes_received() >= 0.1 * transfer_size; },
+ transfer_time);
+ ASSERT_TRUE(simulator_result);
+
+ // Start the second transfer and wait until both finish.
+ competing_sender_.AddBytesToTransfer(transfer_size);
+ simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return receiver_.bytes_received() == transfer_size &&
+ competing_receiver_.bytes_received() == transfer_size;
+ },
+ 3 * transfer_time);
+ ASSERT_TRUE(simulator_result);
+}
+
+// Test that BBR can resume bandwidth from cached network parameters.
+TEST_F(BbrSenderTest, ResumeConnectionState) {
+ CreateDefaultSetup();
+
+ bbr_sender_.connection()->AdjustNetworkParameters(kTestLinkBandwidth,
+ kTestRtt);
+ EXPECT_EQ(kTestLinkBandwidth, sender_->ExportDebugState().max_bandwidth);
+ EXPECT_EQ(kTestLinkBandwidth, sender_->BandwidthEstimate());
+ ExpectApproxEq(kTestRtt, sender_->ExportDebugState().min_rtt, 0.01f);
+
+ DriveOutOfStartup();
+}
+
+// Test with a min CWND of 1 instead of 4 packets.
+TEST_F(BbrSenderTest, ProbeRTTMinCWND1) {
+ CreateDefaultSetup();
+ SetConnectionOption(kMIN1);
+ DriveOutOfStartup();
+
+ // We have no intention of ever finishing this transfer.
+ bbr_sender_.AddBytesToTransfer(100 * 1024 * 1024);
+
+ // Wait until the connection enters PROBE_RTT.
+ const QuicTime::Delta timeout = QuicTime::Delta::FromSeconds(12);
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() {
+ return sender_->ExportDebugState().mode == BbrSender::PROBE_RTT;
+ },
+ timeout);
+ ASSERT_TRUE(simulator_result);
+ ASSERT_EQ(BbrSender::PROBE_RTT, sender_->ExportDebugState().mode);
+  // The PROBE_RTT CWND should be 1 packet if the min CWND is 1 packet.
+ EXPECT_EQ(kDefaultTCPMSS, sender_->GetCongestionWindow());
+
+ // Exit PROBE_RTT.
+ const QuicTime probe_rtt_start = clock_->Now();
+ const QuicTime::Delta time_to_exit_probe_rtt =
+ kTestRtt + QuicTime::Delta::FromMilliseconds(200);
+ simulator_.RunFor(1.5 * time_to_exit_probe_rtt);
+ EXPECT_EQ(BbrSender::PROBE_BW, sender_->ExportDebugState().mode);
+ EXPECT_GE(sender_->ExportDebugState().min_rtt_timestamp, probe_rtt_start);
+}
+
+} // namespace test
+} // namespace quic
diff --git a/quic/core/congestion_control/cubic_bytes.cc b/quic/core/congestion_control/cubic_bytes.cc
new file mode 100644
index 0000000..9300a79
--- /dev/null
+++ b/quic/core/congestion_control/cubic_bytes.cc
@@ -0,0 +1,191 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/cubic_bytes.h"
+
+#include <algorithm>
+#include <cmath>
+#include <cstdint>
+
+#include "net/third_party/quiche/src/quic/core/quic_constants.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flag_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+
+namespace quic {
+
+namespace {
+
+// Constants based on TCP defaults.
+// The following constants are in 2^10 fractions of a second instead of ms to
+// allow a 10 shift right to divide.
+const int kCubeScale = 40; // 1024*1024^3 (first 1024 is from 0.100^3)
+ // where 0.100 is 100 ms which is the scaling
+ // round trip time.
+const int kCubeCongestionWindowScale = 410;
+// The cube factor for packets in bytes.
+const uint64_t kCubeFactor =
+ (UINT64_C(1) << kCubeScale) / kCubeCongestionWindowScale / kDefaultTCPMSS;
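+// Working through the scaling: offsets below are measured in 2^10 fractions
+// of a second, so the convex increase
+//   delta = (410 * offset^3 * MSS) >> 40
+// is roughly 0.4 * t^3 * MSS with t in seconds, since 410 / 1024 ~= 0.4 and
+// 2^40 = 1024 * 1024^3.  Likewise, K^3 = kCubeFactor * (W_max - W) gives the
+// standard CUBIC time to origin K = cbrt((W_max - W) / (0.4 * MSS)), expressed
+// in the same 2^10-per-second units.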
+
+const float kDefaultCubicBackoffFactor = 0.7f; // Default Cubic backoff factor.
+// Additional backoff factor when loss occurs in the concave part of the Cubic
+// curve. This additional backoff factor is expected to give up bandwidth to
+// new concurrent flows and speed up convergence.
+const float kBetaLastMax = 0.85f;
+
+} // namespace
+
+CubicBytes::CubicBytes(const QuicClock* clock)
+ : clock_(clock),
+ num_connections_(kDefaultNumConnections),
+ epoch_(QuicTime::Zero()) {
+ ResetCubicState();
+}
+
+void CubicBytes::SetNumConnections(int num_connections) {
+ num_connections_ = num_connections;
+}
+
+float CubicBytes::Alpha() const {
+ // TCPFriendly alpha is described in Section 3.3 of the CUBIC paper. Note that
+ // beta here is a cwnd multiplier, and is equal to 1-beta from the paper.
+ // We derive the equivalent alpha for an N-connection emulation as:
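+  //   alpha = 3 * N^2 * (1 - beta) / (1 + beta).
+  // For example, with two emulated connections and the default backoff factor
+  // of 0.7, beta = (2 - 1 + 0.7) / 2 = 0.85 and alpha is roughly 0.97.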
+ const float beta = Beta();
+ return 3 * num_connections_ * num_connections_ * (1 - beta) / (1 + beta);
+}
+
+float CubicBytes::Beta() const {
+ // kNConnectionBeta is the backoff factor after loss for our N-connection
+ // emulation, which emulates the effective backoff of an ensemble of N
+ // TCP-Reno connections on a single loss event. The effective multiplier is
+ // computed as:
+ return (num_connections_ - 1 + kDefaultCubicBackoffFactor) / num_connections_;
+}
+
+float CubicBytes::BetaLastMax() const {
+ // BetaLastMax is the additional backoff factor after loss for our
+ // N-connection emulation, which emulates the additional backoff of
+ // an ensemble of N TCP-Reno connections on a single loss event. The
+ // effective multiplier is computed as:
+ return (num_connections_ - 1 + kBetaLastMax) / num_connections_;
+}
+
+void CubicBytes::ResetCubicState() {
+ epoch_ = QuicTime::Zero(); // Reset time.
+ last_max_congestion_window_ = 0;
+ acked_bytes_count_ = 0;
+ estimated_tcp_congestion_window_ = 0;
+ origin_point_congestion_window_ = 0;
+ time_to_origin_point_ = 0;
+ last_target_congestion_window_ = 0;
+}
+
+void CubicBytes::OnApplicationLimited() {
+ // When sender is not using the available congestion window, the window does
+ // not grow. But to be RTT-independent, Cubic assumes that the sender has been
+ // using the entire window during the time since the beginning of the current
+ // "epoch" (the end of the last loss recovery period). Since
+ // application-limited periods break this assumption, we reset the epoch when
+ // in such a period. This reset effectively freezes congestion window growth
+ // through application-limited periods and allows Cubic growth to continue
+ // when the entire window is being used.
+ epoch_ = QuicTime::Zero();
+}
+
+QuicByteCount CubicBytes::CongestionWindowAfterPacketLoss(
+ QuicByteCount current_congestion_window) {
+  // Since the bytes-mode Reno implementation slightly under-estimates the
+  // cwnd, we may never reach precisely the last cwnd over the course of an
+  // RTT.  Do not interpret a slight under-estimation as competing traffic.
+ if (current_congestion_window + kDefaultTCPMSS <
+ last_max_congestion_window_) {
+ // We never reached the old max, so assume we are competing with
+ // another flow. Use our extra back off factor to allow the other
+ // flow to go up.
+ last_max_congestion_window_ =
+ static_cast<int>(BetaLastMax() * current_congestion_window);
+ } else {
+ last_max_congestion_window_ = current_congestion_window;
+ }
+ epoch_ = QuicTime::Zero(); // Reset time.
+ return static_cast<int>(current_congestion_window * Beta());
+}
+
+QuicByteCount CubicBytes::CongestionWindowAfterAck(
+ QuicByteCount acked_bytes,
+ QuicByteCount current_congestion_window,
+ QuicTime::Delta delay_min,
+ QuicTime event_time) {
+ acked_bytes_count_ += acked_bytes;
+
+ if (!epoch_.IsInitialized()) {
+ // First ACK after a loss event.
+ QUIC_DVLOG(1) << "Start of epoch";
+ epoch_ = event_time; // Start of epoch.
+ acked_bytes_count_ = acked_bytes; // Reset count.
+ // Reset estimated_tcp_congestion_window_ to be in sync with cubic.
+ estimated_tcp_congestion_window_ = current_congestion_window;
+ if (last_max_congestion_window_ <= current_congestion_window) {
+ time_to_origin_point_ = 0;
+ origin_point_congestion_window_ = current_congestion_window;
+ } else {
+ time_to_origin_point_ = static_cast<uint32_t>(
+ cbrt(kCubeFactor *
+ (last_max_congestion_window_ - current_congestion_window)));
+ origin_point_congestion_window_ = last_max_congestion_window_;
+ }
+ }
+  // Change the time unit from microseconds to 2^10 fractions per second, and
+  // take the round trip time into account.  This is done to allow us to use
+  // shift as a divide operator.
+ int64_t elapsed_time =
+ ((event_time + delay_min - epoch_).ToMicroseconds() << 10) /
+ kNumMicrosPerSecond;
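+  // For example, an interval of 250 ms yields (250000 << 10) / 1000000 = 256,
+  // i.e. 0.25 seconds expressed in 1/1024-second units.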
+
+ // Right-shifts of negative, signed numbers have implementation-dependent
+ // behavior, so force the offset to be positive, as is done in the kernel.
+ uint64_t offset = std::abs(time_to_origin_point_ - elapsed_time);
+
+ QuicByteCount delta_congestion_window = (kCubeCongestionWindowScale * offset *
+ offset * offset * kDefaultTCPMSS) >>
+ kCubeScale;
+
+ const bool add_delta = elapsed_time > time_to_origin_point_;
+ DCHECK(add_delta ||
+ (origin_point_congestion_window_ > delta_congestion_window));
+ QuicByteCount target_congestion_window =
+ add_delta ? origin_point_congestion_window_ + delta_congestion_window
+ : origin_point_congestion_window_ - delta_congestion_window;
+ // Limit the CWND increase to half the acked bytes.
+ target_congestion_window =
+ std::min(target_congestion_window,
+ current_congestion_window + acked_bytes_count_ / 2);
+
+ DCHECK_LT(0u, estimated_tcp_congestion_window_);
+ // Increase the window by approximately Alpha * 1 MSS of bytes every
+ // time we ack an estimated tcp window of bytes. For small
+ // congestion windows (less than 25), the formula below will
+ // increase slightly slower than linearly per estimated tcp window
+ // of bytes.
+ estimated_tcp_congestion_window_ += acked_bytes_count_ *
+ (Alpha() * kDefaultTCPMSS) /
+ estimated_tcp_congestion_window_;
+ acked_bytes_count_ = 0;
+
+ // We have a new cubic congestion window.
+ last_target_congestion_window_ = target_congestion_window;
+
+ // Compute target congestion_window based on cubic target and estimated TCP
+ // congestion_window, use highest (fastest).
+ if (target_congestion_window < estimated_tcp_congestion_window_) {
+ target_congestion_window = estimated_tcp_congestion_window_;
+ }
+
+ QUIC_DVLOG(1) << "Final target congestion_window: "
+ << target_congestion_window;
+ return target_congestion_window;
+}
+
+} // namespace quic
diff --git a/quic/core/congestion_control/cubic_bytes.h b/quic/core/congestion_control/cubic_bytes.h
new file mode 100644
index 0000000..18f7c82
--- /dev/null
+++ b/quic/core/congestion_control/cubic_bytes.h
@@ -0,0 +1,103 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Cubic algorithm, helper class to TCP cubic.
+// For details see http://netsrv.csc.ncsu.edu/export/cubic_a_new_tcp_2008.pdf.
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_CUBIC_BYTES_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_CUBIC_BYTES_H_
+
+#include <cstdint>
+
+#include "base/macros.h"
+#include "net/third_party/quiche/src/quic/core/quic_bandwidth.h"
+#include "net/third_party/quiche/src/quic/core/quic_connection_stats.h"
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_clock.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
+
+namespace quic {
+
+namespace test {
+class CubicBytesTest;
+} // namespace test
+
+class QUIC_EXPORT_PRIVATE CubicBytes {
+ public:
+ explicit CubicBytes(const QuicClock* clock);
+ CubicBytes(const CubicBytes&) = delete;
+ CubicBytes& operator=(const CubicBytes&) = delete;
+
+ void SetNumConnections(int num_connections);
+
+ // Call after a timeout to reset the cubic state.
+ void ResetCubicState();
+
+  // Compute a new congestion window to use after a loss event.
+  // Returns the new congestion window in bytes. The new congestion window is
+  // a multiplicative decrease of our current window.
+  QuicByteCount CongestionWindowAfterPacketLoss(QuicByteCount current);
+
+ // Compute a new congestion window to use after a received ACK.
+ // Returns the new congestion window in bytes. The new congestion window
+ // follows a cubic function that depends on the time passed since last packet
+ // loss.
+ QuicByteCount CongestionWindowAfterAck(QuicByteCount acked_bytes,
+ QuicByteCount current,
+ QuicTime::Delta delay_min,
+ QuicTime event_time);
+
+ // Call on ack arrival when sender is unable to use the available congestion
+ // window. Resets Cubic state during quiescence.
+ void OnApplicationLimited();
+
+ private:
+ friend class test::CubicBytesTest;
+
+ static const QuicTime::Delta MaxCubicTimeInterval() {
+ return QuicTime::Delta::FromMilliseconds(30);
+ }
+
+ // Compute the TCP Cubic alpha, beta, and beta-last-max based on the
+ // current number of connections.
+ float Alpha() const;
+ float Beta() const;
+ float BetaLastMax() const;
+
+ QuicByteCount last_max_congestion_window() const {
+ return last_max_congestion_window_;
+ }
+
+ const QuicClock* clock_;
+
+ // Number of connections to simulate.
+ int num_connections_;
+
+ // Time when this cycle started, after last loss event.
+ QuicTime epoch_;
+
+ // Max congestion window used just before last loss event.
+ // Note: to improve fairness to other streams an additional back off is
+ // applied to this value if the new value is below our latest value.
+ QuicByteCount last_max_congestion_window_;
+
+ // Number of acked bytes since the cycle started (epoch).
+ QuicByteCount acked_bytes_count_;
+
+  // TCP Reno equivalent congestion window in bytes.
+ QuicByteCount estimated_tcp_congestion_window_;
+
+ // Origin point of cubic function.
+ QuicByteCount origin_point_congestion_window_;
+
+ // Time to origin point of cubic function in 2^10 fractions of a second.
+ uint32_t time_to_origin_point_;
+
+  // Last congestion window in bytes computed by the cubic function.
+ QuicByteCount last_target_congestion_window_;
+};
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_CUBIC_BYTES_H_
diff --git a/quic/core/congestion_control/cubic_bytes_test.cc b/quic/core/congestion_control/cubic_bytes_test.cc
new file mode 100644
index 0000000..4f3e9b1
--- /dev/null
+++ b/quic/core/congestion_control/cubic_bytes_test.cc
@@ -0,0 +1,388 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/cubic_bytes.h"
+
+#include <cstdint>
+
+#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_str_cat.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+#include "net/third_party/quiche/src/quic/test_tools/mock_clock.h"
+
+namespace quic {
+namespace test {
+namespace {
+
+const float kBeta = 0.7f; // Default Cubic backoff factor.
+const float kBetaLastMax = 0.85f; // Default Cubic backoff factor.
+const uint32_t kNumConnections = 2;
+const float kNConnectionBeta = (kNumConnections - 1 + kBeta) / kNumConnections;
+const float kNConnectionBetaLastMax =
+ (kNumConnections - 1 + kBetaLastMax) / kNumConnections;
+const float kNConnectionAlpha = 3 * kNumConnections * kNumConnections *
+ (1 - kNConnectionBeta) / (1 + kNConnectionBeta);
+
+} // namespace
+
+class CubicBytesTest : public QuicTest {
+ protected:
+ CubicBytesTest()
+ : one_ms_(QuicTime::Delta::FromMilliseconds(1)),
+ hundred_ms_(QuicTime::Delta::FromMilliseconds(100)),
+ cubic_(&clock_) {}
+
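+  // Byte-wise Reno equivalent: each MSS-sized ack grows the window by
+  // kNConnectionAlpha * MSS * MSS / cwnd bytes, i.e. by alpha MSS per window
+  // of acked data.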
+ QuicByteCount RenoCwndInBytes(QuicByteCount current_cwnd) {
+ QuicByteCount reno_estimated_cwnd =
+ current_cwnd +
+ kDefaultTCPMSS * (kNConnectionAlpha * kDefaultTCPMSS) / current_cwnd;
+ return reno_estimated_cwnd;
+ }
+
+ QuicByteCount ConservativeCwndInBytes(QuicByteCount current_cwnd) {
+ QuicByteCount conservative_cwnd = current_cwnd + kDefaultTCPMSS / 2;
+ return conservative_cwnd;
+ }
+
+ QuicByteCount CubicConvexCwndInBytes(QuicByteCount initial_cwnd,
+ QuicTime::Delta rtt,
+ QuicTime::Delta elapsed_time) {
+ const int64_t offset =
+ ((elapsed_time + rtt).ToMicroseconds() << 10) / 1000000;
+ const QuicByteCount delta_congestion_window =
+ ((410 * offset * offset * offset) * kDefaultTCPMSS >> 40);
+ const QuicByteCount cubic_cwnd = initial_cwnd + delta_congestion_window;
+ return cubic_cwnd;
+ }
+
+ QuicByteCount LastMaxCongestionWindow() {
+ return cubic_.last_max_congestion_window();
+ }
+
+ QuicTime::Delta MaxCubicTimeInterval() {
+ return cubic_.MaxCubicTimeInterval();
+ }
+
+ const QuicTime::Delta one_ms_;
+ const QuicTime::Delta hundred_ms_;
+ MockClock clock_;
+ CubicBytes cubic_;
+};
+
+// TODO(jokulik): The original "AboveOrigin" test, below, is very
+// loose. It's nearly impossible to make the test tighter without
+// deploying the fix for convex mode. Once cubic convex is deployed,
+// replace "AboveOrigin" with this test.
+TEST_F(CubicBytesTest, AboveOriginWithTighterBounds) {
+ // Convex growth.
+ const QuicTime::Delta rtt_min = hundred_ms_;
+ int64_t rtt_min_ms = rtt_min.ToMilliseconds();
+ float rtt_min_s = rtt_min_ms / 1000.0;
+ QuicByteCount current_cwnd = 10 * kDefaultTCPMSS;
+ const QuicByteCount initial_cwnd = current_cwnd;
+
+ clock_.AdvanceTime(one_ms_);
+ const QuicTime initial_time = clock_.ApproximateNow();
+ const QuicByteCount expected_first_cwnd = RenoCwndInBytes(current_cwnd);
+ current_cwnd = cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
+ rtt_min, initial_time);
+ ASSERT_EQ(expected_first_cwnd, current_cwnd);
+
+ // Normal TCP phase.
+ // The maximum number of expected Reno RTTs is calculated by
+ // finding the point where the cubic curve and the reno curve meet.
+ const int max_reno_rtts =
+ std::sqrt(kNConnectionAlpha / (.4 * rtt_min_s * rtt_min_s * rtt_min_s)) -
+ 2;
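+  // Per RTT, the Reno-equivalent window grows by roughly alpha packets, while
+  // after n RTTs the cubic curve has grown by 0.4 * (n * rtt)^3 packets;
+  // equating alpha * n with 0.4 * (n * rtt)^3 gives
+  // n = sqrt(alpha / (0.4 * rtt^3)), and the -2 keeps the loop safely before
+  // the crossover.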
+ for (int i = 0; i < max_reno_rtts; ++i) {
+ // Alternatively, we expect it to increase by one, every time we
+ // receive current_cwnd/Alpha acks back. (This is another way of
+ // saying we expect cwnd to increase by approximately Alpha once
+    // we receive current_cwnd number of acks back).
+ const uint64_t num_acks_this_epoch =
+ current_cwnd / kDefaultTCPMSS / kNConnectionAlpha;
+ const QuicByteCount initial_cwnd_this_epoch = current_cwnd;
+ for (QuicPacketCount n = 0; n < num_acks_this_epoch; ++n) {
+ // Call once per ACK.
+ const QuicByteCount expected_next_cwnd = RenoCwndInBytes(current_cwnd);
+ current_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ ASSERT_EQ(expected_next_cwnd, current_cwnd);
+ }
+ // Our byte-wise Reno implementation is an estimate. We expect
+ // the cwnd to increase by approximately one MSS every
+ // cwnd/kDefaultTCPMSS/Alpha acks, but it may be off by as much as
+ // half a packet for smaller values of current_cwnd.
+ const QuicByteCount cwnd_change_this_epoch =
+ current_cwnd - initial_cwnd_this_epoch;
+ ASSERT_NEAR(kDefaultTCPMSS, cwnd_change_this_epoch, kDefaultTCPMSS / 2);
+ clock_.AdvanceTime(hundred_ms_);
+ }
+
+ for (int i = 0; i < 54; ++i) {
+ const uint64_t max_acks_this_epoch = current_cwnd / kDefaultTCPMSS;
+ const QuicTime::Delta interval = QuicTime::Delta::FromMicroseconds(
+ hundred_ms_.ToMicroseconds() / max_acks_this_epoch);
+ for (QuicPacketCount n = 0; n < max_acks_this_epoch; ++n) {
+ clock_.AdvanceTime(interval);
+ current_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
+ initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
+ // If we allow per-ack updates, every update is a small cubic update.
+ ASSERT_EQ(expected_cwnd, current_cwnd);
+ }
+ }
+ const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
+ initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
+ current_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ ASSERT_EQ(expected_cwnd, current_cwnd);
+}
+
+// TODO(ianswett): This test was disabled when all fixes were enabled, but it
+// may be worth fixing.
+TEST_F(CubicBytesTest, DISABLED_AboveOrigin) {
+ // Convex growth.
+ const QuicTime::Delta rtt_min = hundred_ms_;
+ QuicByteCount current_cwnd = 10 * kDefaultTCPMSS;
+ // Without the signed-integer, cubic-convex fix, we start out in the
+ // wrong mode.
+ QuicPacketCount expected_cwnd = RenoCwndInBytes(current_cwnd);
+ // Initialize the state.
+ clock_.AdvanceTime(one_ms_);
+ ASSERT_EQ(expected_cwnd,
+ cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
+ rtt_min, clock_.ApproximateNow()));
+ current_cwnd = expected_cwnd;
+ const QuicPacketCount initial_cwnd = expected_cwnd;
+ // Normal TCP phase.
+ for (int i = 0; i < 48; ++i) {
+ for (QuicPacketCount n = 1;
+ n < current_cwnd / kDefaultTCPMSS / kNConnectionAlpha; ++n) {
+ // Call once per ACK.
+ ASSERT_NEAR(
+ current_cwnd,
+ cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min,
+ clock_.ApproximateNow()),
+ kDefaultTCPMSS);
+ }
+ clock_.AdvanceTime(hundred_ms_);
+ current_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ // When we fix convex mode and the uint64 arithmetic, we
+    // increase the expected_cwnd only after the first 100ms,
+ // rather than after the initial 1ms.
+ expected_cwnd += kDefaultTCPMSS;
+ ASSERT_NEAR(expected_cwnd, current_cwnd, kDefaultTCPMSS);
+ }
+ // Cubic phase.
+ for (int i = 0; i < 52; ++i) {
+ for (QuicPacketCount n = 1; n < current_cwnd / kDefaultTCPMSS; ++n) {
+ // Call once per ACK.
+ ASSERT_NEAR(
+ current_cwnd,
+ cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd, rtt_min,
+ clock_.ApproximateNow()),
+ kDefaultTCPMSS);
+ }
+ clock_.AdvanceTime(hundred_ms_);
+ current_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ }
+ // Total time elapsed so far; add min_rtt (0.1s) here as well.
+ float elapsed_time_s = 10.0f + 0.1f;
+  // |expected_cwnd| is the initial cwnd (in packets) plus C * t^3, where
+  // C = 0.4.
+ expected_cwnd =
+ initial_cwnd / kDefaultTCPMSS +
+ (elapsed_time_s * elapsed_time_s * elapsed_time_s * 410) / 1024;
+ EXPECT_EQ(expected_cwnd, current_cwnd / kDefaultTCPMSS);
+}
+
+// Constructs an artificial scenario to ensure that cubic-convex
+// increases are truly fine-grained:
+//
+// - After starting the epoch, this test advances the elapsed time
+// sufficiently far that cubic will do small increases at less than
+// MaxCubicTimeInterval() intervals.
+//
+// - Sets an artificially large initial cwnd so that Reno growth does not mask
+//   the convex increases on every ack.
+TEST_F(CubicBytesTest, AboveOriginFineGrainedCubing) {
+ // Start the test with an artificially large cwnd to prevent Reno
+ // from over-taking cubic.
+ QuicByteCount current_cwnd = 1000 * kDefaultTCPMSS;
+ const QuicByteCount initial_cwnd = current_cwnd;
+ const QuicTime::Delta rtt_min = hundred_ms_;
+ clock_.AdvanceTime(one_ms_);
+ QuicTime initial_time = clock_.ApproximateNow();
+
+ // Start the epoch and then artificially advance the time.
+ current_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(600));
+ current_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+
+ // We expect the algorithm to perform only non-zero, fine-grained cubic
+ // increases on every ack in this case.
+ for (int i = 0; i < 100; ++i) {
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(10));
+ const QuicByteCount expected_cwnd = CubicConvexCwndInBytes(
+ initial_cwnd, rtt_min, (clock_.ApproximateNow() - initial_time));
+ const QuicByteCount next_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ // Make sure we are performing cubic increases.
+ ASSERT_EQ(expected_cwnd, next_cwnd);
+ // Make sure that these are non-zero, less-than-packet sized
+ // increases.
+ ASSERT_GT(next_cwnd, current_cwnd);
+ const QuicByteCount cwnd_delta = next_cwnd - current_cwnd;
+ ASSERT_GT(kDefaultTCPMSS * .1, cwnd_delta);
+
+ current_cwnd = next_cwnd;
+ }
+}
+
+// Constructs an artificial scenario to show what happens when we
+// allow per-ack updates, rather than limiting update frequency. In
+// this scenario, the first two acks of the epoch produce the same
+// cwnd. When we limit per-ack updates, this would cause the
+// cessation of cubic updates for 30ms. When we allow per-ack
+// updates, the window continues to grow on every ack.
+TEST_F(CubicBytesTest, PerAckUpdates) {
+ // Start the test with a large cwnd and RTT, to force the first
+ // increase to be a cubic increase.
+ QuicPacketCount initial_cwnd_packets = 150;
+ QuicByteCount current_cwnd = initial_cwnd_packets * kDefaultTCPMSS;
+ const QuicTime::Delta rtt_min = 350 * one_ms_;
+
+ // Initialize the epoch
+ clock_.AdvanceTime(one_ms_);
+ // Keep track of the growth of the reno-equivalent cwnd.
+ QuicByteCount reno_cwnd = RenoCwndInBytes(current_cwnd);
+ current_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ const QuicByteCount initial_cwnd = current_cwnd;
+
+ // Simulate the return of cwnd packets in less than
+  // MaxCubicTimeInterval() time.
+ const QuicPacketCount max_acks = initial_cwnd_packets / kNConnectionAlpha;
+ const QuicTime::Delta interval = QuicTime::Delta::FromMicroseconds(
+ MaxCubicTimeInterval().ToMicroseconds() / (max_acks + 1));
+
+ // In this scenario, the first increase is dictated by the cubic
+ // equation, but it is less than one byte, so the cwnd doesn't
+ // change. Normally, without per-ack increases, any cwnd plateau
+ // will cause the cwnd to be pinned for MaxCubicTimeInterval(). If
+ // we enable per-ack updates, the cwnd will continue to grow,
+ // regardless of the temporary plateau.
+ clock_.AdvanceTime(interval);
+ reno_cwnd = RenoCwndInBytes(reno_cwnd);
+ ASSERT_EQ(current_cwnd,
+ cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
+ rtt_min, clock_.ApproximateNow()));
+ for (QuicPacketCount i = 1; i < max_acks; ++i) {
+ clock_.AdvanceTime(interval);
+ const QuicByteCount next_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ reno_cwnd = RenoCwndInBytes(reno_cwnd);
+    // The window should increase on every ack.
+ ASSERT_LT(current_cwnd, next_cwnd);
+ ASSERT_EQ(reno_cwnd, next_cwnd);
+ current_cwnd = next_cwnd;
+ }
+
+ // After all the acks are returned from the epoch, we expect the
+ // cwnd to have increased by nearly one packet. (Not exactly one
+  // packet, because our byte-wise Reno algorithm always slightly
+  // under-estimates).  Without per-ack updates, the current_cwnd
+ // would otherwise be unchanged.
+ const QuicByteCount minimum_expected_increase = kDefaultTCPMSS * .9;
+ EXPECT_LT(minimum_expected_increase + initial_cwnd, current_cwnd);
+}
+
+TEST_F(CubicBytesTest, LossEvents) {
+ const QuicTime::Delta rtt_min = hundred_ms_;
+ QuicByteCount current_cwnd = 422 * kDefaultTCPMSS;
+ // Without the signed-integer, cubic-convex fix, we mistakenly
+ // increment cwnd after only one_ms_ and a single ack.
+ QuicPacketCount expected_cwnd = RenoCwndInBytes(current_cwnd);
+ // Initialize the state.
+ clock_.AdvanceTime(one_ms_);
+ EXPECT_EQ(expected_cwnd,
+ cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
+ rtt_min, clock_.ApproximateNow()));
+
+ // On the first loss, the last max congestion window is set to the
+ // congestion window before the loss.
+ QuicByteCount pre_loss_cwnd = current_cwnd;
+ ASSERT_EQ(0u, LastMaxCongestionWindow());
+ expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta);
+ EXPECT_EQ(expected_cwnd,
+ cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
+ ASSERT_EQ(pre_loss_cwnd, LastMaxCongestionWindow());
+ current_cwnd = expected_cwnd;
+
+ // On the second loss, the current congestion window has not yet
+ // reached the last max congestion window. The last max congestion
+ // window will be reduced by an additional backoff factor to allow
+ // for competition.
+ pre_loss_cwnd = current_cwnd;
+ expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta);
+ ASSERT_EQ(expected_cwnd,
+ cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
+ current_cwnd = expected_cwnd;
+ EXPECT_GT(pre_loss_cwnd, LastMaxCongestionWindow());
+ QuicByteCount expected_last_max =
+ static_cast<QuicByteCount>(pre_loss_cwnd * kNConnectionBetaLastMax);
+ EXPECT_EQ(expected_last_max, LastMaxCongestionWindow());
+ EXPECT_LT(expected_cwnd, LastMaxCongestionWindow());
+ // Simulate an increase, and check that we are below the origin.
+ current_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ EXPECT_GT(LastMaxCongestionWindow(), current_cwnd);
+
+ // On the final loss, simulate the condition where the congestion
+ // window had a chance to grow nearly to the last congestion window.
+ current_cwnd = LastMaxCongestionWindow() - 1;
+ pre_loss_cwnd = current_cwnd;
+ expected_cwnd = static_cast<QuicByteCount>(current_cwnd * kNConnectionBeta);
+ EXPECT_EQ(expected_cwnd,
+ cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
+ expected_last_max = pre_loss_cwnd;
+ ASSERT_EQ(expected_last_max, LastMaxCongestionWindow());
+}
+
+TEST_F(CubicBytesTest, BelowOrigin) {
+ // Concave growth.
+ const QuicTime::Delta rtt_min = hundred_ms_;
+ QuicByteCount current_cwnd = 422 * kDefaultTCPMSS;
+ // Without the signed-integer, cubic-convex fix, we mistakenly
+ // increment cwnd after only one_ms_ and a single ack.
+ QuicPacketCount expected_cwnd = RenoCwndInBytes(current_cwnd);
+ // Initialize the state.
+ clock_.AdvanceTime(one_ms_);
+ EXPECT_EQ(expected_cwnd,
+ cubic_.CongestionWindowAfterAck(kDefaultTCPMSS, current_cwnd,
+ rtt_min, clock_.ApproximateNow()));
+ expected_cwnd = static_cast<QuicPacketCount>(current_cwnd * kNConnectionBeta);
+ EXPECT_EQ(expected_cwnd,
+ cubic_.CongestionWindowAfterPacketLoss(current_cwnd));
+ current_cwnd = expected_cwnd;
+ // First update after loss to initialize the epoch.
+ current_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ // Cubic phase.
+ for (int i = 0; i < 40; ++i) {
+ clock_.AdvanceTime(hundred_ms_);
+ current_cwnd = cubic_.CongestionWindowAfterAck(
+ kDefaultTCPMSS, current_cwnd, rtt_min, clock_.ApproximateNow());
+ }
+ expected_cwnd = 553632;
+ EXPECT_EQ(expected_cwnd, current_cwnd);
+}
+
+} // namespace test
+} // namespace quic
diff --git a/quic/core/congestion_control/general_loss_algorithm.cc b/quic/core/congestion_control/general_loss_algorithm.cc
new file mode 100644
index 0000000..8cd5e71
--- /dev/null
+++ b/quic/core/congestion_control/general_loss_algorithm.cc
@@ -0,0 +1,236 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/general_loss_algorithm.h"
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_bug_tracker.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flag_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
+
+namespace quic {
+
+namespace {
+
+// The minimum delay before a packet will be considered lost,
+// regardless of SRTT. Half of the minimum TLP, since the loss algorithm only
+// triggers when a nack has been received for the packet.
+static const size_t kMinLossDelayMs = 5;
+
+// Default fraction of an RTT the algorithm waits before determining a packet is
+// lost due to early retransmission by time based loss detection.
+static const int kDefaultLossDelayShift = 2;
+// Default fraction of an RTT when doing adaptive loss detection.
+static const int kDefaultAdaptiveLossDelayShift = 4;
+
+} // namespace
+
+GeneralLossAlgorithm::GeneralLossAlgorithm() : GeneralLossAlgorithm(kNack) {}
+
+GeneralLossAlgorithm::GeneralLossAlgorithm(LossDetectionType loss_type)
+ : loss_detection_timeout_(QuicTime::Zero()),
+ least_in_flight_(1),
+ packet_number_space_(NUM_PACKET_NUMBER_SPACES) {
+ SetLossDetectionType(loss_type);
+}
+
+void GeneralLossAlgorithm::SetLossDetectionType(LossDetectionType loss_type) {
+ loss_detection_timeout_ = QuicTime::Zero();
+ largest_sent_on_spurious_retransmit_.Clear();
+ loss_type_ = loss_type;
+ reordering_shift_ = loss_type == kAdaptiveTime
+ ? kDefaultAdaptiveLossDelayShift
+ : kDefaultLossDelayShift;
+ if (GetQuicReloadableFlag(quic_eighth_rtt_loss_detection) &&
+ loss_type == kTime) {
+ QUIC_RELOADABLE_FLAG_COUNT(quic_eighth_rtt_loss_detection);
+ reordering_shift_ = 3;
+ }
+ largest_previously_acked_.Clear();
+}
+
+LossDetectionType GeneralLossAlgorithm::GetLossDetectionType() const {
+ return loss_type_;
+}
+
+// Uses nack counts to decide when packets are lost.
+void GeneralLossAlgorithm::DetectLosses(
+ const QuicUnackedPacketMap& unacked_packets,
+ QuicTime time,
+ const RttStats& rtt_stats,
+ QuicPacketNumber largest_newly_acked,
+ const AckedPacketVector& packets_acked,
+ LostPacketVector* packets_lost) {
+ loss_detection_timeout_ = QuicTime::Zero();
+ if (!packets_acked.empty() &&
+ packets_acked.front().packet_number == least_in_flight_) {
+ if (least_in_flight_ + packets_acked.size() - 1 == largest_newly_acked) {
+ // Optimization for the case when no packet is missing.
+ least_in_flight_ = largest_newly_acked + 1;
+ largest_previously_acked_ = largest_newly_acked;
+ return;
+ }
+    // There is a hole in acked_packets; increment least_in_flight_ if
+    // possible.
+ for (const auto& acked : packets_acked) {
+ if (acked.packet_number != least_in_flight_) {
+ break;
+ }
+ ++least_in_flight_;
+ }
+ }
+ QuicTime::Delta max_rtt =
+ std::max(rtt_stats.previous_srtt(), rtt_stats.latest_rtt());
+ QuicTime::Delta loss_delay =
+ std::max(QuicTime::Delta::FromMilliseconds(kMinLossDelayMs),
+ max_rtt + (max_rtt >> reordering_shift_));
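+  // With the default reordering shift of 2 this waits max_rtt + max_rtt / 4,
+  // i.e. 1.25 RTTs, before declaring a packet lost; the adaptive shift of 4
+  // waits roughly 1.06 RTTs.  In both cases the delay is floored at
+  // kMinLossDelayMs.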
+ QuicPacketNumber packet_number = unacked_packets.GetLeastUnacked();
+ auto it = unacked_packets.begin();
+ if (least_in_flight_.IsInitialized() && least_in_flight_ >= packet_number) {
+ if (least_in_flight_ > unacked_packets.largest_sent_packet() + 1) {
+ QUIC_BUG << "least_in_flight: " << least_in_flight_
+ << " is greater than largest_sent_packet + 1: "
+ << unacked_packets.largest_sent_packet() + 1;
+ } else {
+ it += (least_in_flight_ - packet_number);
+ packet_number = least_in_flight_;
+ }
+ }
+ // Clear least_in_flight_.
+ least_in_flight_.Clear();
+ DCHECK(!unacked_packets.use_uber_loss_algorithm() ||
+ packet_number_space_ ==
+ unacked_packets.GetPacketNumberSpace(largest_newly_acked));
+ for (; it != unacked_packets.end() && packet_number <= largest_newly_acked;
+ ++it, ++packet_number) {
+ if (unacked_packets.use_uber_loss_algorithm() &&
+ unacked_packets.GetPacketNumberSpace(it->encryption_level) !=
+ packet_number_space_) {
+      // Skip packets of a different packet number space.
+ continue;
+ }
+ if (!it->in_flight) {
+ continue;
+ }
+
+ if (loss_type_ == kNack) {
+ // FACK based loss detection.
+ if (largest_newly_acked - packet_number >=
+ kNumberOfNacksBeforeRetransmission) {
+ packets_lost->push_back(LostPacket(packet_number, it->bytes_sent));
+ continue;
+ }
+ } else if (loss_type_ == kLazyFack) {
+    // Require two in-order acks to invoke FACK, which avoids spuriously
+    // retransmitting packets when one packet is reordered by a large amount.
+ if (largest_previously_acked_.IsInitialized() &&
+ largest_newly_acked > largest_previously_acked_ &&
+ largest_previously_acked_ > packet_number &&
+ largest_previously_acked_ - packet_number >=
+ (kNumberOfNacksBeforeRetransmission - 1)) {
+ packets_lost->push_back(LostPacket(packet_number, it->bytes_sent));
+ continue;
+ }
+ }
+
+    // Only early retransmit (RFC5827) when the last packet gets acked and
+ // there are retransmittable packets in flight.
+ // This also implements a timer-protected variant of FACK.
+ QuicPacketNumber largest_sent_retransmittable_packet;
+ if (unacked_packets.use_uber_loss_algorithm()) {
+ // Use largest_sent_retransmittable_packet of corresponding packet number
+ // space for timer based loss detection.
+ largest_sent_retransmittable_packet =
+ unacked_packets.GetLargestSentRetransmittableOfPacketNumberSpace(
+ packet_number_space_);
+ } else {
+ largest_sent_retransmittable_packet =
+ unacked_packets.largest_sent_retransmittable_packet();
+ }
+ if (largest_sent_retransmittable_packet <= largest_newly_acked ||
+ loss_type_ == kTime || loss_type_ == kAdaptiveTime) {
+ QuicTime when_lost = it->sent_time + loss_delay;
+ if (time < when_lost) {
+ loss_detection_timeout_ = when_lost;
+ if (!least_in_flight_.IsInitialized()) {
+ // At this point, packet_number is in flight and not detected as lost.
+ least_in_flight_ = packet_number;
+ }
+ break;
+ }
+ packets_lost->push_back(LostPacket(packet_number, it->bytes_sent));
+ continue;
+ }
+
+ // NACK-based loss detection allows for a max reordering window of 1 RTT.
+ if (it->sent_time + rtt_stats.smoothed_rtt() <
+ unacked_packets.GetTransmissionInfo(largest_newly_acked).sent_time) {
+ packets_lost->push_back(LostPacket(packet_number, it->bytes_sent));
+ continue;
+ }
+ if (!least_in_flight_.IsInitialized()) {
+ // At this point, packet_number is in flight and not detected as lost.
+ least_in_flight_ = packet_number;
+ }
+ }
+ if (!least_in_flight_.IsInitialized()) {
+ // There is no in flight packet.
+ least_in_flight_ = largest_newly_acked + 1;
+ }
+ largest_previously_acked_ = largest_newly_acked;
+}
+
+QuicTime GeneralLossAlgorithm::GetLossTimeout() const {
+ return loss_detection_timeout_;
+}
+
+void GeneralLossAlgorithm::SpuriousRetransmitDetected(
+ const QuicUnackedPacketMap& unacked_packets,
+ QuicTime time,
+ const RttStats& rtt_stats,
+ QuicPacketNumber spurious_retransmission) {
+ if (loss_type_ != kAdaptiveTime || reordering_shift_ == 0) {
+ return;
+ }
+ // Calculate the extra time needed so this wouldn't have been declared lost.
+ // Extra time needed is based on how long it's been since the spurious
+ // retransmission was sent, because the SRTT and latest RTT may have changed.
+ QuicTime::Delta extra_time_needed =
+ time -
+ unacked_packets.GetTransmissionInfo(spurious_retransmission).sent_time;
+ // Increase the reordering fraction until enough time would be allowed.
+ QuicTime::Delta max_rtt =
+ std::max(rtt_stats.previous_srtt(), rtt_stats.latest_rtt());
+ if (GetQuicReloadableFlag(quic_fix_adaptive_time_loss)) {
+ QUIC_RELOADABLE_FLAG_COUNT(quic_fix_adaptive_time_loss);
+ while ((max_rtt >> reordering_shift_) <= extra_time_needed &&
+ reordering_shift_ > 0) {
+ --reordering_shift_;
+ }
+ return;
+ }
+
+ if (largest_sent_on_spurious_retransmit_.IsInitialized() &&
+ spurious_retransmission <= largest_sent_on_spurious_retransmit_) {
+ return;
+ }
+ largest_sent_on_spurious_retransmit_ = unacked_packets.largest_sent_packet();
+ QuicTime::Delta proposed_extra_time(QuicTime::Delta::Zero());
+ do {
+ proposed_extra_time = max_rtt >> reordering_shift_;
+ --reordering_shift_;
+ } while (proposed_extra_time < extra_time_needed && reordering_shift_ > 0);
+}
+
+void GeneralLossAlgorithm::SetPacketNumberSpace(
+ PacketNumberSpace packet_number_space) {
+ if (packet_number_space_ < NUM_PACKET_NUMBER_SPACES) {
+ QUIC_BUG << "Cannot switch packet_number_space";
+ return;
+ }
+
+ packet_number_space_ = packet_number_space;
+}
+
+} // namespace quic
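
As an illustration of the two rules DetectLosses combines, the sketch below restates them with plain integers rather than the QuicTime/QuicPacketNumber types used in the patch; the helper names (LostByNackCount, LossDelayUs) are illustrative only, not part of the QUICHE API.

// Standalone sketch (not part of the patch): the two loss thresholds.
#include <algorithm>
#include <cstdint>
#include <iostream>

// FACK rule: a packet is lost once at least 3 later packets have been acked.
bool LostByNackCount(uint64_t packet_number, uint64_t largest_newly_acked) {
  constexpr uint64_t kNumberOfNacksBeforeRetransmission = 3;
  return largest_newly_acked - packet_number >=
         kNumberOfNacksBeforeRetransmission;
}

// Time rule: a packet is lost once it is older than
// max(SRTT, latest_rtt) * (1 + 1/2^reordering_shift), floored at 5 ms.
int64_t LossDelayUs(int64_t srtt_us, int64_t latest_rtt_us,
                    int reordering_shift) {
  const int64_t max_rtt_us = std::max(srtt_us, latest_rtt_us);
  return std::max<int64_t>(5000,
                           max_rtt_us + (max_rtt_us >> reordering_shift));
}

int main() {
  std::cout << LostByNackCount(1, 4) << "\n";  // 1: three packets above 1 acked.
  std::cout << LossDelayUs(100000, 100000, 2) << "\n";  // 125000 us = 1.25 * RTT.
}
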
diff --git a/quic/core/congestion_control/general_loss_algorithm.h b/quic/core/congestion_control/general_loss_algorithm.h
new file mode 100644
index 0000000..a2bcadd
--- /dev/null
+++ b/quic/core/congestion_control/general_loss_algorithm.h
@@ -0,0 +1,85 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_GENERAL_LOSS_ALGORITHM_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_GENERAL_LOSS_ALGORITHM_H_
+
+#include <algorithm>
+#include <map>
+
+#include "base/macros.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/loss_detection_interface.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+#include "net/third_party/quiche/src/quic/core/quic_unacked_packet_map.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
+
+namespace quic {
+
+// Class which can be configured to implement TCP's approach of detecting loss
+// when 3 nacks have been received for a packet, or to use a time threshold.
+// Also implements TCP's early retransmit (RFC5827).
+class QUIC_EXPORT_PRIVATE GeneralLossAlgorithm : public LossDetectionInterface {
+ public:
+ // TCP retransmits after 3 nacks.
+ static const QuicPacketCount kNumberOfNacksBeforeRetransmission = 3;
+
+ GeneralLossAlgorithm();
+ explicit GeneralLossAlgorithm(LossDetectionType loss_type);
+ GeneralLossAlgorithm(const GeneralLossAlgorithm&) = delete;
+ GeneralLossAlgorithm& operator=(const GeneralLossAlgorithm&) = delete;
+ ~GeneralLossAlgorithm() override {}
+
+ LossDetectionType GetLossDetectionType() const override;
+
+ // Switches the loss detection type to |loss_type| and resets the loss
+ // algorithm.
+ void SetLossDetectionType(LossDetectionType loss_type);
+
+ // Uses |largest_acked| and time to decide when packets are lost.
+ void DetectLosses(const QuicUnackedPacketMap& unacked_packets,
+ QuicTime time,
+ const RttStats& rtt_stats,
+ QuicPacketNumber largest_newly_acked,
+ const AckedPacketVector& packets_acked,
+ LostPacketVector* packets_lost) override;
+
+ // Returns a non-zero value when the early retransmit timer is active.
+ QuicTime GetLossTimeout() const override;
+
+ // Increases the loss detection threshold for time loss detection.
+ void SpuriousRetransmitDetected(
+ const QuicUnackedPacketMap& unacked_packets,
+ QuicTime time,
+ const RttStats& rtt_stats,
+ QuicPacketNumber spurious_retransmission) override;
+
+ void SetPacketNumberSpace(PacketNumberSpace packet_number_space);
+
+ int reordering_shift() const { return reordering_shift_; }
+
+ private:
+ QuicTime loss_detection_timeout_;
+ // Largest sent packet when a spurious retransmit is detected.
+ // Prevents increasing the reordering threshold multiple times per epoch.
+ // TODO(ianswett): Deprecate when quic_fix_adaptive_time_loss flag is
+ // deprecated.
+ QuicPacketNumber largest_sent_on_spurious_retransmit_;
+ LossDetectionType loss_type_;
+  // Fraction of max(SRTT, latest_rtt) to permit reordering before declaring
+  // loss. The fraction is calculated by shifting max(SRTT, latest_rtt) to the
+  // right by reordering_shift.
+ int reordering_shift_;
+ // The largest newly acked from the previous call to DetectLosses.
+ QuicPacketNumber largest_previously_acked_;
+  // The least in-flight packet. Loss detection should start from this packet.
+  // Note that least_in_flight_ could be the largest packet ever sent + 1.
+ QuicPacketNumber least_in_flight_;
+ // This is only used when quic_use_uber_loss_algorithm is true.
+ PacketNumberSpace packet_number_space_;
+};
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_GENERAL_LOSS_ALGORITHM_H_
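
A minimal sketch of the reordering_shift_ arithmetic described above: the shift selects a fraction of max(SRTT, latest RTT) as the reordering window, and the adaptive variant widens the window by decrementing the shift after a spurious retransmit. The helper names are hypothetical.

// Standalone sketch (not part of the patch): reordering window arithmetic.
#include <cstdint>
#include <iostream>

int64_t ReorderingWindowUs(int64_t max_rtt_us, int shift) {
  return max_rtt_us >> shift;  // max_rtt / 2^shift
}

// Mirrors the quic_fix_adaptive_time_loss branch of SpuriousRetransmitDetected:
// decrement the shift until the widened window covers the needed slack.
int WidenUntilSufficient(int64_t max_rtt_us, int shift,
                         int64_t extra_time_needed_us) {
  while ((max_rtt_us >> shift) <= extra_time_needed_us && shift > 0) {
    --shift;
  }
  return shift;
}

int main() {
  const int64_t rtt_us = 100000;  // 100 ms
  std::cout << ReorderingWindowUs(rtt_us, 4) << "\n";  // 6250 us = RTT/16
  // A spurious loss that needed ~25 ms of slack pushes the shift from 4 to 1.
  std::cout << WidenUntilSufficient(rtt_us, 4, 25000) << "\n";  // 1 -> RTT/2
}
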
diff --git a/quic/core/congestion_control/general_loss_algorithm_test.cc b/quic/core/congestion_control/general_loss_algorithm_test.cc
new file mode 100644
index 0000000..91a25da
--- /dev/null
+++ b/quic/core/congestion_control/general_loss_algorithm_test.cc
@@ -0,0 +1,577 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/general_loss_algorithm.h"
+
+#include <algorithm>
+#include <cstdint>
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
+#include "net/third_party/quiche/src/quic/core/quic_unacked_packet_map.h"
+#include "net/third_party/quiche/src/quic/core/quic_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+#include "net/third_party/quiche/src/quic/test_tools/mock_clock.h"
+
+namespace quic {
+namespace test {
+namespace {
+
+// Default packet length.
+const uint32_t kDefaultLength = 1000;
+
+class GeneralLossAlgorithmTest : public QuicTest {
+ protected:
+ GeneralLossAlgorithmTest() : unacked_packets_(Perspective::IS_CLIENT) {
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
+ QuicTime::Delta::Zero(), clock_.Now());
+ EXPECT_LT(0, rtt_stats_.smoothed_rtt().ToMicroseconds());
+ if (unacked_packets_.use_uber_loss_algorithm()) {
+ loss_algorithm_.SetPacketNumberSpace(HANDSHAKE_DATA);
+ }
+ }
+
+ ~GeneralLossAlgorithmTest() override {}
+
+ void SendDataPacket(uint64_t packet_number) {
+ QuicStreamFrame frame;
+ frame.stream_id = QuicUtils::GetHeadersStreamId(
+ CurrentSupportedVersions()[0].transport_version);
+ SerializedPacket packet(QuicPacketNumber(packet_number),
+ PACKET_1BYTE_PACKET_NUMBER, nullptr, kDefaultLength,
+ false, false);
+ packet.retransmittable_frames.push_back(QuicFrame(frame));
+ unacked_packets_.AddSentPacket(&packet, QuicPacketNumber(),
+ NOT_RETRANSMISSION, clock_.Now(), true);
+ }
+
+ void SendAckPacket(uint64_t packet_number) {
+ SerializedPacket packet(QuicPacketNumber(packet_number),
+ PACKET_1BYTE_PACKET_NUMBER, nullptr, kDefaultLength,
+ true, false);
+ unacked_packets_.AddSentPacket(&packet, QuicPacketNumber(),
+ NOT_RETRANSMISSION, clock_.Now(), false);
+ }
+
+ void VerifyLosses(uint64_t largest_newly_acked,
+ const AckedPacketVector& packets_acked,
+ const std::vector<uint64_t>& losses_expected) {
+ if (unacked_packets_.use_uber_loss_algorithm()) {
+ unacked_packets_.MaybeUpdateLargestAckedOfPacketNumberSpace(
+ ENCRYPTION_NONE, QuicPacketNumber(largest_newly_acked));
+ } else if (!unacked_packets_.largest_acked().IsInitialized() ||
+ QuicPacketNumber(largest_newly_acked) >
+ unacked_packets_.largest_acked()) {
+ unacked_packets_.IncreaseLargestAcked(
+ QuicPacketNumber(largest_newly_acked));
+ }
+ LostPacketVector lost_packets;
+ loss_algorithm_.DetectLosses(unacked_packets_, clock_.Now(), rtt_stats_,
+ QuicPacketNumber(largest_newly_acked),
+ packets_acked, &lost_packets);
+ ASSERT_EQ(losses_expected.size(), lost_packets.size());
+ for (size_t i = 0; i < losses_expected.size(); ++i) {
+ EXPECT_EQ(lost_packets[i].packet_number,
+ QuicPacketNumber(losses_expected[i]));
+ }
+ }
+
+ QuicUnackedPacketMap unacked_packets_;
+ GeneralLossAlgorithm loss_algorithm_;
+ RttStats rtt_stats_;
+ MockClock clock_;
+};
+
+TEST_F(GeneralLossAlgorithmTest, NackRetransmit1Packet) {
+ const size_t kNumSentPackets = 5;
+ // Transmit 5 packets.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ }
+ AckedPacketVector packets_acked;
+ // No loss on one ack.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(2), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(2, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ // No loss on two acks.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(3));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(3), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(3, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ // Loss on three acks.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(4), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(4, packets_acked, {1});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+// A stretch ack is an ack that covers more than 1 packet of previously
+// unacknowledged data.
+TEST_F(GeneralLossAlgorithmTest, NackRetransmit1PacketWith1StretchAck) {
+ const size_t kNumSentPackets = 10;
+ // Transmit 10 packets.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ }
+ AckedPacketVector packets_acked;
+ // Nack the first packet 3 times in a single StretchAck.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(2), kMaxPacketSize, QuicTime::Zero()));
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(3));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(3), kMaxPacketSize, QuicTime::Zero()));
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(4), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(4, packets_acked, {1});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+// Ack a packet 3 packets ahead, causing a retransmit.
+TEST_F(GeneralLossAlgorithmTest, NackRetransmit1PacketSingleAck) {
+ const size_t kNumSentPackets = 10;
+ // Transmit 10 packets.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ }
+ AckedPacketVector packets_acked;
+ // Nack the first packet 3 times in an AckFrame with three missing packets.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(4), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(4, packets_acked, {1});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+TEST_F(GeneralLossAlgorithmTest, EarlyRetransmit1Packet) {
+ const size_t kNumSentPackets = 2;
+ // Transmit 2 packets.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ }
+ AckedPacketVector packets_acked;
+ // Early retransmit when the final packet gets acked and the first is nacked.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(2), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(2, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ EXPECT_EQ(clock_.Now() + 1.25 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout());
+
+ clock_.AdvanceTime(1.25 * rtt_stats_.latest_rtt());
+ VerifyLosses(2, packets_acked, {1});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+TEST_F(GeneralLossAlgorithmTest, EarlyRetransmitAllPackets) {
+ const size_t kNumSentPackets = 5;
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ // Advance the time 1/4 RTT between 3 and 4.
+ if (i == 3) {
+ clock_.AdvanceTime(0.25 * rtt_stats_.smoothed_rtt());
+ }
+ }
+ AckedPacketVector packets_acked;
+ // Early retransmit when the final packet gets acked and 1.25 RTTs have
+ // elapsed since the packets were sent.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(kNumSentPackets));
+ packets_acked.push_back(AckedPacket(QuicPacketNumber(kNumSentPackets),
+ kMaxPacketSize, QuicTime::Zero()));
+ // This simulates a single ack following multiple missing packets with FACK.
+ VerifyLosses(kNumSentPackets, packets_acked, {1, 2});
+ packets_acked.clear();
+ // The time has already advanced 1/4 an RTT, so ensure the timeout is set
+  // 1.25 RTTs after the earliest pending packet (3), not the last (4).
+ EXPECT_EQ(clock_.Now() + rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout());
+
+ clock_.AdvanceTime(rtt_stats_.smoothed_rtt());
+ VerifyLosses(kNumSentPackets, packets_acked, {3});
+ EXPECT_EQ(clock_.Now() + 0.25 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout());
+ clock_.AdvanceTime(0.25 * rtt_stats_.smoothed_rtt());
+ VerifyLosses(kNumSentPackets, packets_acked, {4});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+TEST_F(GeneralLossAlgorithmTest, DontEarlyRetransmitNeuteredPacket) {
+ const size_t kNumSentPackets = 2;
+ // Transmit 2 packets.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ }
+ AckedPacketVector packets_acked;
+ // Neuter packet 1.
+ unacked_packets_.RemoveRetransmittability(QuicPacketNumber(1));
+ clock_.AdvanceTime(rtt_stats_.smoothed_rtt());
+
+ // Early retransmit when the final packet gets acked and the first is nacked.
+ if (unacked_packets_.use_uber_loss_algorithm()) {
+ unacked_packets_.MaybeUpdateLargestAckedOfPacketNumberSpace(
+ ENCRYPTION_NONE, QuicPacketNumber(2));
+ } else {
+ unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2));
+ }
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(2), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(2, packets_acked, std::vector<uint64_t>{});
+ EXPECT_EQ(clock_.Now() + 0.25 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout());
+}
+
+TEST_F(GeneralLossAlgorithmTest, EarlyRetransmitWithLargerUnackablePackets) {
+ // Transmit 2 data packets and one ack.
+ SendDataPacket(1);
+ SendDataPacket(2);
+ SendAckPacket(3);
+ AckedPacketVector packets_acked;
+ clock_.AdvanceTime(rtt_stats_.smoothed_rtt());
+
+ // Early retransmit when the final packet gets acked and the first is nacked.
+ if (unacked_packets_.use_uber_loss_algorithm()) {
+ unacked_packets_.MaybeUpdateLargestAckedOfPacketNumberSpace(
+ ENCRYPTION_NONE, QuicPacketNumber(2));
+ } else {
+ unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2));
+ }
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(2), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(2, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ EXPECT_EQ(clock_.Now() + 0.25 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout());
+
+ // The packet should be lost once the loss timeout is reached.
+ clock_.AdvanceTime(0.25 * rtt_stats_.latest_rtt());
+ VerifyLosses(2, packets_acked, {1});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+TEST_F(GeneralLossAlgorithmTest, AlwaysLosePacketSent1RTTEarlier) {
+ // Transmit 1 packet and then wait an rtt plus 1ms.
+ SendDataPacket(1);
+ clock_.AdvanceTime(rtt_stats_.smoothed_rtt() +
+ QuicTime::Delta::FromMilliseconds(1));
+
+ // Transmit 2 packets.
+ SendDataPacket(2);
+ SendDataPacket(3);
+ AckedPacketVector packets_acked;
+ // Wait another RTT and ack 2.
+ clock_.AdvanceTime(rtt_stats_.smoothed_rtt());
+ if (unacked_packets_.use_uber_loss_algorithm()) {
+ unacked_packets_.MaybeUpdateLargestAckedOfPacketNumberSpace(
+ ENCRYPTION_NONE, QuicPacketNumber(2));
+ } else {
+ unacked_packets_.IncreaseLargestAcked(QuicPacketNumber(2));
+ }
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(2), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(2, packets_acked, {1});
+}
+
+// NoFack loss detection tests.
+TEST_F(GeneralLossAlgorithmTest, LazyFackNackRetransmit1Packet) {
+ loss_algorithm_.SetLossDetectionType(kLazyFack);
+ const size_t kNumSentPackets = 5;
+ // Transmit 5 packets.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ }
+ AckedPacketVector packets_acked;
+ // No loss on one ack.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(2), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(2, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ // No loss on two acks.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(3));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(3), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(3, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ // Loss on three acks.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(4), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(4, packets_acked, {1});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+// A stretch ack is an ack that covers more than 1 packet of previously
+// unacknowledged data.
+TEST_F(GeneralLossAlgorithmTest,
+ LazyFackNoNackRetransmit1PacketWith1StretchAck) {
+ loss_algorithm_.SetLossDetectionType(kLazyFack);
+ const size_t kNumSentPackets = 10;
+ // Transmit 10 packets.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ }
+ AckedPacketVector packets_acked;
+ // Nack the first packet 3 times in a single StretchAck.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(2), kMaxPacketSize, QuicTime::Zero()));
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(3));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(3), kMaxPacketSize, QuicTime::Zero()));
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(4), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(4, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ // The timer isn't set because we expect more acks.
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+ // Process another ack and then packet 1 will be lost.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(5));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(5), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(5, packets_acked, {1});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+// Acking a packet 3 packets ahead does not cause a retransmit.
+TEST_F(GeneralLossAlgorithmTest, LazyFackNackRetransmit1PacketSingleAck) {
+ loss_algorithm_.SetLossDetectionType(kLazyFack);
+ const size_t kNumSentPackets = 10;
+ // Transmit 10 packets.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ }
+ AckedPacketVector packets_acked;
+ // Nack the first packet 3 times in an AckFrame with three missing packets.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(4));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(4), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(4, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ // The timer isn't set because we expect more acks.
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+ // Process another ack and then packet 1 and 2 will be lost.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(5));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(5), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(5, packets_acked, {1, 2});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+// Time-based loss detection tests.
+TEST_F(GeneralLossAlgorithmTest, NoLossFor500Nacks) {
+ loss_algorithm_.SetLossDetectionType(kTime);
+ const size_t kNumSentPackets = 5;
+ // Transmit 5 packets.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ }
+ AckedPacketVector packets_acked;
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(2), kMaxPacketSize, QuicTime::Zero()));
+ for (size_t i = 1; i < 500; ++i) {
+ VerifyLosses(2, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ }
+ if (GetQuicReloadableFlag(quic_eighth_rtt_loss_detection)) {
+ EXPECT_EQ(1.125 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout() - clock_.Now());
+ } else {
+ EXPECT_EQ(1.25 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout() - clock_.Now());
+ }
+}
+
+TEST_F(GeneralLossAlgorithmTest, NoLossUntilTimeout) {
+ loss_algorithm_.SetLossDetectionType(kTime);
+ const size_t kNumSentPackets = 10;
+ // Transmit 10 packets at 1/10th an RTT interval.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ clock_.AdvanceTime(0.1 * rtt_stats_.smoothed_rtt());
+ }
+ AckedPacketVector packets_acked;
+ // Expect the timer to not be set.
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+ // The packet should not be lost until 1.25 RTTs pass.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(2), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(2, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ if (GetQuicReloadableFlag(quic_eighth_rtt_loss_detection)) {
+    // Expect the timer to be set to 0.125 RTTs in the future.
+ EXPECT_EQ(0.125 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout() - clock_.Now());
+ } else {
+    // Expect the timer to be set to 0.25 RTTs in the future.
+ EXPECT_EQ(0.25 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout() - clock_.Now());
+ }
+ VerifyLosses(2, packets_acked, std::vector<uint64_t>{});
+ clock_.AdvanceTime(0.25 * rtt_stats_.smoothed_rtt());
+ VerifyLosses(2, packets_acked, {1});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+TEST_F(GeneralLossAlgorithmTest, NoLossWithoutNack) {
+ loss_algorithm_.SetLossDetectionType(kTime);
+ const size_t kNumSentPackets = 10;
+ // Transmit 10 packets at 1/10th an RTT interval.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ clock_.AdvanceTime(0.1 * rtt_stats_.smoothed_rtt());
+ }
+ AckedPacketVector packets_acked;
+ // Expect the timer to not be set.
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+ // The packet should not be lost without a nack.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(1));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(1), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(1, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ // The timer should still not be set.
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+ clock_.AdvanceTime(0.25 * rtt_stats_.smoothed_rtt());
+ VerifyLosses(1, packets_acked, std::vector<uint64_t>{});
+ clock_.AdvanceTime(rtt_stats_.smoothed_rtt());
+ VerifyLosses(1, packets_acked, std::vector<uint64_t>{});
+
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+TEST_F(GeneralLossAlgorithmTest, MultipleLossesAtOnce) {
+ loss_algorithm_.SetLossDetectionType(kTime);
+ const size_t kNumSentPackets = 10;
+ // Transmit 10 packets at once and then go forward an RTT.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ }
+ AckedPacketVector packets_acked;
+ clock_.AdvanceTime(rtt_stats_.smoothed_rtt());
+ // Expect the timer to not be set.
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+ // The packet should not be lost until 1.25 RTTs pass.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(10));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(10), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(10, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ if (GetQuicReloadableFlag(quic_eighth_rtt_loss_detection)) {
+    // Expect the timer to be set to 0.125 RTTs in the future.
+ EXPECT_EQ(0.125 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout() - clock_.Now());
+ } else {
+    // Expect the timer to be set to 0.25 RTTs in the future.
+ EXPECT_EQ(0.25 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout() - clock_.Now());
+ }
+ clock_.AdvanceTime(0.25 * rtt_stats_.smoothed_rtt());
+ VerifyLosses(10, packets_acked, {1, 2, 3, 4, 5, 6, 7, 8, 9});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+TEST_F(GeneralLossAlgorithmTest, NoSpuriousLossesFromLargeReordering) {
+ loss_algorithm_.SetLossDetectionType(kTime);
+ const size_t kNumSentPackets = 10;
+ // Transmit 10 packets at once and then go forward an RTT.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ }
+ AckedPacketVector packets_acked;
+ clock_.AdvanceTime(rtt_stats_.smoothed_rtt());
+ // Expect the timer to not be set.
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+ // The packet should not be lost until 1.25 RTTs pass.
+
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(10));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(10), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(10, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ if (GetQuicReloadableFlag(quic_eighth_rtt_loss_detection)) {
+    // Expect the timer to be set to 0.125 RTTs in the future.
+ EXPECT_EQ(0.125 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout() - clock_.Now());
+ } else {
+    // Expect the timer to be set to 0.25 RTTs in the future.
+ EXPECT_EQ(0.25 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout() - clock_.Now());
+ }
+ clock_.AdvanceTime(0.25 * rtt_stats_.smoothed_rtt());
+ // Now ack packets 1 to 9 and ensure the timer is no longer set and no packets
+ // are lost.
+ for (uint64_t i = 1; i <= 9; ++i) {
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(i));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(i), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(i, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+ }
+}
+
+TEST_F(GeneralLossAlgorithmTest, IncreaseThresholdUponSpuriousLoss) {
+ loss_algorithm_.SetLossDetectionType(kAdaptiveTime);
+ EXPECT_EQ(4, loss_algorithm_.reordering_shift());
+ const size_t kNumSentPackets = 10;
+  // Transmit 10 packets at 1/10th an RTT interval.
+ for (size_t i = 1; i <= kNumSentPackets; ++i) {
+ SendDataPacket(i);
+ clock_.AdvanceTime(0.1 * rtt_stats_.smoothed_rtt());
+ }
+ EXPECT_EQ(QuicTime::Zero() + rtt_stats_.smoothed_rtt(), clock_.Now());
+ AckedPacketVector packets_acked;
+ // Expect the timer to not be set.
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+  // Packet 1 should not be lost until 1/16 of an RTT passes.
+ unacked_packets_.RemoveFromInFlight(QuicPacketNumber(2));
+ packets_acked.push_back(
+ AckedPacket(QuicPacketNumber(2), kMaxPacketSize, QuicTime::Zero()));
+ VerifyLosses(2, packets_acked, std::vector<uint64_t>{});
+ packets_acked.clear();
+  // Expect the timer to be set to 1/16 RTT in the future.
+ EXPECT_EQ(rtt_stats_.smoothed_rtt() * (1.0f / 16),
+ loss_algorithm_.GetLossTimeout() - clock_.Now());
+ VerifyLosses(2, packets_acked, std::vector<uint64_t>{});
+ clock_.AdvanceTime(rtt_stats_.smoothed_rtt() * (1.0f / 16));
+ VerifyLosses(2, packets_acked, {1});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+ // Retransmit packet 1 as 11 and 2 as 12.
+ SendDataPacket(11);
+ SendDataPacket(12);
+
+ // Advance the time 1/4 RTT and indicate the loss was spurious.
+ // The new threshold should be 1/2 RTT.
+ clock_.AdvanceTime(rtt_stats_.smoothed_rtt() * (1.0f / 4));
+ if (GetQuicReloadableFlag(quic_fix_adaptive_time_loss)) {
+ // The flag fixes an issue where adaptive time loss would increase the
+ // reordering threshold by an extra factor of two.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
+ }
+ loss_algorithm_.SpuriousRetransmitDetected(unacked_packets_, clock_.Now(),
+ rtt_stats_, QuicPacketNumber(11));
+ EXPECT_EQ(1, loss_algorithm_.reordering_shift());
+
+ // Detect another spurious retransmit and ensure the threshold doesn't
+ // increase again.
+ loss_algorithm_.SpuriousRetransmitDetected(unacked_packets_, clock_.Now(),
+ rtt_stats_, QuicPacketNumber(12));
+ EXPECT_EQ(1, loss_algorithm_.reordering_shift());
+}
+
+} // namespace
+} // namespace test
+} // namespace quic
diff --git a/quic/core/congestion_control/hybrid_slow_start.cc b/quic/core/congestion_control/hybrid_slow_start.cc
new file mode 100644
index 0000000..28963b1
--- /dev/null
+++ b/quic/core/congestion_control/hybrid_slow_start.cc
@@ -0,0 +1,104 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/hybrid_slow_start.h"
+
+#include <algorithm>
+
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+
+namespace quic {
+
+// Note(pwestin): the magic clamping numbers come from the original code in
+// tcp_cubic.c.
+const int64_t kHybridStartLowWindow = 16;
+// Number of delay samples for detecting the increase of delay.
+const uint32_t kHybridStartMinSamples = 8;
+// Exit slow start if the min rtt has increased by more than 1/8th.
+const int kHybridStartDelayFactorExp = 3; // 2^3 = 8
+// The original paper specifies 2 and 8ms, but those have changed over time.
+const int64_t kHybridStartDelayMinThresholdUs = 4000;
+const int64_t kHybridStartDelayMaxThresholdUs = 16000;
+
+HybridSlowStart::HybridSlowStart()
+ : started_(false),
+ hystart_found_(NOT_FOUND),
+ rtt_sample_count_(0),
+ current_min_rtt_(QuicTime::Delta::Zero()) {}
+
+void HybridSlowStart::OnPacketAcked(QuicPacketNumber acked_packet_number) {
+ // OnPacketAcked gets invoked after ShouldExitSlowStart, so it's best to end
+ // the round when the final packet of the burst is received and start it on
+ // the next incoming ack.
+ if (IsEndOfRound(acked_packet_number)) {
+ started_ = false;
+ }
+}
+
+void HybridSlowStart::OnPacketSent(QuicPacketNumber packet_number) {
+ last_sent_packet_number_ = packet_number;
+}
+
+void HybridSlowStart::Restart() {
+ started_ = false;
+ hystart_found_ = NOT_FOUND;
+}
+
+void HybridSlowStart::StartReceiveRound(QuicPacketNumber last_sent) {
+ QUIC_DVLOG(1) << "Reset hybrid slow start @" << last_sent;
+ end_packet_number_ = last_sent;
+ current_min_rtt_ = QuicTime::Delta::Zero();
+ rtt_sample_count_ = 0;
+ started_ = true;
+}
+
+bool HybridSlowStart::IsEndOfRound(QuicPacketNumber ack) const {
+ return !end_packet_number_.IsInitialized() || end_packet_number_ <= ack;
+}
+
+bool HybridSlowStart::ShouldExitSlowStart(QuicTime::Delta latest_rtt,
+ QuicTime::Delta min_rtt,
+ QuicPacketCount congestion_window) {
+ if (!started_) {
+ // Time to start the hybrid slow start.
+ StartReceiveRound(last_sent_packet_number_);
+ }
+ if (hystart_found_ != NOT_FOUND) {
+ return true;
+ }
+ // Second detection parameter - delay increase detection.
+ // Compare the minimum delay (current_min_rtt_) of the current
+ // burst of packets relative to the minimum delay during the session.
+  // Note: we only look at the first few (8) packets in each burst, since we
+ // only want to compare the lowest RTT of the burst relative to previous
+ // bursts.
+ rtt_sample_count_++;
+ if (rtt_sample_count_ <= kHybridStartMinSamples) {
+ if (current_min_rtt_.IsZero() || current_min_rtt_ > latest_rtt) {
+ current_min_rtt_ = latest_rtt;
+ }
+ }
+ // We only need to check this once per round.
+ if (rtt_sample_count_ == kHybridStartMinSamples) {
+    // Divide min_rtt by 8 to get an RTT increase threshold for exiting.
+ int64_t min_rtt_increase_threshold_us =
+ min_rtt.ToMicroseconds() >> kHybridStartDelayFactorExp;
+    // Ensure the RTT threshold is never less than 4ms or more than 16ms.
+ min_rtt_increase_threshold_us = std::min(min_rtt_increase_threshold_us,
+ kHybridStartDelayMaxThresholdUs);
+ QuicTime::Delta min_rtt_increase_threshold =
+ QuicTime::Delta::FromMicroseconds(std::max(
+ min_rtt_increase_threshold_us, kHybridStartDelayMinThresholdUs));
+
+ if (current_min_rtt_ > min_rtt + min_rtt_increase_threshold) {
+ hystart_found_ = DELAY;
+ }
+ }
+ // Exit from slow start if the cwnd is greater than 16 and
+ // increasing delay is found.
+ return congestion_window >= kHybridStartLowWindow &&
+ hystart_found_ != NOT_FOUND;
+}
+
+} // namespace quic
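
A standalone sketch of the delay-increase threshold computed in ShouldExitSlowStart: min_rtt/8, clamped to the 4-16 ms range defined by the constants above. The helper name is illustrative only.

// Standalone sketch (not part of the patch): the HyStart delay threshold.
#include <algorithm>
#include <cstdint>
#include <iostream>

int64_t MinRttIncreaseThresholdUs(int64_t min_rtt_us) {
  int64_t threshold_us = min_rtt_us >> 3;                  // min_rtt / 8
  threshold_us = std::min<int64_t>(threshold_us, 16000);   // cap at 16 ms
  return std::max<int64_t>(threshold_us, 4000);            // floor at 4 ms
}

int main() {
  std::cout << MinRttIncreaseThresholdUs(60000) << "\n";   // 7500: 60 ms RTT exits at 67.5 ms
  std::cout << MinRttIncreaseThresholdUs(20000) << "\n";   // 4000: floored
  std::cout << MinRttIncreaseThresholdUs(200000) << "\n";  // 16000: capped
}
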
diff --git a/quic/core/congestion_control/hybrid_slow_start.h b/quic/core/congestion_control/hybrid_slow_start.h
new file mode 100644
index 0000000..61e40ae
--- /dev/null
+++ b/quic/core/congestion_control/hybrid_slow_start.h
@@ -0,0 +1,84 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// This class is a helper class to TcpCubicSender.
+// Slow start is the initial startup phase of TCP; it lasts until the first
+// packet loss. This class implements hybrid slow start of the TCP cubic send
+// side congestion algorithm. The key feature of hybrid slow start is that it
+// tries to avoid running into the wall too hard during the slow start phase,
+// which the traditional TCP implementation does.
+// This does not implement ack train detection because it interacts poorly with
+// pacing.
+// http://netsrv.csc.ncsu.edu/export/hybridstart_pfldnet08.pdf
+// http://research.csc.ncsu.edu/netsrv/sites/default/files/hystart_techreport_2008.pdf
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_HYBRID_SLOW_START_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_HYBRID_SLOW_START_H_
+
+#include <cstdint>
+
+#include "base/macros.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
+
+namespace quic {
+
+class QUIC_EXPORT_PRIVATE HybridSlowStart {
+ public:
+ HybridSlowStart();
+ HybridSlowStart(const HybridSlowStart&) = delete;
+ HybridSlowStart& operator=(const HybridSlowStart&) = delete;
+
+ void OnPacketAcked(QuicPacketNumber acked_packet_number);
+
+ void OnPacketSent(QuicPacketNumber packet_number);
+
+ // ShouldExitSlowStart should be called on every new ack frame, since a new
+ // RTT measurement can be made then.
+ // rtt: the RTT for this ack packet.
+  // min_rtt: the lowest delay (RTT) we have seen during the session.
+ // congestion_window: the congestion window in packets.
+ bool ShouldExitSlowStart(QuicTime::Delta rtt,
+ QuicTime::Delta min_rtt,
+ QuicPacketCount congestion_window);
+
+ // Start a new slow start phase.
+ void Restart();
+
+ // TODO(ianswett): The following methods should be private, but that requires
+ // a follow up CL to update the unit test.
+  // Returns true if this ack is for the last packet number of our current
+  // slow start round.
+ // Call Reset if this returns true.
+ bool IsEndOfRound(QuicPacketNumber ack) const;
+
+ // Call for the start of each receive round (burst) in the slow start phase.
+ void StartReceiveRound(QuicPacketNumber last_sent);
+
+ // Whether slow start has started.
+ bool started() const { return started_; }
+
+ private:
+ // Whether a condition for exiting slow start has been found.
+ enum HystartState {
+ NOT_FOUND,
+ DELAY, // Too much increase in the round's min_rtt was observed.
+ };
+
+ // Whether the hybrid slow start has been started.
+ bool started_;
+ HystartState hystart_found_;
+ // Last packet number sent which was CWND limited.
+ QuicPacketNumber last_sent_packet_number_;
+
+ // Variables for tracking acks received during a slow start round.
+ QuicPacketNumber end_packet_number_; // End of the receive round.
+ uint32_t rtt_sample_count_; // Number of rtt samples in the current round.
+ QuicTime::Delta current_min_rtt_; // The minimum rtt of current round.
+};
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_HYBRID_SLOW_START_H_
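
A minimal usage sketch of the API declared above, assuming it is built inside the QUICHE tree; MaybeExitSlowStart is a hypothetical caller, not an existing QUICHE function.

// Usage sketch (not part of the patch): driving HybridSlowStart from a sender.
#include "net/third_party/quiche/src/quic/core/congestion_control/hybrid_slow_start.h"

namespace quic {

bool MaybeExitSlowStart(HybridSlowStart* hystart,
                        QuicPacketNumber newly_sent,
                        QuicPacketNumber newly_acked,
                        QuicTime::Delta latest_rtt,
                        QuicTime::Delta min_rtt,
                        QuicPacketCount congestion_window) {
  // Record every sent packet so the round boundary is known.
  hystart->OnPacketSent(newly_sent);
  // Check for a delay increase before noting the ack, since OnPacketAcked may
  // end the current round.
  const bool exit_slow_start =
      hystart->ShouldExitSlowStart(latest_rtt, min_rtt, congestion_window);
  hystart->OnPacketAcked(newly_acked);
  return exit_slow_start;
}

}  // namespace quic
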
diff --git a/quic/core/congestion_control/hybrid_slow_start_test.cc b/quic/core/congestion_control/hybrid_slow_start_test.cc
new file mode 100644
index 0000000..aa2f849
--- /dev/null
+++ b/quic/core/congestion_control/hybrid_slow_start_test.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/hybrid_slow_start.h"
+
+#include <memory>
+
+#include "net/third_party/quiche/src/quic/platform/api/quic_ptr_util.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+
+namespace quic {
+namespace test {
+
+class HybridSlowStartTest : public QuicTest {
+ protected:
+ HybridSlowStartTest()
+ : one_ms_(QuicTime::Delta::FromMilliseconds(1)),
+ rtt_(QuicTime::Delta::FromMilliseconds(60)) {}
+ void SetUp() override { slow_start_ = QuicMakeUnique<HybridSlowStart>(); }
+ const QuicTime::Delta one_ms_;
+ const QuicTime::Delta rtt_;
+ std::unique_ptr<HybridSlowStart> slow_start_;
+};
+
+TEST_F(HybridSlowStartTest, Simple) {
+ QuicPacketNumber packet_number(1);
+ QuicPacketNumber end_packet_number(3);
+ slow_start_->StartReceiveRound(end_packet_number);
+
+ EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number++));
+
+ // Test duplicates.
+ EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number));
+
+ EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number++));
+ EXPECT_TRUE(slow_start_->IsEndOfRound(packet_number++));
+
+  // Test without a newly registered end_packet_number.
+ EXPECT_TRUE(slow_start_->IsEndOfRound(packet_number++));
+
+ end_packet_number = QuicPacketNumber(20);
+ slow_start_->StartReceiveRound(end_packet_number);
+ while (packet_number < end_packet_number) {
+ EXPECT_FALSE(slow_start_->IsEndOfRound(packet_number++));
+ }
+ EXPECT_TRUE(slow_start_->IsEndOfRound(packet_number++));
+}
+
+TEST_F(HybridSlowStartTest, Delay) {
+ // We expect to detect the increase at +1/8 of the RTT; hence at a typical
+ // RTT of 60ms the detection will happen at 67.5 ms.
+ const int kHybridStartMinSamples = 8; // Number of acks required to trigger.
+
+ QuicPacketNumber end_packet_number(1);
+ slow_start_->StartReceiveRound(end_packet_number++);
+
+ // Will not trigger since our lowest RTT in our burst is the same as the long
+ // term RTT provided.
+ for (int n = 0; n < kHybridStartMinSamples; ++n) {
+ EXPECT_FALSE(slow_start_->ShouldExitSlowStart(
+ rtt_ + QuicTime::Delta::FromMilliseconds(n), rtt_, 100));
+ }
+ slow_start_->StartReceiveRound(end_packet_number++);
+ for (int n = 1; n < kHybridStartMinSamples; ++n) {
+ EXPECT_FALSE(slow_start_->ShouldExitSlowStart(
+ rtt_ + QuicTime::Delta::FromMilliseconds(n + 10), rtt_, 100));
+ }
+  // Expect to trigger since all packets in this burst were above the long
+  // term RTT provided.
+ EXPECT_TRUE(slow_start_->ShouldExitSlowStart(
+ rtt_ + QuicTime::Delta::FromMilliseconds(10), rtt_, 100));
+}
+
+} // namespace test
+} // namespace quic
diff --git a/quic/core/congestion_control/loss_detection_interface.h b/quic/core/congestion_control/loss_detection_interface.h
new file mode 100644
index 0000000..7439d3f
--- /dev/null
+++ b/quic/core/congestion_control/loss_detection_interface.h
@@ -0,0 +1,49 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The pure virtual class for send side loss detection algorithm.
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_LOSS_DETECTION_INTERFACE_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_LOSS_DETECTION_INTERFACE_H_
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/send_algorithm_interface.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
+
+namespace quic {
+
+class QuicUnackedPacketMap;
+class RttStats;
+
+class QUIC_EXPORT_PRIVATE LossDetectionInterface {
+ public:
+ virtual ~LossDetectionInterface() {}
+
+ virtual LossDetectionType GetLossDetectionType() const = 0;
+
+ // Called when a new ack arrives or the loss alarm fires.
+ virtual void DetectLosses(const QuicUnackedPacketMap& unacked_packets,
+ QuicTime time,
+ const RttStats& rtt_stats,
+ QuicPacketNumber largest_newly_acked,
+ const AckedPacketVector& packets_acked,
+ LostPacketVector* packets_lost) = 0;
+
+ // Get the time the LossDetectionAlgorithm wants to re-evaluate losses.
+ // Returns QuicTime::Zero if no alarm needs to be set.
+ virtual QuicTime GetLossTimeout() const = 0;
+
+ // Called when a |spurious_retransmission| is detected. The original
+ // transmission must have been caused by DetectLosses.
+ virtual void SpuriousRetransmitDetected(
+ const QuicUnackedPacketMap& unacked_packets,
+ QuicTime time,
+ const RttStats& rtt_stats,
+ QuicPacketNumber spurious_retransmission) = 0;
+};
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_LOSS_DETECTION_INTERFACE_H_
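
A skeleton sketch of what a concrete loss detector must provide to satisfy this interface, assuming the QUICHE headers above are available; NullLossDetector is hypothetical and intentionally does nothing.

// Skeleton sketch (not part of the patch): a no-op LossDetectionInterface.
#include "net/third_party/quiche/src/quic/core/congestion_control/loss_detection_interface.h"

namespace quic {

// Never declares losses and never requests a loss alarm.
class NullLossDetector : public LossDetectionInterface {
 public:
  LossDetectionType GetLossDetectionType() const override { return kNack; }

  void DetectLosses(const QuicUnackedPacketMap& /*unacked_packets*/,
                    QuicTime /*time*/,
                    const RttStats& /*rtt_stats*/,
                    QuicPacketNumber /*largest_newly_acked*/,
                    const AckedPacketVector& /*packets_acked*/,
                    LostPacketVector* /*packets_lost*/) override {}

  // Zero means "no alarm needed", per the interface contract.
  QuicTime GetLossTimeout() const override { return QuicTime::Zero(); }

  void SpuriousRetransmitDetected(
      const QuicUnackedPacketMap& /*unacked_packets*/,
      QuicTime /*time*/,
      const RttStats& /*rtt_stats*/,
      QuicPacketNumber /*spurious_retransmission*/) override {}
};

}  // namespace quic
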
diff --git a/quic/core/congestion_control/pacing_sender.cc b/quic/core/congestion_control/pacing_sender.cc
new file mode 100644
index 0000000..348f410
--- /dev/null
+++ b/quic/core/congestion_control/pacing_sender.cc
@@ -0,0 +1,151 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/pacing_sender.h"
+
+#include "net/third_party/quiche/src/quic/platform/api/quic_flag_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+
+namespace quic {
+namespace {
+
+// Configured maximum size of the burst coming out of quiescence. The burst
+// is never larger than the current CWND in packets.
+static const uint32_t kInitialUnpacedBurst = 10;
+
+} // namespace
+
+PacingSender::PacingSender()
+ : sender_(nullptr),
+ max_pacing_rate_(QuicBandwidth::Zero()),
+ burst_tokens_(kInitialUnpacedBurst),
+ ideal_next_packet_send_time_(QuicTime::Zero()),
+ initial_burst_size_(kInitialUnpacedBurst),
+ lumpy_tokens_(0),
+ alarm_granularity_(QuicTime::Delta::FromMilliseconds(1)),
+ pacing_limited_(false) {
+ if (GetQuicReloadableFlag(quic_donot_reset_ideal_next_packet_send_time)) {
+ QUIC_RELOADABLE_FLAG_COUNT(quic_donot_reset_ideal_next_packet_send_time);
+ }
+}
+
+PacingSender::~PacingSender() {}
+
+void PacingSender::set_sender(SendAlgorithmInterface* sender) {
+ DCHECK(sender != nullptr);
+ sender_ = sender;
+}
+
+void PacingSender::OnCongestionEvent(bool rtt_updated,
+ QuicByteCount bytes_in_flight,
+ QuicTime event_time,
+ const AckedPacketVector& acked_packets,
+ const LostPacketVector& lost_packets) {
+ DCHECK(sender_ != nullptr);
+ if (!lost_packets.empty()) {
+ // Clear any burst tokens when entering recovery.
+ burst_tokens_ = 0;
+ }
+ sender_->OnCongestionEvent(rtt_updated, bytes_in_flight, event_time,
+ acked_packets, lost_packets);
+}
+
+void PacingSender::OnPacketSent(
+ QuicTime sent_time,
+ QuicByteCount bytes_in_flight,
+ QuicPacketNumber packet_number,
+ QuicByteCount bytes,
+ HasRetransmittableData has_retransmittable_data) {
+ DCHECK(sender_ != nullptr);
+ sender_->OnPacketSent(sent_time, bytes_in_flight, packet_number, bytes,
+ has_retransmittable_data);
+ if (has_retransmittable_data != HAS_RETRANSMITTABLE_DATA) {
+ return;
+ }
+ // If in recovery, the connection is not coming out of quiescence.
+ if (bytes_in_flight == 0 && !sender_->InRecovery()) {
+ // Add more burst tokens anytime the connection is leaving quiescence, but
+ // limit it to the equivalent of a single bulk write, not exceeding the
+ // current CWND in packets.
+ burst_tokens_ = std::min(
+ initial_burst_size_,
+ static_cast<uint32_t>(sender_->GetCongestionWindow() / kDefaultTCPMSS));
+ }
+ if (burst_tokens_ > 0) {
+ --burst_tokens_;
+ if (!GetQuicReloadableFlag(quic_donot_reset_ideal_next_packet_send_time)) {
+ ideal_next_packet_send_time_ = QuicTime::Zero();
+ }
+ pacing_limited_ = false;
+ return;
+ }
+ // The next packet should be sent as soon as the current packet has been
+ // transferred. PacingRate is based on bytes in flight including this packet.
+ QuicTime::Delta delay =
+ PacingRate(bytes_in_flight + bytes).TransferTime(bytes);
+ if (!pacing_limited_ || lumpy_tokens_ == 0) {
+    // Reset lumpy_tokens_ if either the application or the cwnd throttles
+    // sending, or the tokens run out.
+ lumpy_tokens_ = std::max(
+ 1u, std::min(static_cast<uint32_t>(
+ GetQuicFlag(FLAGS_quic_lumpy_pacing_size)),
+ static_cast<uint32_t>(
+ (sender_->GetCongestionWindow() *
+ GetQuicFlag(FLAGS_quic_lumpy_pacing_cwnd_fraction)) /
+ kDefaultTCPMSS)));
+ }
+ --lumpy_tokens_;
+ if (pacing_limited_) {
+ // Make up for lost time since pacing throttles the sending.
+ ideal_next_packet_send_time_ = ideal_next_packet_send_time_ + delay;
+ } else {
+ ideal_next_packet_send_time_ =
+ std::max(ideal_next_packet_send_time_ + delay, sent_time + delay);
+ }
+ // Stop making up for lost time if underlying sender prevents sending.
+ pacing_limited_ = sender_->CanSend(bytes_in_flight + bytes);
+}
+
+void PacingSender::OnApplicationLimited() {
+ // The send is application limited, stop making up for lost time.
+ pacing_limited_ = false;
+}
+
+QuicTime::Delta PacingSender::TimeUntilSend(
+ QuicTime now,
+ QuicByteCount bytes_in_flight) const {
+ DCHECK(sender_ != nullptr);
+
+ if (!sender_->CanSend(bytes_in_flight)) {
+ // The underlying sender prevents sending.
+ return QuicTime::Delta::Infinite();
+ }
+
+ if (burst_tokens_ > 0 || bytes_in_flight == 0 || lumpy_tokens_ > 0) {
+    // Don't pace if we have burst tokens available or are leaving quiescence.
+ return QuicTime::Delta::Zero();
+ }
+
+ // If the next send time is within the alarm granularity, send immediately.
+ if (ideal_next_packet_send_time_ > now + alarm_granularity_) {
+ QUIC_DVLOG(1) << "Delaying packet: "
+ << (ideal_next_packet_send_time_ - now).ToMicroseconds();
+ return ideal_next_packet_send_time_ - now;
+ }
+
+ QUIC_DVLOG(1) << "Sending packet now";
+ return QuicTime::Delta::Zero();
+}
+
+QuicBandwidth PacingSender::PacingRate(QuicByteCount bytes_in_flight) const {
+ DCHECK(sender_ != nullptr);
+ if (!max_pacing_rate_.IsZero()) {
+ return QuicBandwidth::FromBitsPerSecond(
+ std::min(max_pacing_rate_.ToBitsPerSecond(),
+ sender_->PacingRate(bytes_in_flight).ToBitsPerSecond()));
+ }
+ return sender_->PacingRate(bytes_in_flight);
+}
+
+} // namespace quic
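
A standalone sketch of the pacing arithmetic used above: once burst and lumpy tokens are exhausted, each packet advances the ideal send time by packet_size / pacing_rate. The helper name is illustrative only.

// Standalone sketch (not part of the patch): per-packet pacing delay.
#include <cstdint>
#include <iostream>

int64_t PacingDelayUs(int64_t packet_bytes, int64_t pacing_rate_bytes_per_sec) {
  // Time to transfer one packet at the given rate, in microseconds.
  return packet_bytes * 1000000 / pacing_rate_bytes_per_sec;
}

int main() {
  // At 1.5 MB/s (12 Mbps), a 1500-byte packet takes 1 ms to transfer, so the
  // ideal send times advance in 1 ms steps once burst and lumpy tokens run out.
  std::cout << PacingDelayUs(1500, 1500000) << "\n";  // 1000 us
}
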
diff --git a/quic/core/congestion_control/pacing_sender.h b/quic/core/congestion_control/pacing_sender.h
new file mode 100644
index 0000000..983bbf6
--- /dev/null
+++ b/quic/core/congestion_control/pacing_sender.h
@@ -0,0 +1,108 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A send algorithm that adds pacing on top of another send algorithm.
+// It uses the underlying sender's pacing rate to schedule packets.
+// It also takes into consideration the expected granularity of the underlying
+// alarm to ensure that alarms are not set too aggressively, erring towards
+// sending packets too early rather than too late.
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_PACING_SENDER_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_PACING_SENDER_H_
+
+#include <cstdint>
+#include <map>
+#include <memory>
+
+#include "base/macros.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/send_algorithm_interface.h"
+#include "net/third_party/quiche/src/quic/core/quic_bandwidth.h"
+#include "net/third_party/quiche/src/quic/core/quic_config.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
+
+namespace quic {
+
+namespace test {
+class QuicSentPacketManagerPeer;
+} // namespace test
+
+class QUIC_EXPORT_PRIVATE PacingSender {
+ public:
+ PacingSender();
+ PacingSender(const PacingSender&) = delete;
+ PacingSender& operator=(const PacingSender&) = delete;
+ ~PacingSender();
+
+ // Sets the underlying sender. Does not take ownership of |sender|. |sender|
+ // must not be null. This must be called before any of the
+ // SendAlgorithmInterface wrapper methods are called.
+ void set_sender(SendAlgorithmInterface* sender);
+
+ void set_max_pacing_rate(QuicBandwidth max_pacing_rate) {
+ max_pacing_rate_ = max_pacing_rate;
+ }
+
+ void set_alarm_granularity(QuicTime::Delta alarm_granularity) {
+ alarm_granularity_ = alarm_granularity;
+ }
+
+ QuicBandwidth max_pacing_rate() const { return max_pacing_rate_; }
+
+ void OnCongestionEvent(bool rtt_updated,
+ QuicByteCount bytes_in_flight,
+ QuicTime event_time,
+ const AckedPacketVector& acked_packets,
+ const LostPacketVector& lost_packets);
+
+ void OnPacketSent(QuicTime sent_time,
+ QuicByteCount bytes_in_flight,
+ QuicPacketNumber packet_number,
+ QuicByteCount bytes,
+ HasRetransmittableData has_retransmittable_data);
+
+ // Called when application throttles the sending, so that pacing sender stops
+ // making up for lost time.
+ void OnApplicationLimited();
+
+ QuicTime::Delta TimeUntilSend(QuicTime now,
+ QuicByteCount bytes_in_flight) const;
+
+ QuicBandwidth PacingRate(QuicByteCount bytes_in_flight) const;
+
+ QuicTime ideal_next_packet_send_time() const {
+ return ideal_next_packet_send_time_;
+ }
+
+ private:
+ friend class test::QuicSentPacketManagerPeer;
+
+ // Underlying sender. Not owned.
+ SendAlgorithmInterface* sender_;
+  // If not QuicBandwidth::Zero(), the maximum rate the PacingSender will use.
+ QuicBandwidth max_pacing_rate_;
+
+ // Number of unpaced packets to be sent before packets are delayed.
+ uint32_t burst_tokens_;
+  QuicTime ideal_next_packet_send_time_;  // When the next packet can be sent.
+ uint32_t initial_burst_size_;
+
+  // Number of unpaced packets to be sent before packets are delayed. These
+  // tokens are consumed after burst_tokens_ run out.
+ uint32_t lumpy_tokens_;
+
+ // If the next send time is within alarm_granularity_, send immediately.
+ // TODO(fayang): Remove alarm_granularity_ when deprecating
+ // quic_offload_pacing_to_usps2 flag.
+ QuicTime::Delta alarm_granularity_;
+
+ // Indicates whether pacing throttles the sending. If true, make up for lost
+ // time.
+ bool pacing_limited_;
+};
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_PACING_SENDER_H_
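
A minimal usage sketch of PacingSender as declared above, assuming the QUICHE tree; ConfigurePacer and DelayBeforeNextSend are hypothetical helpers, and |controller| stands in for any SendAlgorithmInterface implementation.

// Usage sketch (not part of the patch): wrapping a sender with PacingSender.
#include "net/third_party/quiche/src/quic/core/congestion_control/pacing_sender.h"

namespace quic {

// Wire a pacer to a congestion controller once, before any other pacer call.
void ConfigurePacer(PacingSender* pacer, SendAlgorithmInterface* controller) {
  pacer->set_sender(controller);
}

// Ask how long to wait before the next send, pacing included.
QuicTime::Delta DelayBeforeNextSend(const PacingSender& pacer,
                                    QuicTime now,
                                    QuicByteCount bytes_in_flight) {
  // Zero means "send now"; Infinite means the underlying sender blocks sending.
  return pacer.TimeUntilSend(now, bytes_in_flight);
}

}  // namespace quic
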
diff --git a/quic/core/congestion_control/pacing_sender_test.cc b/quic/core/congestion_control/pacing_sender_test.cc
new file mode 100644
index 0000000..0bb9258
--- /dev/null
+++ b/quic/core/congestion_control/pacing_sender_test.cc
@@ -0,0 +1,432 @@
+// Copyright (c) 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/pacing_sender.h"
+
+#include <memory>
+
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flag_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_ptr_util.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+#include "net/third_party/quiche/src/quic/test_tools/mock_clock.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_test_utils.h"
+
+using testing::_;
+using testing::AtMost;
+using testing::IsEmpty;
+using testing::Return;
+using testing::StrictMock;
+
+namespace quic {
+namespace test {
+
+const QuicByteCount kBytesInFlight = 1024;
+const int kInitialBurstPackets = 10;
+
+class PacingSenderTest : public QuicTest {
+ protected:
+ PacingSenderTest()
+ : zero_time_(QuicTime::Delta::Zero()),
+ infinite_time_(QuicTime::Delta::Infinite()),
+ packet_number_(1),
+ mock_sender_(new StrictMock<MockSendAlgorithm>()),
+ pacing_sender_(new PacingSender) {
+ pacing_sender_->set_sender(mock_sender_.get());
+ // Pick arbitrary time.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(9));
+ }
+
+ ~PacingSenderTest() override {}
+
+ void InitPacingRate(QuicPacketCount burst_size, QuicBandwidth bandwidth) {
+ mock_sender_ = QuicMakeUnique<StrictMock<MockSendAlgorithm>>();
+ pacing_sender_ = QuicMakeUnique<PacingSender>();
+ pacing_sender_->set_sender(mock_sender_.get());
+ EXPECT_CALL(*mock_sender_, PacingRate(_)).WillRepeatedly(Return(bandwidth));
+ if (burst_size == 0) {
+ EXPECT_CALL(*mock_sender_, OnCongestionEvent(_, _, _, _, _));
+ LostPacketVector lost_packets;
+ lost_packets.push_back(LostPacket(QuicPacketNumber(1), kMaxPacketSize));
+ AckedPacketVector empty;
+ pacing_sender_->OnCongestionEvent(true, 1234, clock_.Now(), empty,
+ lost_packets);
+ } else if (burst_size != kInitialBurstPackets) {
+      QUIC_LOG(FATAL) << "Unsupported burst_size " << burst_size
+                      << " specified, only 0 and " << kInitialBurstPackets
+                      << " are supported.";
+ }
+ }
+
+ void CheckPacketIsSentImmediately(HasRetransmittableData retransmittable_data,
+ QuicByteCount bytes_in_flight,
+ bool in_recovery,
+ bool cwnd_limited,
+ QuicPacketCount cwnd) {
+ // In order for the packet to be sendable, the underlying sender must
+ // permit it to be sent immediately.
+ for (int i = 0; i < 2; ++i) {
+ EXPECT_CALL(*mock_sender_, CanSend(bytes_in_flight))
+ .WillOnce(Return(true));
+ // Verify that the packet can be sent immediately.
+ EXPECT_EQ(zero_time_,
+ pacing_sender_->TimeUntilSend(clock_.Now(), bytes_in_flight));
+ }
+
+ // Actually send the packet.
+ if (bytes_in_flight == 0) {
+ EXPECT_CALL(*mock_sender_, InRecovery()).WillOnce(Return(in_recovery));
+ }
+ EXPECT_CALL(*mock_sender_,
+ OnPacketSent(clock_.Now(), bytes_in_flight, packet_number_,
+ kMaxPacketSize, retransmittable_data));
+ EXPECT_CALL(*mock_sender_, GetCongestionWindow())
+ .Times(AtMost(1))
+ .WillRepeatedly(Return(cwnd * kDefaultTCPMSS));
+ EXPECT_CALL(*mock_sender_, CanSend(bytes_in_flight + kMaxPacketSize))
+ .Times(AtMost(1))
+ .WillRepeatedly(Return(!cwnd_limited));
+ pacing_sender_->OnPacketSent(clock_.Now(), bytes_in_flight,
+ packet_number_++, kMaxPacketSize,
+ retransmittable_data);
+ }
+
+ void CheckPacketIsSentImmediately() {
+ CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, kBytesInFlight,
+ false, false, 10);
+ }
+
+ void CheckPacketIsDelayed(QuicTime::Delta delay) {
+ // In order for the packet to be sendable, the underlying sender must
+ // permit it to be sent immediately.
+ for (int i = 0; i < 2; ++i) {
+ EXPECT_CALL(*mock_sender_, CanSend(kBytesInFlight))
+ .WillOnce(Return(true));
+ // Verify that the packet is delayed.
+ EXPECT_EQ(delay.ToMicroseconds(),
+ pacing_sender_->TimeUntilSend(clock_.Now(), kBytesInFlight)
+ .ToMicroseconds());
+ }
+ }
+
+ void UpdateRtt() {
+ EXPECT_CALL(*mock_sender_,
+ OnCongestionEvent(true, kBytesInFlight, _, _, _));
+ AckedPacketVector empty_acked;
+ LostPacketVector empty_lost;
+ pacing_sender_->OnCongestionEvent(true, kBytesInFlight, clock_.Now(),
+ empty_acked, empty_lost);
+ }
+
+ void OnApplicationLimited() { pacing_sender_->OnApplicationLimited(); }
+
+ const QuicTime::Delta zero_time_;
+ const QuicTime::Delta infinite_time_;
+ MockClock clock_;
+ QuicPacketNumber packet_number_;
+ std::unique_ptr<StrictMock<MockSendAlgorithm>> mock_sender_;
+ std::unique_ptr<PacingSender> pacing_sender_;
+};
+
+TEST_F(PacingSenderTest, NoSend) {
+ for (int i = 0; i < 2; ++i) {
+ EXPECT_CALL(*mock_sender_, CanSend(kBytesInFlight)).WillOnce(Return(false));
+ EXPECT_EQ(infinite_time_,
+ pacing_sender_->TimeUntilSend(clock_.Now(), kBytesInFlight));
+ }
+}
+
+TEST_F(PacingSenderTest, SendNow) {
+ for (int i = 0; i < 2; ++i) {
+ EXPECT_CALL(*mock_sender_, CanSend(kBytesInFlight)).WillOnce(Return(true));
+ EXPECT_EQ(zero_time_,
+ pacing_sender_->TimeUntilSend(clock_.Now(), kBytesInFlight));
+ }
+}
+
+TEST_F(PacingSenderTest, VariousSending) {
+ // Configure pacing rate of 1 packet per 1 ms, no initial burst.
+ InitPacingRate(0, QuicBandwidth::FromBytesAndTimeDelta(
+ kMaxPacketSize, QuicTime::Delta::FromMilliseconds(1)));
+
+ // Now update the RTT and verify that packets are actually paced.
+ UpdateRtt();
+
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+
+  // The first packet was a "make up", then we sent two packets "into the
+  // future", so the delay should be 2ms.
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+
+ // Wake up on time.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(2));
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+
+ // Wake up late.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(4));
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+
+ // Wake up really late.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(8));
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+
+ // Wake up really late again, but application pause partway through.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(8));
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ OnApplicationLimited();
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+ // Wake up early, but after enough time has passed to permit a send.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(1));
+ CheckPacketIsSentImmediately();
+}
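+
+// A note on the arithmetic above: with a pacing rate of one packet per 1ms and
+// no burst tokens, every send advances the ideal send time by 1ms. A packet
+// whose ideal time is within the alarm granularity (1ms here) is still sent
+// immediately, so two back-to-back sends leave the ideal time 2ms in the
+// future, which is the delay CheckPacketIsDelayed expects.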
+
+TEST_F(PacingSenderTest, InitialBurst) {
+ // Configure pacing rate of 1 packet per 1 ms.
+ InitPacingRate(10, QuicBandwidth::FromBytesAndTimeDelta(
+ kMaxPacketSize, QuicTime::Delta::FromMilliseconds(1)));
+
+ // Update the RTT and verify that the first 10 packets aren't paced.
+ UpdateRtt();
+
+ // Send 10 packets, and verify that they are not paced.
+ for (int i = 0; i < kInitialBurstPackets; ++i) {
+ CheckPacketIsSentImmediately();
+ }
+
+ // The first packet was a "make up", then we sent two packets "into the
+ // future", so the delay should be 2ms.
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
+ CheckPacketIsSentImmediately();
+
+ // Next time TimeUntilSend is called with no bytes in flight, pacing should
+ // allow a packet to be sent, and when it's sent, the tokens are refilled.
+ CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, 0, false, false, 10);
+ for (int i = 0; i < kInitialBurstPackets - 1; ++i) {
+ CheckPacketIsSentImmediately();
+ }
+
+ // The first packet was a "make up", then we sent two packets "into the
+ // future", so the delay should be 2ms.
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+}
+
+TEST_F(PacingSenderTest, InitialBurstNoRttMeasurement) {
+ // Configure pacing rate of 1 packet per 1 ms.
+ InitPacingRate(10, QuicBandwidth::FromBytesAndTimeDelta(
+ kMaxPacketSize, QuicTime::Delta::FromMilliseconds(1)));
+
+ // Send 10 packets, and verify that they are not paced.
+ for (int i = 0; i < kInitialBurstPackets; ++i) {
+ CheckPacketIsSentImmediately();
+ }
+
+ // The first packet was a "make up", then we sent two packets "into the
+ // future", so the delay should be 2ms.
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
+ CheckPacketIsSentImmediately();
+
+ // Next time TimeUntilSend is called with no bytes in flight, the tokens
+ // should be refilled and there should be no delay.
+ CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, 0, false, false, 10);
+ // Send 10 packets, and verify that they are not paced.
+ for (int i = 0; i < kInitialBurstPackets - 1; ++i) {
+ CheckPacketIsSentImmediately();
+ }
+
+ // The first packet was a "make up", then we sent two packets "into the
+ // future", so the delay should be 2ms.
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+}
+
+TEST_F(PacingSenderTest, FastSending) {
+ // Ensure the pacing sender paces, even when the inter-packet spacing is less
+ // than the pacing granularity.
+ InitPacingRate(10,
+ QuicBandwidth::FromBytesAndTimeDelta(
+ 2 * kMaxPacketSize, QuicTime::Delta::FromMilliseconds(1)));
+ // Update the RTT and verify that the first 10 packets aren't paced.
+ UpdateRtt();
+
+ // Send 10 packets, and verify that they are not paced.
+ for (int i = 0; i < kInitialBurstPackets; ++i) {
+ CheckPacketIsSentImmediately();
+ }
+
+  // The first packet was a "make up", then we sent two packets "into the
+  // future"; at 2 packets/ms, the delay should be 1.5ms.
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMicroseconds(1500));
+
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(5));
+ CheckPacketIsSentImmediately();
+
+ // Next time TimeUntilSend is called with no bytes in flight, the tokens
+ // should be refilled and there should be no delay.
+ CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, 0, false, false, 10);
+ for (int i = 0; i < kInitialBurstPackets - 1; ++i) {
+ CheckPacketIsSentImmediately();
+ }
+
+ // The first packet was a "make up", then we sent two packets "into the
+ // future", so the delay should be 1.5ms.
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMicroseconds(1500));
+}
+
+TEST_F(PacingSenderTest, NoBurstEnteringRecovery) {
+ // Configure pacing rate of 1 packet per 1 ms with no burst tokens.
+ InitPacingRate(0, QuicBandwidth::FromBytesAndTimeDelta(
+ kMaxPacketSize, QuicTime::Delta::FromMilliseconds(1)));
+ // Sending a packet will set burst tokens.
+ CheckPacketIsSentImmediately();
+
+  // Losing a packet will clear the burst tokens.
+ LostPacketVector lost_packets;
+ lost_packets.push_back(LostPacket(QuicPacketNumber(1), kMaxPacketSize));
+ AckedPacketVector empty_acked;
+ EXPECT_CALL(*mock_sender_,
+ OnCongestionEvent(true, kMaxPacketSize, _, IsEmpty(), _));
+ pacing_sender_->OnCongestionEvent(true, kMaxPacketSize, clock_.Now(),
+ empty_acked, lost_packets);
+ // One packet is sent immediately, because of 1ms pacing granularity.
+ CheckPacketIsSentImmediately();
+  // Ensure the subsequent packets are paced rather than sent immediately.
+ EXPECT_CALL(*mock_sender_, CanSend(kDefaultTCPMSS)).WillOnce(Return(true));
+ // Verify the next packet is paced and delayed 2ms due to granularity.
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(2),
+ pacing_sender_->TimeUntilSend(clock_.Now(), kDefaultTCPMSS));
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+}
+
+TEST_F(PacingSenderTest, NoBurstInRecovery) {
+ // Configure pacing rate of 1 packet per 1 ms with no burst tokens.
+ InitPacingRate(0, QuicBandwidth::FromBytesAndTimeDelta(
+ kMaxPacketSize, QuicTime::Delta::FromMilliseconds(1)));
+
+ UpdateRtt();
+
+ // Ensure only one packet is sent immediately and the rest are paced.
+ CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, 0, true, false, 10);
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+}
+
+TEST_F(PacingSenderTest, CwndLimited) {
+ // Configure pacing rate of 1 packet per 1 ms, no initial burst.
+ InitPacingRate(0, QuicBandwidth::FromBytesAndTimeDelta(
+ kMaxPacketSize, QuicTime::Delta::FromMilliseconds(1)));
+
+ UpdateRtt();
+
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ // Packet 3 will be delayed 2ms.
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+
+ // Wake up on time.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(2));
+ // After sending packet 3, cwnd is limited.
+ CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, kBytesInFlight, false,
+ true, 10);
+
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
+ // Verify pacing sender stops making up for lost time after sending packet 3.
+ // Packet 6 will be delayed 2ms.
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+}
+
+TEST_F(PacingSenderTest, LumpyPacingWithInitialBurstToken) {
+  // Set the lumpy size to 3 and the cwnd fraction to 0.5.
+ SetQuicFlag(&FLAGS_quic_lumpy_pacing_size, 3);
+ SetQuicFlag(&FLAGS_quic_lumpy_pacing_cwnd_fraction, 0.5f);
+ // Configure pacing rate of 1 packet per 1 ms.
+ InitPacingRate(10, QuicBandwidth::FromBytesAndTimeDelta(
+ kMaxPacketSize, QuicTime::Delta::FromMilliseconds(1)));
+ UpdateRtt();
+
+ // Send 10 packets, and verify that they are not paced.
+ for (int i = 0; i < kInitialBurstPackets; ++i) {
+ CheckPacketIsSentImmediately();
+ }
+
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ // Packet 14 will be delayed 3ms.
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(3));
+
+ // Wake up on time.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3));
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ // Packet 17 will be delayed 3ms.
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(3));
+
+ // Application throttles sending.
+ OnApplicationLimited();
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ CheckPacketIsSentImmediately();
+ // Packet 20 will be delayed 3ms.
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(3));
+
+ // Wake up on time.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(3));
+ CheckPacketIsSentImmediately();
+ // After sending packet 21, cwnd is limited.
+ CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, kBytesInFlight, false,
+ true, 10);
+
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100));
+ // Suppose cwnd size is 5, so that lumpy size becomes 2.
+ CheckPacketIsSentImmediately(HAS_RETRANSMITTABLE_DATA, kBytesInFlight, false,
+ false, 5);
+ CheckPacketIsSentImmediately();
+ // Packet 24 will be delayed 2ms.
+ CheckPacketIsDelayed(QuicTime::Delta::FromMilliseconds(2));
+}
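+
+// The expectations above are consistent with the lumpy token count being the
+// smaller of FLAGS_quic_lumpy_pacing_size and the cwnd fraction expressed in
+// packets: with size 3, fraction 0.5 and a 10-packet cwnd that is
+// min(3, 5) = 3 unpaced packets per 3ms gap, and with a 5-packet cwnd it
+// drops to min(3, 2) = 2 packets per 2ms gap.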
+
+} // namespace test
+} // namespace quic
diff --git a/quic/core/congestion_control/prr_sender.cc b/quic/core/congestion_control/prr_sender.cc
new file mode 100644
index 0000000..c09b312
--- /dev/null
+++ b/quic/core/congestion_control/prr_sender.cc
@@ -0,0 +1,62 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/prr_sender.h"
+
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+
+namespace quic {
+
+PrrSender::PrrSender()
+ : bytes_sent_since_loss_(0),
+ bytes_delivered_since_loss_(0),
+ ack_count_since_loss_(0),
+ bytes_in_flight_before_loss_(0) {}
+
+void PrrSender::OnPacketSent(QuicByteCount sent_bytes) {
+ bytes_sent_since_loss_ += sent_bytes;
+}
+
+void PrrSender::OnPacketLost(QuicByteCount prior_in_flight) {
+ bytes_sent_since_loss_ = 0;
+ bytes_in_flight_before_loss_ = prior_in_flight;
+ bytes_delivered_since_loss_ = 0;
+ ack_count_since_loss_ = 0;
+}
+
+void PrrSender::OnPacketAcked(QuicByteCount acked_bytes) {
+ bytes_delivered_since_loss_ += acked_bytes;
+ ++ack_count_since_loss_;
+}
+
+bool PrrSender::CanSend(QuicByteCount congestion_window,
+ QuicByteCount bytes_in_flight,
+ QuicByteCount slowstart_threshold) const {
+  // Return true in order to ensure limited transmit always works.
+ if (bytes_sent_since_loss_ == 0 || bytes_in_flight < kMaxSegmentSize) {
+ return true;
+ }
+ if (congestion_window > bytes_in_flight) {
+ // During PRR-SSRB, limit outgoing packets to 1 extra MSS per ack, instead
+ // of sending the entire available window. This prevents burst retransmits
+ // when more packets are lost than the CWND reduction.
+ // limit = MAX(prr_delivered - prr_out, DeliveredData) + MSS
+ if (bytes_delivered_since_loss_ + ack_count_since_loss_ * kMaxSegmentSize <=
+ bytes_sent_since_loss_) {
+ return false;
+ }
+ return true;
+ }
+ // Implement Proportional Rate Reduction (RFC6937).
+ // Checks a simplified version of the PRR formula that doesn't use division:
+ // AvailableSendWindow =
+ // CEIL(prr_delivered * ssthresh / BytesInFlightAtLoss) - prr_sent
+ if (bytes_delivered_since_loss_ * slowstart_threshold >
+ bytes_sent_since_loss_ * bytes_in_flight_before_loss_) {
+ return true;
+ }
+ return false;
+}
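+
+// A worked example of the division-free check above, using the numbers from
+// prr_sender_test.cc (50 packets in flight at loss, an ssthresh of 25
+// packets): after one ack and one retransmission, prr_delivered * ssthresh is
+// 1 * 25 = 25 MSS^2 while prr_out * BytesInFlightAtLoss is 1 * 50 = 50 MSS^2,
+// so sending is disallowed; the second ack gives 50 vs. 50, still disallowed,
+// and only the third ack (75 vs. 50) opens the window again. That is one
+// transmission per two acked packets, i.e. roughly ssthresh /
+// BytesInFlightAtLoss times the delivery rate.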
+
+} // namespace quic
diff --git a/quic/core/congestion_control/prr_sender.h b/quic/core/congestion_control/prr_sender.h
new file mode 100644
index 0000000..2190912
--- /dev/null
+++ b/quic/core/congestion_control/prr_sender.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Implements Proportional Rate Reduction (PRR) per RFC 6937.
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_PRR_SENDER_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_PRR_SENDER_H_
+
+#include "net/third_party/quiche/src/quic/core/quic_bandwidth.h"
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
+
+namespace quic {
+
+class QUIC_EXPORT_PRIVATE PrrSender {
+ public:
+ PrrSender();
+ // OnPacketLost should be called on the first loss that triggers a recovery
+ // period and all other methods in this class should only be called when in
+ // recovery.
+ void OnPacketLost(QuicByteCount prior_in_flight);
+ void OnPacketSent(QuicByteCount sent_bytes);
+ void OnPacketAcked(QuicByteCount acked_bytes);
+ bool CanSend(QuicByteCount congestion_window,
+ QuicByteCount bytes_in_flight,
+ QuicByteCount slowstart_threshold) const;
+
+ private:
+ // Bytes sent and acked since the last loss event.
+  // |bytes_sent_since_loss_| is the same as "prr_out" in RFC 6937,
+  // and |bytes_delivered_since_loss_| is the same as "prr_delivered".
+ QuicByteCount bytes_sent_since_loss_;
+ QuicByteCount bytes_delivered_since_loss_;
+ size_t ack_count_since_loss_;
+
+  // The number of bytes in flight before the last loss event.
+ QuicByteCount bytes_in_flight_before_loss_;
+};
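+
+// Illustrative call order during a single recovery period (a sketch; |cwnd|,
+// |bytes_in_flight| and |ssthresh| stand in for the caller's own bookkeeping):
+//
+//   PrrSender prr;
+//   prr.OnPacketLost(bytes_in_flight);   // On the loss that starts recovery.
+//   ...
+//   prr.OnPacketAcked(acked_bytes);      // On every ack while in recovery.
+//   if (prr.CanSend(cwnd, bytes_in_flight, ssthresh)) {
+//     prr.OnPacketSent(packet_bytes);    // On every (re)transmission.
+//   }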
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_PRR_SENDER_H_
diff --git a/quic/core/congestion_control/prr_sender_test.cc b/quic/core/congestion_control/prr_sender_test.cc
new file mode 100644
index 0000000..4591065
--- /dev/null
+++ b/quic/core/congestion_control/prr_sender_test.cc
@@ -0,0 +1,123 @@
+// Copyright (c) 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/prr_sender.h"
+
+#include <algorithm>
+
+#include "net/third_party/quiche/src/quic/core/crypto/crypto_protocol.h"
+#include "net/third_party/quiche/src/quic/core/quic_constants.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+
+namespace quic {
+namespace test {
+
+namespace {
+// Constant based on TCP defaults.
+const QuicByteCount kMaxSegmentSize = kDefaultTCPMSS;
+} // namespace
+
+class PrrSenderTest : public QuicTest {};
+
+TEST_F(PrrSenderTest, SingleLossResultsInSendOnEveryOtherAck) {
+ PrrSender prr;
+ QuicPacketCount num_packets_in_flight = 50;
+ QuicByteCount bytes_in_flight = num_packets_in_flight * kMaxSegmentSize;
+ const QuicPacketCount ssthresh_after_loss = num_packets_in_flight / 2;
+ const QuicByteCount congestion_window = ssthresh_after_loss * kMaxSegmentSize;
+
+ prr.OnPacketLost(bytes_in_flight);
+ // Ack a packet. PRR allows one packet to leave immediately.
+ prr.OnPacketAcked(kMaxSegmentSize);
+ bytes_in_flight -= kMaxSegmentSize;
+ EXPECT_TRUE(prr.CanSend(congestion_window, bytes_in_flight,
+ ssthresh_after_loss * kMaxSegmentSize));
+ // Send retransmission.
+ prr.OnPacketSent(kMaxSegmentSize);
+ // PRR shouldn't allow sending any more packets.
+ EXPECT_FALSE(prr.CanSend(congestion_window, bytes_in_flight,
+ ssthresh_after_loss * kMaxSegmentSize));
+
+ // One packet is lost, and one ack was consumed above. PRR now paces
+  // transmissions through the remaining 48 acks. PRR will alternately
+ // disallow and allow a packet to be sent in response to an ack.
+ for (uint64_t i = 0; i < ssthresh_after_loss - 1; ++i) {
+ // Ack a packet. PRR shouldn't allow sending a packet in response.
+ prr.OnPacketAcked(kMaxSegmentSize);
+ bytes_in_flight -= kMaxSegmentSize;
+ EXPECT_FALSE(prr.CanSend(congestion_window, bytes_in_flight,
+ ssthresh_after_loss * kMaxSegmentSize));
+ // Ack another packet. PRR should now allow sending a packet in response.
+ prr.OnPacketAcked(kMaxSegmentSize);
+ bytes_in_flight -= kMaxSegmentSize;
+ EXPECT_TRUE(prr.CanSend(congestion_window, bytes_in_flight,
+ ssthresh_after_loss * kMaxSegmentSize));
+ // Send a packet in response.
+ prr.OnPacketSent(kMaxSegmentSize);
+ bytes_in_flight += kMaxSegmentSize;
+ }
+
+ // Since bytes_in_flight is now equal to congestion_window, PRR now maintains
+ // packet conservation, allowing one packet to be sent in response to an ack.
+ EXPECT_EQ(congestion_window, bytes_in_flight);
+ for (int i = 0; i < 10; ++i) {
+ // Ack a packet.
+ prr.OnPacketAcked(kMaxSegmentSize);
+ bytes_in_flight -= kMaxSegmentSize;
+ EXPECT_TRUE(prr.CanSend(congestion_window, bytes_in_flight,
+ ssthresh_after_loss * kMaxSegmentSize));
+ // Send a packet in response, since PRR allows it.
+ prr.OnPacketSent(kMaxSegmentSize);
+ bytes_in_flight += kMaxSegmentSize;
+
+ // Since bytes_in_flight is equal to the congestion_window,
+ // PRR disallows sending.
+ EXPECT_EQ(congestion_window, bytes_in_flight);
+ EXPECT_FALSE(prr.CanSend(congestion_window, bytes_in_flight,
+ ssthresh_after_loss * kMaxSegmentSize));
+ }
+}
+
+TEST_F(PrrSenderTest, BurstLossResultsInSlowStart) {
+ PrrSender prr;
+ QuicByteCount bytes_in_flight = 20 * kMaxSegmentSize;
+ const QuicPacketCount num_packets_lost = 13;
+ const QuicPacketCount ssthresh_after_loss = 10;
+ const QuicByteCount congestion_window = ssthresh_after_loss * kMaxSegmentSize;
+
+ // Lose 13 packets.
+ bytes_in_flight -= num_packets_lost * kMaxSegmentSize;
+ prr.OnPacketLost(bytes_in_flight);
+
+  // PRR-SSRB will allow each of the following 3 acks to send up to 2 packets.
+ for (int i = 0; i < 3; ++i) {
+ prr.OnPacketAcked(kMaxSegmentSize);
+ bytes_in_flight -= kMaxSegmentSize;
+ // PRR-SSRB should allow two packets to be sent.
+ for (int j = 0; j < 2; ++j) {
+ EXPECT_TRUE(prr.CanSend(congestion_window, bytes_in_flight,
+ ssthresh_after_loss * kMaxSegmentSize));
+ // Send a packet in response.
+ prr.OnPacketSent(kMaxSegmentSize);
+ bytes_in_flight += kMaxSegmentSize;
+ }
+ // PRR should allow no more than 2 packets in response to an ack.
+ EXPECT_FALSE(prr.CanSend(congestion_window, bytes_in_flight,
+ ssthresh_after_loss * kMaxSegmentSize));
+ }
+
+ // Out of SSRB mode, PRR allows one send in response to each ack.
+ for (int i = 0; i < 10; ++i) {
+ prr.OnPacketAcked(kMaxSegmentSize);
+ bytes_in_flight -= kMaxSegmentSize;
+ EXPECT_TRUE(prr.CanSend(congestion_window, bytes_in_flight,
+ ssthresh_after_loss * kMaxSegmentSize));
+ // Send a packet in response.
+ prr.OnPacketSent(kMaxSegmentSize);
+ bytes_in_flight += kMaxSegmentSize;
+ }
+}
+
+} // namespace test
+} // namespace quic
diff --git a/quic/core/congestion_control/rtt_stats.cc b/quic/core/congestion_control/rtt_stats.cc
new file mode 100644
index 0000000..e1a6372
--- /dev/null
+++ b/quic/core/congestion_control/rtt_stats.cc
@@ -0,0 +1,103 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
+
+#include <cstdlib> // std::abs
+
+#include "net/third_party/quiche/src/quic/platform/api/quic_flag_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+
+namespace quic {
+
+namespace {
+
+// Default initial rtt used before any samples are received.
+const int kInitialRttMs = 100;
+const float kAlpha = 0.125f;
+const float kOneMinusAlpha = (1 - kAlpha);
+const float kBeta = 0.25f;
+const float kOneMinusBeta = (1 - kBeta);
+
+} // namespace
+
+RttStats::RttStats()
+ : latest_rtt_(QuicTime::Delta::Zero()),
+ min_rtt_(QuicTime::Delta::Zero()),
+ smoothed_rtt_(QuicTime::Delta::Zero()),
+ previous_srtt_(QuicTime::Delta::Zero()),
+ mean_deviation_(QuicTime::Delta::Zero()),
+ initial_rtt_(QuicTime::Delta::FromMilliseconds(kInitialRttMs)),
+ max_ack_delay_(QuicTime::Delta::Zero()),
+ ignore_max_ack_delay_(false) {}
+
+void RttStats::ExpireSmoothedMetrics() {
+ mean_deviation_ = std::max(
+ mean_deviation_, QuicTime::Delta::FromMicroseconds(std::abs(
+ (smoothed_rtt_ - latest_rtt_).ToMicroseconds())));
+ smoothed_rtt_ = std::max(smoothed_rtt_, latest_rtt_);
+}
+
+// Updates the RTT based on a new sample.
+void RttStats::UpdateRtt(QuicTime::Delta send_delta,
+ QuicTime::Delta ack_delay,
+ QuicTime now) {
+ if (send_delta.IsInfinite() || send_delta <= QuicTime::Delta::Zero()) {
+ QUIC_LOG_FIRST_N(WARNING, 3)
+        << "Ignoring measured send_delta, because it is "
+ << "either infinite, zero, or negative. send_delta = "
+ << send_delta.ToMicroseconds();
+ return;
+ }
+
+ // Update min_rtt_ first. min_rtt_ does not use an rtt_sample corrected for
+ // ack_delay but the raw observed send_delta, since poor clock granularity at
+ // the client may cause a high ack_delay to result in underestimation of the
+ // min_rtt_.
+ if (min_rtt_.IsZero() || min_rtt_ > send_delta) {
+ min_rtt_ = send_delta;
+ }
+
+ QuicTime::Delta rtt_sample(send_delta);
+ previous_srtt_ = smoothed_rtt_;
+
+ if (ignore_max_ack_delay_) {
+ ack_delay = QuicTime::Delta::Zero();
+ }
+  // Correct for ack_delay if information received from the peer results in an
+  // RTT sample at least as large as min_rtt. Otherwise, only use the
+ // send_delta.
+ if (rtt_sample > ack_delay) {
+ if (rtt_sample - min_rtt_ >= ack_delay) {
+ max_ack_delay_ = std::max(max_ack_delay_, ack_delay);
+ rtt_sample = rtt_sample - ack_delay;
+ }
+ }
+ latest_rtt_ = rtt_sample;
+  // First RTT measurement.
+ if (smoothed_rtt_.IsZero()) {
+ smoothed_rtt_ = rtt_sample;
+ mean_deviation_ =
+ QuicTime::Delta::FromMicroseconds(rtt_sample.ToMicroseconds() / 2);
+ } else {
+ mean_deviation_ = QuicTime::Delta::FromMicroseconds(static_cast<int64_t>(
+ kOneMinusBeta * mean_deviation_.ToMicroseconds() +
+ kBeta * std::abs((smoothed_rtt_ - rtt_sample).ToMicroseconds())));
+ smoothed_rtt_ = kOneMinusAlpha * smoothed_rtt_ + kAlpha * rtt_sample;
+ QUIC_DVLOG(1) << " smoothed_rtt(us):" << smoothed_rtt_.ToMicroseconds()
+ << " mean_deviation(us):" << mean_deviation_.ToMicroseconds();
+ }
+}
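+
+// A worked example of the EWMA update above: with smoothed_rtt_ at 300ms and a
+// new (ack-delay-corrected) sample of 200ms, the new smoothed RTT is
+// 0.875 * 300ms + 0.125 * 200ms = 287.5ms, and the mean deviation becomes
+// 0.75 * mean_deviation_ + 0.25 * |300ms - 200ms|. The 287.5ms value is the
+// 287500us expected by rtt_stats_test.cc.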
+
+void RttStats::OnConnectionMigration() {
+ latest_rtt_ = QuicTime::Delta::Zero();
+ min_rtt_ = QuicTime::Delta::Zero();
+ smoothed_rtt_ = QuicTime::Delta::Zero();
+ mean_deviation_ = QuicTime::Delta::Zero();
+ initial_rtt_ = QuicTime::Delta::FromMilliseconds(kInitialRttMs);
+ max_ack_delay_ = QuicTime::Delta::Zero();
+}
+
+} // namespace quic
diff --git a/quic/core/congestion_control/rtt_stats.h b/quic/core/congestion_control/rtt_stats.h
new file mode 100644
index 0000000..5641f13
--- /dev/null
+++ b/quic/core/congestion_control/rtt_stats.h
@@ -0,0 +1,110 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// A convenience class to store rtt samples and calculate smoothed rtt.
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_RTT_STATS_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_RTT_STATS_H_
+
+#include <algorithm>
+#include <cstdint>
+
+#include "base/macros.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_bug_tracker.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
+
+namespace quic {
+
+namespace test {
+class RttStatsPeer;
+} // namespace test
+
+class QUIC_EXPORT_PRIVATE RttStats {
+ public:
+ RttStats();
+ RttStats(const RttStats&) = delete;
+ RttStats& operator=(const RttStats&) = delete;
+
+ // Updates the RTT from an incoming ack which is received |send_delta| after
+ // the packet is sent and the peer reports the ack being delayed |ack_delay|.
+ void UpdateRtt(QuicTime::Delta send_delta,
+ QuicTime::Delta ack_delay,
+ QuicTime now);
+
+ // Causes the smoothed_rtt to be increased to the latest_rtt if the latest_rtt
+ // is larger. The mean deviation is increased to the most recent deviation if
+ // it's larger.
+ void ExpireSmoothedMetrics();
+
+ // Called when connection migrates and rtt measurement needs to be reset.
+ void OnConnectionMigration();
+
+ // Returns the EWMA smoothed RTT for the connection.
+ // May return Zero if no valid updates have occurred.
+ QuicTime::Delta smoothed_rtt() const { return smoothed_rtt_; }
+
+ // Returns the EWMA smoothed RTT prior to the most recent RTT sample.
+ QuicTime::Delta previous_srtt() const { return previous_srtt_; }
+
+ QuicTime::Delta initial_rtt() const { return initial_rtt_; }
+
+ QuicTime::Delta SmoothedOrInitialRtt() const {
+ return smoothed_rtt_.IsZero() ? initial_rtt_ : smoothed_rtt_;
+ }
+
+ // Sets an initial RTT to be used for SmoothedRtt before any RTT updates.
+ void set_initial_rtt(QuicTime::Delta initial_rtt) {
+ if (initial_rtt.ToMicroseconds() <= 0) {
+ QUIC_BUG << "Attempt to set initial rtt to <= 0.";
+ return;
+ }
+ initial_rtt_ = initial_rtt;
+ }
+
+ // The most recent rtt measurement.
+ // May return Zero if no valid updates have occurred.
+ QuicTime::Delta latest_rtt() const { return latest_rtt_; }
+
+ // Returns the min_rtt for the entire connection.
+ // May return Zero if no valid updates have occurred.
+ QuicTime::Delta min_rtt() const { return min_rtt_; }
+
+ QuicTime::Delta mean_deviation() const { return mean_deviation_; }
+
+ QuicTime::Delta max_ack_delay() const { return max_ack_delay_; }
+
+ bool ignore_max_ack_delay() const { return ignore_max_ack_delay_; }
+
+ void set_ignore_max_ack_delay(bool ignore_max_ack_delay) {
+ ignore_max_ack_delay_ = ignore_max_ack_delay;
+ }
+
+ void set_initial_max_ack_delay(QuicTime::Delta initial_max_ack_delay) {
+ max_ack_delay_ = std::max(max_ack_delay_, initial_max_ack_delay);
+ }
+
+ private:
+ friend class test::RttStatsPeer;
+
+ QuicTime::Delta latest_rtt_;
+ QuicTime::Delta min_rtt_;
+ QuicTime::Delta smoothed_rtt_;
+ QuicTime::Delta previous_srtt_;
+ // Mean RTT deviation during this session.
+  // Approximates the standard deviation; for a normally distributed signal,
+  // the standard deviation is roughly 1.25 times the mean deviation.
+ QuicTime::Delta mean_deviation_;
+ QuicTime::Delta initial_rtt_;
+ // The maximum ack delay observed over the connection after excluding ack
+ // delays that were too large to be included in an RTT measurement.
+ QuicTime::Delta max_ack_delay_;
+ // Whether to ignore the peer's max ack delay.
+ bool ignore_max_ack_delay_;
+};
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_RTT_STATS_H_
diff --git a/quic/core/congestion_control/rtt_stats_test.cc b/quic/core/congestion_control/rtt_stats_test.cc
new file mode 100644
index 0000000..83cb155
--- /dev/null
+++ b/quic/core/congestion_control/rtt_stats_test.cc
@@ -0,0 +1,230 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
+
+#include <cmath>
+
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_mock_log.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+#include "net/third_party/quiche/src/quic/test_tools/rtt_stats_peer.h"
+
+using testing::_;
+using testing::Message;
+
+namespace quic {
+namespace test {
+
+class RttStatsTest : public QuicTest {
+ protected:
+ RttStats rtt_stats_;
+};
+
+TEST_F(RttStatsTest, DefaultsBeforeUpdate) {
+ EXPECT_LT(QuicTime::Delta::Zero(), rtt_stats_.initial_rtt());
+ EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.min_rtt());
+ EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.smoothed_rtt());
+}
+
+TEST_F(RttStatsTest, SmoothedRtt) {
+ // Verify that ack_delay is ignored in the first measurement.
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(300),
+ QuicTime::Delta::FromMilliseconds(100),
+ QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.max_ack_delay());
+ // Verify that a plausible ack delay increases the max ack delay.
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(400),
+ QuicTime::Delta::FromMilliseconds(100),
+ QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(100), rtt_stats_.max_ack_delay());
+  // Verify that a plausible ack delay is subtracted from the RTT sample.
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(350),
+ QuicTime::Delta::FromMilliseconds(50), QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(100), rtt_stats_.max_ack_delay());
+  // Verify that a large, implausible ack_delay is not subtracted.
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(200),
+ QuicTime::Delta::FromMilliseconds(300),
+ QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMicroseconds(287500),
+ rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(100), rtt_stats_.max_ack_delay());
+}
+
+TEST_F(RttStatsTest, SmoothedRttIgnoreAckDelay) {
+ rtt_stats_.set_ignore_max_ack_delay(true);
+ // Verify that ack_delay is ignored in the first measurement.
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(300),
+ QuicTime::Delta::FromMilliseconds(100),
+ QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.max_ack_delay());
+  // Verify that the max ack delay is not tracked when ack delay is ignored.
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(300),
+ QuicTime::Delta::FromMilliseconds(100),
+ QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.max_ack_delay());
+  // Verify that a plausible ack delay is still ignored.
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(300),
+ QuicTime::Delta::FromMilliseconds(50), QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(300), rtt_stats_.smoothed_rtt());
+  // Verify that a large, erroneous ack_delay is also ignored.
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(200),
+ QuicTime::Delta::FromMilliseconds(300),
+ QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMicroseconds(287500),
+ rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.max_ack_delay());
+}
+
+// Ensure that the potential rounding artifacts in EWMA calculation do not cause
+// the SRTT to drift too far from the exact value.
+TEST_F(RttStatsTest, SmoothedRttStability) {
+ for (size_t time = 3; time < 20000; time++) {
+ RttStats stats;
+ for (size_t i = 0; i < 100; i++) {
+ stats.UpdateRtt(QuicTime::Delta::FromMicroseconds(time),
+ QuicTime::Delta::FromMilliseconds(0), QuicTime::Zero());
+ int64_t time_delta_us = stats.smoothed_rtt().ToMicroseconds() - time;
+ ASSERT_LE(std::abs(time_delta_us), 1);
+ }
+ }
+}
+
+TEST_F(RttStatsTest, PreviousSmoothedRtt) {
+  // Verify that the first sample sets both the latest and smoothed RTT.
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(200),
+ QuicTime::Delta::FromMilliseconds(0), QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.previous_srtt());
+ // Ensure the previous SRTT is 200ms after a 100ms sample.
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
+ QuicTime::Delta::Zero(), QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(100), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMicroseconds(187500).ToMicroseconds(),
+ rtt_stats_.smoothed_rtt().ToMicroseconds());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.previous_srtt());
+}
+
+TEST_F(RttStatsTest, MinRtt) {
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(200),
+ QuicTime::Delta::Zero(), QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.min_rtt());
+ rtt_stats_.UpdateRtt(
+ QuicTime::Delta::FromMilliseconds(10), QuicTime::Delta::Zero(),
+ QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(10));
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10), rtt_stats_.min_rtt());
+ rtt_stats_.UpdateRtt(
+ QuicTime::Delta::FromMilliseconds(50), QuicTime::Delta::Zero(),
+ QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(20));
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10), rtt_stats_.min_rtt());
+ rtt_stats_.UpdateRtt(
+ QuicTime::Delta::FromMilliseconds(50), QuicTime::Delta::Zero(),
+ QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(30));
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10), rtt_stats_.min_rtt());
+ rtt_stats_.UpdateRtt(
+ QuicTime::Delta::FromMilliseconds(50), QuicTime::Delta::Zero(),
+ QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(40));
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10), rtt_stats_.min_rtt());
+  // Verify that ack_delay is not subtracted when recording min_rtt_.
+ rtt_stats_.UpdateRtt(
+ QuicTime::Delta::FromMilliseconds(7),
+ QuicTime::Delta::FromMilliseconds(2),
+ QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(50));
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(7), rtt_stats_.min_rtt());
+}
+
+TEST_F(RttStatsTest, ExpireSmoothedMetrics) {
+ QuicTime::Delta initial_rtt = QuicTime::Delta::FromMilliseconds(10);
+ rtt_stats_.UpdateRtt(initial_rtt, QuicTime::Delta::Zero(), QuicTime::Zero());
+ EXPECT_EQ(initial_rtt, rtt_stats_.min_rtt());
+ EXPECT_EQ(initial_rtt, rtt_stats_.smoothed_rtt());
+
+ EXPECT_EQ(0.5 * initial_rtt, rtt_stats_.mean_deviation());
+
+ // Update once with a 20ms RTT.
+ QuicTime::Delta doubled_rtt = 2 * initial_rtt;
+ rtt_stats_.UpdateRtt(doubled_rtt, QuicTime::Delta::Zero(), QuicTime::Zero());
+ EXPECT_EQ(1.125 * initial_rtt, rtt_stats_.smoothed_rtt());
+
+ // Expire the smoothed metrics, increasing smoothed rtt and mean deviation.
+ rtt_stats_.ExpireSmoothedMetrics();
+ EXPECT_EQ(doubled_rtt, rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(0.875 * initial_rtt, rtt_stats_.mean_deviation());
+
+  // Now go back down to 5ms and verify that the smoothed rtt falls below the
+  // doubled rtt while the mean deviation rises above the initial rtt.
+ QuicTime::Delta half_rtt = 0.5 * initial_rtt;
+ rtt_stats_.UpdateRtt(half_rtt, QuicTime::Delta::Zero(), QuicTime::Zero());
+ EXPECT_GT(doubled_rtt, rtt_stats_.smoothed_rtt());
+ EXPECT_LT(initial_rtt, rtt_stats_.mean_deviation());
+}
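+
+// The constants above follow directly from the EWMA parameters: the first 10ms
+// sample seeds srtt = 10ms and mean deviation = 5ms; the 20ms sample yields
+// srtt = 0.875 * 10 + 0.125 * 20 = 11.25ms (1.125 * initial) and mean
+// deviation = 0.75 * 5 + 0.25 * |10 - 20| = 6.25ms; expiring then raises srtt
+// to 20ms and mean deviation to max(6.25, |11.25 - 20|) = 8.75ms
+// (0.875 * initial).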
+
+TEST_F(RttStatsTest, UpdateRttWithBadSendDeltas) {
+ // Make sure we ignore bad RTTs.
+ CREATE_QUIC_MOCK_LOG(log);
+
+ QuicTime::Delta initial_rtt = QuicTime::Delta::FromMilliseconds(10);
+ rtt_stats_.UpdateRtt(initial_rtt, QuicTime::Delta::Zero(), QuicTime::Zero());
+ EXPECT_EQ(initial_rtt, rtt_stats_.min_rtt());
+ EXPECT_EQ(initial_rtt, rtt_stats_.smoothed_rtt());
+
+ std::vector<QuicTime::Delta> bad_send_deltas;
+ bad_send_deltas.push_back(QuicTime::Delta::Zero());
+ bad_send_deltas.push_back(QuicTime::Delta::Infinite());
+ bad_send_deltas.push_back(QuicTime::Delta::FromMicroseconds(-1000));
+ log.StartCapturingLogs();
+
+ for (QuicTime::Delta bad_send_delta : bad_send_deltas) {
+ SCOPED_TRACE(Message() << "bad_send_delta = "
+ << bad_send_delta.ToMicroseconds());
+ if (QUIC_LOG_WARNING_IS_ON()) {
+ EXPECT_QUIC_LOG_CALL_CONTAINS(log, WARNING, "Ignoring");
+ }
+ rtt_stats_.UpdateRtt(bad_send_delta, QuicTime::Delta::Zero(),
+ QuicTime::Zero());
+ EXPECT_EQ(initial_rtt, rtt_stats_.min_rtt());
+ EXPECT_EQ(initial_rtt, rtt_stats_.smoothed_rtt());
+ }
+}
+
+TEST_F(RttStatsTest, ResetAfterConnectionMigrations) {
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(200),
+ QuicTime::Delta::FromMilliseconds(0), QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.min_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(0), rtt_stats_.max_ack_delay());
+
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(300),
+ QuicTime::Delta::FromMilliseconds(100),
+ QuicTime::Zero());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(200), rtt_stats_.min_rtt());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(100), rtt_stats_.max_ack_delay());
+
+ // Reset rtt stats on connection migrations.
+ rtt_stats_.OnConnectionMigration();
+ EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.latest_rtt());
+ EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.min_rtt());
+ EXPECT_EQ(QuicTime::Delta::Zero(), rtt_stats_.max_ack_delay());
+}
+
+} // namespace test
+} // namespace quic
diff --git a/quic/core/congestion_control/send_algorithm_interface.cc b/quic/core/congestion_control/send_algorithm_interface.cc
new file mode 100644
index 0000000..07efa43
--- /dev/null
+++ b/quic/core/congestion_control/send_algorithm_interface.cc
@@ -0,0 +1,56 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/send_algorithm_interface.h"
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/bbr_sender.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/tcp_cubic_sender_bytes.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_bug_tracker.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_fallthrough.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flag_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_pcc_sender.h"
+
+namespace quic {
+
+class RttStats;
+
+// Factory for send side congestion control algorithm.
+SendAlgorithmInterface* SendAlgorithmInterface::Create(
+ const QuicClock* clock,
+ const RttStats* rtt_stats,
+ const QuicUnackedPacketMap* unacked_packets,
+ CongestionControlType congestion_control_type,
+ QuicRandom* random,
+ QuicConnectionStats* stats,
+ QuicPacketCount initial_congestion_window) {
+ QuicPacketCount max_congestion_window = kDefaultMaxCongestionWindowPackets;
+ switch (congestion_control_type) {
+ case kGoogCC: // GoogCC is not supported by quic/core, fall back to BBR.
+ case kBBR:
+ return new BbrSender(rtt_stats, unacked_packets,
+ initial_congestion_window, max_congestion_window,
+ random);
+ case kPCC:
+ if (GetQuicReloadableFlag(quic_enable_pcc3)) {
+ return CreatePccSender(clock, rtt_stats, unacked_packets, random, stats,
+ initial_congestion_window,
+ max_congestion_window);
+ }
+ // Fall back to CUBIC if PCC is disabled.
+ QUIC_FALLTHROUGH_INTENDED;
+ case kCubicBytes:
+ return new TcpCubicSenderBytes(
+ clock, rtt_stats, false /* don't use Reno */,
+ initial_congestion_window, max_congestion_window, stats);
+ case kRenoBytes:
+ return new TcpCubicSenderBytes(clock, rtt_stats, true /* use Reno */,
+ initial_congestion_window,
+ max_congestion_window, stats);
+ }
+ return nullptr;
+}
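+
+// Illustrative use of the factory (a sketch; the clock, RTT stats, unacked
+// packet map, random generator and connection stats are assumed to come from
+// an existing connection):
+//
+//   std::unique_ptr<SendAlgorithmInterface> sender(
+//       SendAlgorithmInterface::Create(clock, rtt_stats, unacked_packets,
+//                                      kBBR, random, stats,
+//                                      /*initial_congestion_window=*/10));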
+
+} // namespace quic
diff --git a/quic/core/congestion_control/send_algorithm_interface.h b/quic/core/congestion_control/send_algorithm_interface.h
new file mode 100644
index 0000000..5df9a93
--- /dev/null
+++ b/quic/core/congestion_control/send_algorithm_interface.h
@@ -0,0 +1,146 @@
+// Copyright (c) 2012 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// The pure virtual base class for send-side congestion control algorithms.
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_SEND_ALGORITHM_INTERFACE_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_SEND_ALGORITHM_INTERFACE_H_
+
+#include <algorithm>
+#include <map>
+
+#include "net/third_party/quiche/src/quic/core/crypto/quic_random.h"
+#include "net/third_party/quiche/src/quic/core/quic_bandwidth.h"
+#include "net/third_party/quiche/src/quic/core/quic_config.h"
+#include "net/third_party/quiche/src/quic/core/quic_connection_stats.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+#include "net/third_party/quiche/src/quic/core/quic_types.h"
+#include "net/third_party/quiche/src/quic/core/quic_unacked_packet_map.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_clock.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_string.h"
+
+namespace quic {
+
+class CachedNetworkParameters;
+class RttStats;
+
+const QuicPacketCount kDefaultMaxCongestionWindowPackets = 2000;
+
+class QUIC_EXPORT_PRIVATE SendAlgorithmInterface {
+ public:
+ static SendAlgorithmInterface* Create(
+ const QuicClock* clock,
+ const RttStats* rtt_stats,
+ const QuicUnackedPacketMap* unacked_packets,
+ CongestionControlType type,
+ QuicRandom* random,
+ QuicConnectionStats* stats,
+ QuicPacketCount initial_congestion_window);
+
+ virtual ~SendAlgorithmInterface() {}
+
+ virtual void SetFromConfig(const QuicConfig& config,
+ Perspective perspective) = 0;
+
+ // Sets the number of connections to emulate when doing congestion control,
+ // particularly for congestion avoidance. Can be set any time.
+ virtual void SetNumEmulatedConnections(int num_connections) = 0;
+
+ // Sets the initial congestion window in number of packets. May be ignored
+ // if called after the initial congestion window is no longer relevant.
+ virtual void SetInitialCongestionWindowInPackets(QuicPacketCount packets) = 0;
+
+ // Indicates an update to the congestion state, caused either by an incoming
+ // ack or loss event timeout. |rtt_updated| indicates whether a new
+ // latest_rtt sample has been taken, |prior_in_flight| the bytes in flight
+ // prior to the congestion event. |acked_packets| and |lost_packets| are any
+ // packets considered acked or lost as a result of the congestion event.
+ virtual void OnCongestionEvent(bool rtt_updated,
+ QuicByteCount prior_in_flight,
+ QuicTime event_time,
+ const AckedPacketVector& acked_packets,
+ const LostPacketVector& lost_packets) = 0;
+
+  // Informs the algorithm that |bytes| were sent to the wire and whether the
+  // packet is retransmittable. |bytes_in_flight| is the number of bytes in
+  // flight before the packet was sent.
+ // Note: this function must be called for every packet sent to the wire.
+ virtual void OnPacketSent(QuicTime sent_time,
+ QuicByteCount bytes_in_flight,
+ QuicPacketNumber packet_number,
+ QuicByteCount bytes,
+ HasRetransmittableData is_retransmittable) = 0;
+
+ // Called when the retransmission timeout fires. Neither OnPacketAbandoned
+ // nor OnPacketLost will be called for these packets.
+ virtual void OnRetransmissionTimeout(bool packets_retransmitted) = 0;
+
+ // Called when connection migrates and cwnd needs to be reset.
+ virtual void OnConnectionMigration() = 0;
+
+  // Decides whether the sender can send right now. Note that even
+ // when this method returns true, the sending can be delayed due to pacing.
+ virtual bool CanSend(QuicByteCount bytes_in_flight) = 0;
+
+ // The pacing rate of the send algorithm. May be zero if the rate is unknown.
+ virtual QuicBandwidth PacingRate(QuicByteCount bytes_in_flight) const = 0;
+
+  // Returns the current estimated bandwidth in bytes per second.
+  // Returns 0 when it does not have an estimate.
+ virtual QuicBandwidth BandwidthEstimate() const = 0;
+
+ // Returns the size of the current congestion window in bytes. Note, this is
+ // not the *available* window. Some send algorithms may not use a congestion
+ // window and will return 0.
+ virtual QuicByteCount GetCongestionWindow() const = 0;
+
+ // Whether the send algorithm is currently in slow start. When true, the
+ // BandwidthEstimate is expected to be too low.
+ virtual bool InSlowStart() const = 0;
+
+ // Whether the send algorithm is currently in recovery.
+ virtual bool InRecovery() const = 0;
+
+ // True when the congestion control is probing for more bandwidth and needs
+ // enough data to not be app-limited to do so.
+ // TODO(ianswett): In the future, this API may want to indicate the size of
+ // the probing packet.
+ virtual bool ShouldSendProbingPacket() const = 0;
+
+ // Returns the size of the slow start congestion window in bytes,
+ // aka ssthresh. Only defined for Cubic and Reno, other algorithms return 0.
+ virtual QuicByteCount GetSlowStartThreshold() const = 0;
+
+ virtual CongestionControlType GetCongestionControlType() const = 0;
+
+ // Notifies the congestion control algorithm of an external network
+ // measurement or prediction. Either |bandwidth| or |rtt| may be zero if no
+ // sample is available.
+ virtual void AdjustNetworkParameters(QuicBandwidth bandwidth,
+ QuicTime::Delta rtt) = 0;
+
+ // Retrieves debugging information about the current state of the
+ // send algorithm.
+ virtual QuicString GetDebugState() const = 0;
+
+ // Called when the connection has no outstanding data to send. Specifically,
+ // this means that none of the data streams are write-blocked, there are no
+  // packets in the connection queue, and there are no pending retransmissions,
+  // i.e. the sender cannot send anything for reasons other than being blocked
+  // by the congestion controller. This includes cases when the connection is
+ // blocked by the flow controller.
+ //
+ // The fact that this method is called does not necessarily imply that the
+ // connection would not be blocked by the congestion control if it actually
+ // tried to send data. If the congestion control algorithm needs to exclude
+  // such cases, it should rely on its own internal congestion control state
+  // to do so.
+ virtual void OnApplicationLimited(QuicByteCount bytes_in_flight) = 0;
+};
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_SEND_ALGORITHM_INTERFACE_H_
diff --git a/quic/core/congestion_control/send_algorithm_test.cc b/quic/core/congestion_control/send_algorithm_test.cc
new file mode 100644
index 0000000..d7bfd85
--- /dev/null
+++ b/quic/core/congestion_control/send_algorithm_test.cc
@@ -0,0 +1,369 @@
+// Copyright 2013 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include <algorithm>
+#include <map>
+#include <memory>
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/send_algorithm_interface.h"
+#include "net/third_party/quiche/src/quic/core/quic_types.h"
+#include "net/third_party/quiche/src/quic/core/quic_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_ptr_util.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_str_cat.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_string.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+#include "net/third_party/quiche/src/quic/test_tools/mock_clock.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_config_peer.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_connection_peer.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_sent_packet_manager_peer.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_test_utils.h"
+#include "net/third_party/quiche/src/quic/test_tools/simulator/quic_endpoint.h"
+#include "net/third_party/quiche/src/quic/test_tools/simulator/simulator.h"
+#include "net/third_party/quiche/src/quic/test_tools/simulator/switch.h"
+
+namespace quic {
+namespace test {
+namespace {
+
+// Use the initial CWND of 10, as 32 is too much for the test network.
+const uint32_t kInitialCongestionWindowPackets = 10;
+
+// Test network parameters. Here, the topology of the network is:
+//
+// QUIC Sender
+// |
+// | <-- local link
+// |
+// Network switch
+// * <-- the bottleneck queue in the direction
+// | of the receiver
+// |
+// | <-- test link
+// |
+// |
+// Receiver
+//
+// When setting the bandwidth of the local link and test link, choose
+// a bandwidth lower than 20Mbps, as the simulator clock only has a
+// granularity of 1us.
+
+// Default settings between the switch and the sender.
+const QuicBandwidth kLocalLinkBandwidth =
+ QuicBandwidth::FromKBitsPerSecond(10000);
+const QuicTime::Delta kLocalPropagationDelay =
+ QuicTime::Delta::FromMilliseconds(2);
+
+// Wired network settings. A typical desktop network setup with a
+// high-bandwidth test link to the receiver (50ms propagation delay).
+const QuicBandwidth kTestLinkWiredBandwidth =
+ QuicBandwidth::FromKBitsPerSecond(4000);
+const QuicTime::Delta kTestLinkWiredPropagationDelay =
+ QuicTime::Delta::FromMilliseconds(50);
+const QuicTime::Delta kTestWiredTransferTime =
+ kTestLinkWiredBandwidth.TransferTime(kMaxPacketSize) +
+ kLocalLinkBandwidth.TransferTime(kMaxPacketSize);
+const QuicTime::Delta kTestWiredRtt =
+ (kTestLinkWiredPropagationDelay + kLocalPropagationDelay +
+ kTestWiredTransferTime) *
+ 2;
+const QuicByteCount kTestWiredBdp = kTestWiredRtt * kTestLinkWiredBandwidth;
+
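+// For orientation, kTestWiredBdp is plain bandwidth-delay-product arithmetic:
+// the round trip doubles the two propagation delays plus the per-packet
+// serialization times, roughly 2 * (50ms + 2ms + a few ms), so the wired BDP
+// comes out on the order of 50KB, i.e. a few dozen full-sized packets.
+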
+// Small BDP, Bandwidth-policed network settings. In this scenario,
+// the receiver has a low-bandwidth, short propagation-delay link,
+// resulting in a small BDP. We model the policer by setting the
+// queue size to only one packet.
+const QuicBandwidth kTestLinkLowBdpBandwidth =
+ QuicBandwidth::FromKBitsPerSecond(200);
+const QuicTime::Delta kTestLinkLowBdpPropagationDelay =
+ QuicTime::Delta::FromMilliseconds(50);
+const QuicByteCount kTestPolicerQueue = kMaxPacketSize;
+
+// Satellite network settings. In a satellite network, the bottleneck
+// buffer is typically sized for non-satellite links, but the
+// propagation delay of the test link to the receiver is as much as a
+// quarter second.
+const QuicTime::Delta kTestSatellitePropagationDelay =
+ QuicTime::Delta::FromMilliseconds(250);
+
+// Cellular scenarios. In a cellular network, the bottleneck queue at
+// the edge of the network can be as great as 3MB.
+const QuicBandwidth kTestLink2GBandwidth =
+ QuicBandwidth::FromKBitsPerSecond(100);
+const QuicBandwidth kTestLink3GBandwidth =
+ QuicBandwidth::FromKBitsPerSecond(1500);
+const QuicByteCount kCellularQueue = 3 * 1024 * 1024;
+const QuicTime::Delta kTestCellularPropagationDelay =
+ QuicTime::Delta::FromMilliseconds(40);
+
+// Small RTT scenario, below the per-ack-update threshold of 30ms.
+const QuicTime::Delta kTestLinkSmallRTTDelay =
+ QuicTime::Delta::FromMilliseconds(10);
+
+const char* CongestionControlTypeToString(CongestionControlType cc_type) {
+ switch (cc_type) {
+ case kCubicBytes:
+ return "CUBIC_BYTES";
+ case kRenoBytes:
+ return "RENO_BYTES";
+ case kBBR:
+ return "BBR";
+ case kPCC:
+ return "PCC";
+ default:
+ QUIC_DLOG(FATAL) << "Unexpected CongestionControlType";
+ return nullptr;
+ }
+}
+
+struct TestParams {
+ explicit TestParams(CongestionControlType congestion_control_type)
+ : congestion_control_type(congestion_control_type) {}
+
+ friend std::ostream& operator<<(std::ostream& os, const TestParams& p) {
+ os << "{ congestion_control_type: "
+ << CongestionControlTypeToString(p.congestion_control_type);
+ os << " }";
+ return os;
+ }
+
+ const CongestionControlType congestion_control_type;
+};
+
+QuicString TestParamToString(const testing::TestParamInfo<TestParams>& params) {
+ return QuicStrCat(
+ CongestionControlTypeToString(params.param.congestion_control_type), "_");
+}
+
+// Constructs various test permutations.
+std::vector<TestParams> GetTestParams() {
+ std::vector<TestParams> params;
+ for (const CongestionControlType congestion_control_type :
+ {kBBR, kCubicBytes, kRenoBytes, kPCC}) {
+ params.push_back(TestParams(congestion_control_type));
+ }
+ return params;
+}
+
+} // namespace
+
+class SendAlgorithmTest : public QuicTestWithParam<TestParams> {
+ protected:
+ SendAlgorithmTest()
+ : simulator_(),
+ quic_sender_(&simulator_,
+ "QUIC sender",
+ "Receiver",
+ Perspective::IS_CLIENT,
+ TestConnectionId()),
+ receiver_(&simulator_,
+ "Receiver",
+ "QUIC sender",
+ Perspective::IS_SERVER,
+ TestConnectionId()) {
+ rtt_stats_ = quic_sender_.connection()->sent_packet_manager().GetRttStats();
+ sender_ = SendAlgorithmInterface::Create(
+ simulator_.GetClock(), rtt_stats_,
+ QuicSentPacketManagerPeer::GetUnackedPacketMap(
+ QuicConnectionPeer::GetSentPacketManager(
+ quic_sender_.connection())),
+ GetParam().congestion_control_type, &random_, &stats_,
+ kInitialCongestionWindowPackets);
+ quic_sender_.RecordTrace();
+
+ QuicConnectionPeer::SetSendAlgorithm(quic_sender_.connection(), sender_);
+ clock_ = simulator_.GetClock();
+ simulator_.set_random_generator(&random_);
+
+ uint64_t seed = QuicRandom::GetInstance()->RandUint64();
+ random_.set_seed(seed);
+ QUIC_LOG(INFO) << "SendAlgorithmTest simulator set up. Seed: " << seed;
+ }
+
+ // Creates a simulated network, with default settings between the
+ // sender and the switch and the given settings from the switch to
+ // the receiver.
+ void CreateSetup(const QuicBandwidth& test_bandwidth,
+ const QuicTime::Delta& test_link_delay,
+ QuicByteCount bottleneck_queue_length) {
+ switch_ = QuicMakeUnique<simulator::Switch>(&simulator_, "Switch", 8,
+ bottleneck_queue_length);
+ quic_sender_link_ = QuicMakeUnique<simulator::SymmetricLink>(
+ &quic_sender_, switch_->port(1), kLocalLinkBandwidth,
+ kLocalPropagationDelay);
+ receiver_link_ = QuicMakeUnique<simulator::SymmetricLink>(
+ &receiver_, switch_->port(2), test_bandwidth, test_link_delay);
+ }
+
+ void DoSimpleTransfer(QuicByteCount transfer_size, QuicTime::Delta deadline) {
+ quic_sender_.AddBytesToTransfer(transfer_size);
+ bool simulator_result = simulator_.RunUntilOrTimeout(
+ [this]() { return quic_sender_.bytes_to_transfer() == 0; }, deadline);
+ EXPECT_TRUE(simulator_result)
+ << "Simple transfer failed. Bytes remaining: "
+ << quic_sender_.bytes_to_transfer();
+ }
+
+ void SendBursts(size_t number_of_bursts,
+ QuicByteCount bytes,
+ QuicTime::Delta rtt,
+ QuicTime::Delta wait_time) {
+ ASSERT_EQ(0u, quic_sender_.bytes_to_transfer());
+ for (size_t i = 0; i < number_of_bursts; i++) {
+ quic_sender_.AddBytesToTransfer(bytes);
+
+      // Transfer the data, then wait for |wait_time| before the next burst.
+ simulator_.RunFor(wait_time);
+
+ // Ensure the connection did not time out.
+ ASSERT_TRUE(quic_sender_.connection()->connected());
+ ASSERT_TRUE(receiver_.connection()->connected());
+ }
+
+ simulator_.RunFor(wait_time + rtt);
+ EXPECT_EQ(0u, quic_sender_.bytes_to_transfer());
+ }
+
+ // Estimates the elapsed time for a given transfer size, given the
+ // bottleneck bandwidth and link propagation delay.
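+  // For example, a 12MB transfer over the 4Mbps wired test link needs roughly
+  // 25 seconds of serialization plus 100ms of propagation; the wired transfer
+  // test below allows 1.2x that estimate as its deadline.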
+ QuicTime::Delta EstimatedElapsedTime(
+ QuicByteCount transfer_size_bytes,
+ QuicBandwidth test_link_bandwidth,
+ const QuicTime::Delta& test_link_delay) const {
+ return test_link_bandwidth.TransferTime(transfer_size_bytes) +
+ 2 * test_link_delay;
+ }
+
+ QuicTime QuicSenderStartTime() {
+ return quic_sender_.connection()->GetStats().connection_creation_time;
+ }
+
+ void PrintTransferStats() {
+ const QuicConnectionStats& stats = quic_sender_.connection()->GetStats();
+ QUIC_LOG(INFO) << "Summary for scenario " << GetParam();
+ QUIC_LOG(INFO) << "Sender stats is " << stats;
+ const double rtx_rate =
+ static_cast<double>(stats.bytes_retransmitted) / stats.bytes_sent;
+ QUIC_LOG(INFO) << "Retransmit rate (num_rtx/num_total_sent): " << rtx_rate;
+ QUIC_LOG(INFO) << "Connection elapsed time: "
+ << (clock_->Now() - QuicSenderStartTime()).ToMilliseconds()
+ << " (ms)";
+ }
+
+ simulator::Simulator simulator_;
+ simulator::QuicEndpoint quic_sender_;
+ simulator::QuicEndpoint receiver_;
+ std::unique_ptr<simulator::Switch> switch_;
+ std::unique_ptr<simulator::SymmetricLink> quic_sender_link_;
+ std::unique_ptr<simulator::SymmetricLink> receiver_link_;
+ QuicConnectionStats stats_;
+
+ SimpleRandom random_;
+
+ // Owned by different components of the connection.
+ const QuicClock* clock_;
+ const RttStats* rtt_stats_;
+ SendAlgorithmInterface* sender_;
+};
+
+INSTANTIATE_TEST_SUITE_P(SendAlgorithmTests, SendAlgorithmTest,
+ ::testing::ValuesIn(GetTestParams()),
+ TestParamToString);
+
+// Test a simple long data transfer in the default setup.
+TEST_P(SendAlgorithmTest, SimpleWiredNetworkTransfer) {
+ CreateSetup(kTestLinkWiredBandwidth, kTestLinkWiredPropagationDelay,
+ kTestWiredBdp);
+ const QuicByteCount kTransferSizeBytes = 12 * 1024 * 1024;
+ const QuicTime::Delta maximum_elapsed_time =
+ EstimatedElapsedTime(kTransferSizeBytes, kTestLinkWiredBandwidth,
+ kTestLinkWiredPropagationDelay) *
+ 1.2;
+ DoSimpleTransfer(kTransferSizeBytes, maximum_elapsed_time);
+ PrintTransferStats();
+}
+
+TEST_P(SendAlgorithmTest, LowBdpPolicedNetworkTransfer) {
+ CreateSetup(kTestLinkLowBdpBandwidth, kTestLinkLowBdpPropagationDelay,
+ kTestPolicerQueue);
+ const QuicByteCount kTransferSizeBytes = 5 * 1024 * 1024;
+ const QuicTime::Delta maximum_elapsed_time =
+ EstimatedElapsedTime(kTransferSizeBytes, kTestLinkLowBdpBandwidth,
+ kTestLinkLowBdpPropagationDelay) *
+ 1.2;
+ DoSimpleTransfer(kTransferSizeBytes, maximum_elapsed_time);
+ PrintTransferStats();
+}
+
+TEST_P(SendAlgorithmTest, AppLimitedBurstsOverWiredNetwork) {
+ CreateSetup(kTestLinkWiredBandwidth, kTestLinkWiredPropagationDelay,
+ kTestWiredBdp);
+ const QuicByteCount kBurstSizeBytes = 512;
+ const int kNumBursts = 20;
+ const QuicTime::Delta kWaitTime = QuicTime::Delta::FromSeconds(3);
+ SendBursts(kNumBursts, kBurstSizeBytes, kTestWiredRtt, kWaitTime);
+ PrintTransferStats();
+
+ const QuicTime::Delta estimated_burst_time =
+ EstimatedElapsedTime(kBurstSizeBytes, kTestLinkWiredBandwidth,
+ kTestLinkWiredPropagationDelay) +
+ kWaitTime;
+ const QuicTime::Delta max_elapsed_time =
+ kNumBursts * estimated_burst_time + kWaitTime;
+ const QuicTime::Delta actual_elapsed_time =
+ clock_->Now() - QuicSenderStartTime();
+ EXPECT_GE(max_elapsed_time, actual_elapsed_time);
+}
+
+TEST_P(SendAlgorithmTest, SatelliteNetworkTransfer) {
+ CreateSetup(kTestLinkWiredBandwidth, kTestSatellitePropagationDelay,
+ kTestWiredBdp);
+ const QuicByteCount kTransferSizeBytes = 12 * 1024 * 1024;
+ const QuicTime::Delta maximum_elapsed_time =
+ EstimatedElapsedTime(kTransferSizeBytes, kTestLinkWiredBandwidth,
+ kTestSatellitePropagationDelay) *
+ 1.25;
+ DoSimpleTransfer(kTransferSizeBytes, maximum_elapsed_time);
+ PrintTransferStats();
+}
+
+TEST_P(SendAlgorithmTest, 2GNetworkTransfer) {
+ CreateSetup(kTestLink2GBandwidth, kTestCellularPropagationDelay,
+ kCellularQueue);
+ const QuicByteCount kTransferSizeBytes = 1024 * 1024;
+ const QuicTime::Delta maximum_elapsed_time =
+ EstimatedElapsedTime(kTransferSizeBytes, kTestLink2GBandwidth,
+ kTestCellularPropagationDelay) *
+ 1.2;
+ DoSimpleTransfer(kTransferSizeBytes, maximum_elapsed_time);
+ PrintTransferStats();
+}
+
+TEST_P(SendAlgorithmTest, 3GNetworkTransfer) {
+ CreateSetup(kTestLink3GBandwidth, kTestCellularPropagationDelay,
+ kCellularQueue);
+ const QuicByteCount kTransferSizeBytes = 5 * 1024 * 1024;
+ const QuicTime::Delta maximum_elapsed_time =
+ EstimatedElapsedTime(kTransferSizeBytes, kTestLink3GBandwidth,
+ kTestCellularPropagationDelay) *
+ 1.2;
+ DoSimpleTransfer(kTransferSizeBytes, maximum_elapsed_time);
+ PrintTransferStats();
+}
+
+TEST_P(SendAlgorithmTest, LowRTTTransfer) {
+ CreateSetup(kTestLinkWiredBandwidth, kTestLinkSmallRTTDelay, kCellularQueue);
+
+ const QuicByteCount kTransferSizeBytes = 12 * 1024 * 1024;
+ const QuicTime::Delta maximum_elapsed_time =
+ EstimatedElapsedTime(kTransferSizeBytes, kTestLinkWiredBandwidth,
+ kTestLinkSmallRTTDelay) *
+ 1.2;
+ DoSimpleTransfer(kTransferSizeBytes, maximum_elapsed_time);
+ PrintTransferStats();
+}
+
+} // namespace test
+} // namespace quic
diff --git a/quic/core/congestion_control/tcp_cubic_sender_bytes.cc b/quic/core/congestion_control/tcp_cubic_sender_bytes.cc
new file mode 100644
index 0000000..5b843eb
--- /dev/null
+++ b/quic/core/congestion_control/tcp_cubic_sender_bytes.cc
@@ -0,0 +1,436 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/tcp_cubic_sender_bytes.h"
+
+#include <algorithm>
+#include <cstdint>
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/prr_sender.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
+#include "net/third_party/quiche/src/quic/core/crypto/crypto_protocol.h"
+#include "net/third_party/quiche/src/quic/core/quic_constants.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_bug_tracker.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_flags.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_string.h"
+
+namespace quic {
+
+namespace {
+// Constants based on TCP defaults.
+const QuicByteCount kMaxBurstBytes = 3 * kDefaultTCPMSS;
+const float kRenoBeta = 0.7f; // Reno backoff factor.
+// The minimum cwnd based on RFC 3782 (TCP NewReno) for cwnd reductions on a
+// fast retransmission.
+const QuicByteCount kDefaultMinimumCongestionWindow = 2 * kDefaultTCPMSS;
+} // namespace
+
+TcpCubicSenderBytes::TcpCubicSenderBytes(
+ const QuicClock* clock,
+ const RttStats* rtt_stats,
+ bool reno,
+ QuicPacketCount initial_tcp_congestion_window,
+ QuicPacketCount max_congestion_window,
+ QuicConnectionStats* stats)
+ : rtt_stats_(rtt_stats),
+ stats_(stats),
+ reno_(reno),
+ num_connections_(kDefaultNumConnections),
+ min4_mode_(false),
+ last_cutback_exited_slowstart_(false),
+ slow_start_large_reduction_(false),
+ no_prr_(false),
+ cubic_(clock),
+ num_acked_packets_(0),
+ congestion_window_(initial_tcp_congestion_window * kDefaultTCPMSS),
+ min_congestion_window_(kDefaultMinimumCongestionWindow),
+ max_congestion_window_(max_congestion_window * kDefaultTCPMSS),
+ slowstart_threshold_(max_congestion_window * kDefaultTCPMSS),
+ initial_tcp_congestion_window_(initial_tcp_congestion_window *
+ kDefaultTCPMSS),
+ initial_max_tcp_congestion_window_(max_congestion_window *
+ kDefaultTCPMSS),
+ min_slow_start_exit_window_(min_congestion_window_) {}
+
+TcpCubicSenderBytes::~TcpCubicSenderBytes() {}
+
+void TcpCubicSenderBytes::SetFromConfig(const QuicConfig& config,
+ Perspective perspective) {
+ if (perspective == Perspective::IS_SERVER) {
+ if (!GetQuicReloadableFlag(quic_unified_iw_options)) {
+ if (config.HasReceivedConnectionOptions() &&
+ ContainsQuicTag(config.ReceivedConnectionOptions(), kIW03)) {
+ // Initial window experiment.
+ SetInitialCongestionWindowInPackets(3);
+ }
+ if (config.HasReceivedConnectionOptions() &&
+ ContainsQuicTag(config.ReceivedConnectionOptions(), kIW10)) {
+ // Initial window experiment.
+ SetInitialCongestionWindowInPackets(10);
+ }
+ if (config.HasReceivedConnectionOptions() &&
+ ContainsQuicTag(config.ReceivedConnectionOptions(), kIW20)) {
+ // Initial window experiment.
+ SetInitialCongestionWindowInPackets(20);
+ }
+ if (config.HasReceivedConnectionOptions() &&
+ ContainsQuicTag(config.ReceivedConnectionOptions(), kIW50)) {
+ // Initial window experiment.
+ SetInitialCongestionWindowInPackets(50);
+ }
+ if (config.HasReceivedConnectionOptions() &&
+ ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN1)) {
+ // Min CWND experiment.
+ SetMinCongestionWindowInPackets(1);
+ }
+ }
+ if (config.HasReceivedConnectionOptions() &&
+ ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN4)) {
+ // Min CWND of 4 experiment.
+ min4_mode_ = true;
+ SetMinCongestionWindowInPackets(1);
+ }
+ if (config.HasReceivedConnectionOptions() &&
+ ContainsQuicTag(config.ReceivedConnectionOptions(), kSSLR)) {
+ // Slow Start Fast Exit experiment.
+ slow_start_large_reduction_ = true;
+ }
+ if (config.HasReceivedConnectionOptions() &&
+ ContainsQuicTag(config.ReceivedConnectionOptions(), kNPRR)) {
+ // Use unity pacing instead of PRR.
+ no_prr_ = true;
+ }
+ }
+}
+
+void TcpCubicSenderBytes::AdjustNetworkParameters(QuicBandwidth bandwidth,
+ QuicTime::Delta rtt) {
+ if (bandwidth.IsZero() || rtt.IsZero()) {
+ return;
+ }
+
+ SetCongestionWindowFromBandwidthAndRtt(bandwidth, rtt);
+}
+
+float TcpCubicSenderBytes::RenoBeta() const {
+ // kNConnectionBeta is the backoff factor after loss for our N-connection
+ // emulation, which emulates the effective backoff of an ensemble of N
+ // TCP-Reno connections on a single loss event. The effective multiplier is
+ // computed as:
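+  // For example, kRenoBeta = 0.7 gives a multiplier of 0.7 for one emulated
+  // connection and (2 - 1 + 0.7) / 2 = 0.85 for two, i.e. each additional
+  // connection makes the aggregate backoff less aggressive.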
+ return (num_connections_ - 1 + kRenoBeta) / num_connections_;
+}
+
+void TcpCubicSenderBytes::OnCongestionEvent(
+ bool rtt_updated,
+ QuicByteCount prior_in_flight,
+ QuicTime event_time,
+ const AckedPacketVector& acked_packets,
+ const LostPacketVector& lost_packets) {
+ if (rtt_updated && InSlowStart() &&
+ hybrid_slow_start_.ShouldExitSlowStart(
+ rtt_stats_->latest_rtt(), rtt_stats_->min_rtt(),
+ GetCongestionWindow() / kDefaultTCPMSS)) {
+ ExitSlowstart();
+ }
+ for (const LostPacket& lost_packet : lost_packets) {
+ OnPacketLost(lost_packet.packet_number, lost_packet.bytes_lost,
+ prior_in_flight);
+ }
+ for (const AckedPacket acked_packet : acked_packets) {
+ OnPacketAcked(acked_packet.packet_number, acked_packet.bytes_acked,
+ prior_in_flight, event_time);
+ }
+}
+
+void TcpCubicSenderBytes::OnPacketAcked(QuicPacketNumber acked_packet_number,
+ QuicByteCount acked_bytes,
+ QuicByteCount prior_in_flight,
+ QuicTime event_time) {
+ if (largest_acked_packet_number_.IsInitialized()) {
+ largest_acked_packet_number_ =
+ std::max(acked_packet_number, largest_acked_packet_number_);
+ } else {
+ largest_acked_packet_number_ = acked_packet_number;
+ }
+ if (InRecovery()) {
+ if (!no_prr_) {
+ // PRR is used when in recovery.
+ prr_.OnPacketAcked(acked_bytes);
+ }
+ return;
+ }
+ MaybeIncreaseCwnd(acked_packet_number, acked_bytes, prior_in_flight,
+ event_time);
+ if (InSlowStart()) {
+ hybrid_slow_start_.OnPacketAcked(acked_packet_number);
+ }
+}
+
+void TcpCubicSenderBytes::OnPacketSent(
+ QuicTime /*sent_time*/,
+ QuicByteCount /*bytes_in_flight*/,
+ QuicPacketNumber packet_number,
+ QuicByteCount bytes,
+ HasRetransmittableData is_retransmittable) {
+ if (InSlowStart()) {
+ ++(stats_->slowstart_packets_sent);
+ }
+
+ if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) {
+ return;
+ }
+ if (InRecovery()) {
+ // PRR is used when in recovery.
+ prr_.OnPacketSent(bytes);
+ }
+ DCHECK(!largest_sent_packet_number_.IsInitialized() ||
+ largest_sent_packet_number_ < packet_number);
+ largest_sent_packet_number_ = packet_number;
+ hybrid_slow_start_.OnPacketSent(packet_number);
+}
+
+bool TcpCubicSenderBytes::CanSend(QuicByteCount bytes_in_flight) {
+ if (!no_prr_ && InRecovery()) {
+ // PRR is used when in recovery.
+ return prr_.CanSend(GetCongestionWindow(), bytes_in_flight,
+ GetSlowStartThreshold());
+ }
+ if (GetCongestionWindow() > bytes_in_flight) {
+ return true;
+ }
+ if (min4_mode_ && bytes_in_flight < 4 * kDefaultTCPMSS) {
+ return true;
+ }
+ return false;
+}
+
+QuicBandwidth TcpCubicSenderBytes::PacingRate(
+ QuicByteCount /* bytes_in_flight */) const {
+  // We pace at twice the rate of the underlying sender's bandwidth estimate
+  // during slow start and 1.25x during congestion avoidance (or 1x during
+  // recovery when PRR is disabled) to ensure pacing doesn't prevent us from
+  // filling the window.
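+  // As an illustration, assuming the default 1460-byte MSS: a 10-packet
+  // (14600-byte) window over a 100ms SRTT gives a base rate of 146KB/s, so
+  // slow start paces at 292KB/s and congestion avoidance at about 182KB/s.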
+ QuicTime::Delta srtt = rtt_stats_->SmoothedOrInitialRtt();
+ const QuicBandwidth bandwidth =
+ QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
+ return bandwidth * (InSlowStart() ? 2 : (no_prr_ && InRecovery() ? 1 : 1.25));
+}
+
+QuicBandwidth TcpCubicSenderBytes::BandwidthEstimate() const {
+ QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
+ if (srtt.IsZero()) {
+ // If we haven't measured an rtt, the bandwidth estimate is unknown.
+ return QuicBandwidth::Zero();
+ }
+ return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
+}
+
+bool TcpCubicSenderBytes::InSlowStart() const {
+ return GetCongestionWindow() < GetSlowStartThreshold();
+}
+
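+// The sender is considered CWND-limited when bytes_in_flight fills the
+// window, when less than kMaxBurstBytes of the window remains unused, or,
+// during slow start, when more than half of the window is in flight.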
+bool TcpCubicSenderBytes::IsCwndLimited(QuicByteCount bytes_in_flight) const {
+ const QuicByteCount congestion_window = GetCongestionWindow();
+ if (bytes_in_flight >= congestion_window) {
+ return true;
+ }
+ const QuicByteCount available_bytes = congestion_window - bytes_in_flight;
+ const bool slow_start_limited =
+ InSlowStart() && bytes_in_flight > congestion_window / 2;
+ return slow_start_limited || available_bytes <= kMaxBurstBytes;
+}
+
+bool TcpCubicSenderBytes::InRecovery() const {
+ return largest_acked_packet_number_.IsInitialized() &&
+ largest_sent_at_last_cutback_.IsInitialized() &&
+ largest_acked_packet_number_ <= largest_sent_at_last_cutback_;
+}
+
+bool TcpCubicSenderBytes::ShouldSendProbingPacket() const {
+ return false;
+}
+
+void TcpCubicSenderBytes::OnRetransmissionTimeout(bool packets_retransmitted) {
+ largest_sent_at_last_cutback_.Clear();
+ if (!packets_retransmitted) {
+ return;
+ }
+ hybrid_slow_start_.Restart();
+ HandleRetransmissionTimeout();
+}
+
+QuicString TcpCubicSenderBytes::GetDebugState() const {
+ return "";
+}
+
+void TcpCubicSenderBytes::OnApplicationLimited(QuicByteCount bytes_in_flight) {}
+
+void TcpCubicSenderBytes::SetCongestionWindowFromBandwidthAndRtt(
+ QuicBandwidth bandwidth,
+ QuicTime::Delta rtt) {
+ QuicByteCount new_congestion_window = bandwidth.ToBytesPerPeriod(rtt);
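+  // For instance, an 8Mbps estimate over a 100ms RTT yields a 100000-byte
+  // window, which is then clamped to at most kMaxResumptionCongestionWindow
+  // packets and at least the minimum congestion window.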
+ // Limit new CWND if needed.
+ congestion_window_ =
+ std::max(min_congestion_window_,
+ std::min(new_congestion_window,
+ kMaxResumptionCongestionWindow * kDefaultTCPMSS));
+}
+
+void TcpCubicSenderBytes::SetInitialCongestionWindowInPackets(
+ QuicPacketCount congestion_window) {
+ congestion_window_ = congestion_window * kDefaultTCPMSS;
+}
+
+void TcpCubicSenderBytes::SetMinCongestionWindowInPackets(
+ QuicPacketCount congestion_window) {
+ min_congestion_window_ = congestion_window * kDefaultTCPMSS;
+}
+
+void TcpCubicSenderBytes::SetNumEmulatedConnections(int num_connections) {
+ num_connections_ = std::max(1, num_connections);
+ cubic_.SetNumConnections(num_connections_);
+}
+
+void TcpCubicSenderBytes::ExitSlowstart() {
+ slowstart_threshold_ = congestion_window_;
+}
+
+void TcpCubicSenderBytes::OnPacketLost(QuicPacketNumber packet_number,
+ QuicByteCount lost_bytes,
+ QuicByteCount prior_in_flight) {
+  // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets
+  // already sent should be treated as a single loss event, since those
+  // packets were sent before the sender learned about the loss.
+ if (largest_sent_at_last_cutback_.IsInitialized() &&
+ packet_number <= largest_sent_at_last_cutback_) {
+ if (last_cutback_exited_slowstart_) {
+ ++stats_->slowstart_packets_lost;
+ stats_->slowstart_bytes_lost += lost_bytes;
+ if (slow_start_large_reduction_) {
+ // Reduce congestion window by lost_bytes for every loss.
+ congestion_window_ = std::max(congestion_window_ - lost_bytes,
+ min_slow_start_exit_window_);
+ slowstart_threshold_ = congestion_window_;
+ }
+ }
+ QUIC_DVLOG(1) << "Ignoring loss for largest_missing:" << packet_number
+ << " because it was sent prior to the last CWND cutback.";
+ return;
+ }
+ ++stats_->tcp_loss_events;
+ last_cutback_exited_slowstart_ = InSlowStart();
+ if (InSlowStart()) {
+ ++stats_->slowstart_packets_lost;
+ }
+
+ if (!no_prr_) {
+ prr_.OnPacketLost(prior_in_flight);
+ }
+
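+  // How much the window is reduced depends on the mode. As a rough
+  // illustration with a single emulated connection: Reno cuts a 20-MSS window
+  // to 14 MSS (RenoBeta() is 0.7), while the slow-start large-reduction
+  // experiment shrinks the window by one MSS for the loss that triggers the
+  // cutback, and by the lost bytes for further losses in the same window.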
+ // TODO(b/77268641): Separate out all of slow start into a separate class.
+ if (slow_start_large_reduction_ && InSlowStart()) {
+ DCHECK_LT(kDefaultTCPMSS, congestion_window_);
+ if (congestion_window_ >= 2 * initial_tcp_congestion_window_) {
+ min_slow_start_exit_window_ = congestion_window_ / 2;
+ }
+ congestion_window_ = congestion_window_ - kDefaultTCPMSS;
+ } else if (reno_) {
+ congestion_window_ = congestion_window_ * RenoBeta();
+ } else {
+ congestion_window_ =
+ cubic_.CongestionWindowAfterPacketLoss(congestion_window_);
+ }
+ if (congestion_window_ < min_congestion_window_) {
+ congestion_window_ = min_congestion_window_;
+ }
+ slowstart_threshold_ = congestion_window_;
+ largest_sent_at_last_cutback_ = largest_sent_packet_number_;
+ // Reset packet count from congestion avoidance mode. We start counting again
+ // when we're out of recovery.
+ num_acked_packets_ = 0;
+ QUIC_DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_
+ << " slowstart threshold: " << slowstart_threshold_;
+}
+
+QuicByteCount TcpCubicSenderBytes::GetCongestionWindow() const {
+ return congestion_window_;
+}
+
+QuicByteCount TcpCubicSenderBytes::GetSlowStartThreshold() const {
+ return slowstart_threshold_;
+}
+
+// Called when we receive an ack. Normal TCP tracks how many packets one ack
+// represents, but QUIC has a separate ack for each packet.
+void TcpCubicSenderBytes::MaybeIncreaseCwnd(
+ QuicPacketNumber acked_packet_number,
+ QuicByteCount acked_bytes,
+ QuicByteCount prior_in_flight,
+ QuicTime event_time) {
+ QUIC_BUG_IF(InRecovery()) << "Never increase the CWND during recovery.";
+ // Do not increase the congestion window unless the sender is close to using
+ // the current window.
+ if (!IsCwndLimited(prior_in_flight)) {
+ cubic_.OnApplicationLimited();
+ return;
+ }
+ if (congestion_window_ >= max_congestion_window_) {
+ return;
+ }
+ if (InSlowStart()) {
+ // TCP slow start, exponential growth, increase by one for each ACK.
+ congestion_window_ += kDefaultTCPMSS;
+ QUIC_DVLOG(1) << "Slow start; congestion window: " << congestion_window_
+ << " slowstart threshold: " << slowstart_threshold_;
+ return;
+ }
+ // Congestion avoidance.
+ if (reno_) {
+ // Classic Reno congestion avoidance.
+ ++num_acked_packets_;
+    // With N emulated connections, the number of acks required for each
+    // window increase is divided by N, so the CWND grows N times faster than
+    // single-connection Reno.
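+    // For example, with a 20-MSS window and two emulated connections, the
+    // window grows by one MSS every 10 acks, i.e. roughly two MSS per round
+    // trip instead of one.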
+ if (num_acked_packets_ * num_connections_ >=
+ congestion_window_ / kDefaultTCPMSS) {
+ congestion_window_ += kDefaultTCPMSS;
+ num_acked_packets_ = 0;
+ }
+
+ QUIC_DVLOG(1) << "Reno; congestion window: " << congestion_window_
+ << " slowstart threshold: " << slowstart_threshold_
+ << " congestion window count: " << num_acked_packets_;
+ } else {
+ congestion_window_ = std::min(
+ max_congestion_window_,
+ cubic_.CongestionWindowAfterAck(acked_bytes, congestion_window_,
+ rtt_stats_->min_rtt(), event_time));
+ QUIC_DVLOG(1) << "Cubic; congestion window: " << congestion_window_
+ << " slowstart threshold: " << slowstart_threshold_;
+ }
+}
+
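+// Called after an RTO that retransmitted data: the slow start threshold
+// becomes half of the current window, and the window itself collapses to the
+// minimum congestion window (two MSS by default).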
+void TcpCubicSenderBytes::HandleRetransmissionTimeout() {
+ cubic_.ResetCubicState();
+ slowstart_threshold_ = congestion_window_ / 2;
+ congestion_window_ = min_congestion_window_;
+}
+
+void TcpCubicSenderBytes::OnConnectionMigration() {
+ hybrid_slow_start_.Restart();
+ prr_ = PrrSender();
+ largest_sent_packet_number_.Clear();
+ largest_acked_packet_number_.Clear();
+ largest_sent_at_last_cutback_.Clear();
+ last_cutback_exited_slowstart_ = false;
+ cubic_.ResetCubicState();
+ num_acked_packets_ = 0;
+ congestion_window_ = initial_tcp_congestion_window_;
+ max_congestion_window_ = initial_max_tcp_congestion_window_;
+ slowstart_threshold_ = initial_max_tcp_congestion_window_;
+}
+
+CongestionControlType TcpCubicSenderBytes::GetCongestionControlType() const {
+ return reno_ ? kRenoBytes : kCubicBytes;
+}
+
+} // namespace quic
diff --git a/quic/core/congestion_control/tcp_cubic_sender_bytes.h b/quic/core/congestion_control/tcp_cubic_sender_bytes.h
new file mode 100644
index 0000000..0ecb6f4
--- /dev/null
+++ b/quic/core/congestion_control/tcp_cubic_sender_bytes.h
@@ -0,0 +1,173 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// Send-side congestion control that emulates the behavior of TCP Cubic (or
+// TCP Reno, when configured to do so).
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_TCP_CUBIC_SENDER_BYTES_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_TCP_CUBIC_SENDER_BYTES_H_
+
+#include <cstdint>
+
+#include "base/macros.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/cubic_bytes.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/hybrid_slow_start.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/prr_sender.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/send_algorithm_interface.h"
+#include "net/third_party/quiche/src/quic/core/quic_bandwidth.h"
+#include "net/third_party/quiche/src/quic/core/quic_connection_stats.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_export.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_string.h"
+
+namespace quic {
+
+class RttStats;
+
+// Maximum window to allow when doing bandwidth resumption.
+const QuicPacketCount kMaxResumptionCongestionWindow = 200;
+
+namespace test {
+class TcpCubicSenderBytesPeer;
+} // namespace test
+
+class QUIC_EXPORT_PRIVATE TcpCubicSenderBytes : public SendAlgorithmInterface {
+ public:
+ TcpCubicSenderBytes(const QuicClock* clock,
+ const RttStats* rtt_stats,
+ bool reno,
+ QuicPacketCount initial_tcp_congestion_window,
+ QuicPacketCount max_congestion_window,
+ QuicConnectionStats* stats);
+ TcpCubicSenderBytes(const TcpCubicSenderBytes&) = delete;
+ TcpCubicSenderBytes& operator=(const TcpCubicSenderBytes&) = delete;
+ ~TcpCubicSenderBytes() override;
+
+ // Start implementation of SendAlgorithmInterface.
+ void SetFromConfig(const QuicConfig& config,
+ Perspective perspective) override;
+ void AdjustNetworkParameters(QuicBandwidth bandwidth,
+ QuicTime::Delta rtt) override;
+ void SetNumEmulatedConnections(int num_connections) override;
+ void SetInitialCongestionWindowInPackets(
+ QuicPacketCount congestion_window) override;
+ void OnConnectionMigration() override;
+ void OnCongestionEvent(bool rtt_updated,
+ QuicByteCount prior_in_flight,
+ QuicTime event_time,
+ const AckedPacketVector& acked_packets,
+ const LostPacketVector& lost_packets) override;
+ void OnPacketSent(QuicTime sent_time,
+ QuicByteCount bytes_in_flight,
+ QuicPacketNumber packet_number,
+ QuicByteCount bytes,
+ HasRetransmittableData is_retransmittable) override;
+ void OnRetransmissionTimeout(bool packets_retransmitted) override;
+ bool CanSend(QuicByteCount bytes_in_flight) override;
+ QuicBandwidth PacingRate(QuicByteCount bytes_in_flight) const override;
+ QuicBandwidth BandwidthEstimate() const override;
+ QuicByteCount GetCongestionWindow() const override;
+ QuicByteCount GetSlowStartThreshold() const override;
+ CongestionControlType GetCongestionControlType() const override;
+ bool InSlowStart() const override;
+ bool InRecovery() const override;
+ bool ShouldSendProbingPacket() const override;
+ QuicString GetDebugState() const override;
+ void OnApplicationLimited(QuicByteCount bytes_in_flight) override;
+ // End implementation of SendAlgorithmInterface.
+
+ QuicByteCount min_congestion_window() const { return min_congestion_window_; }
+
+ protected:
+ // Compute the TCP Reno beta based on the current number of connections.
+ float RenoBeta() const;
+
+ bool IsCwndLimited(QuicByteCount bytes_in_flight) const;
+
+ // TODO(ianswett): Remove these and migrate to OnCongestionEvent.
+ void OnPacketAcked(QuicPacketNumber acked_packet_number,
+ QuicByteCount acked_bytes,
+ QuicByteCount prior_in_flight,
+ QuicTime event_time);
+ void SetCongestionWindowFromBandwidthAndRtt(QuicBandwidth bandwidth,
+ QuicTime::Delta rtt);
+ void SetMinCongestionWindowInPackets(QuicPacketCount congestion_window);
+ void ExitSlowstart();
+ void OnPacketLost(QuicPacketNumber largest_loss,
+ QuicByteCount lost_bytes,
+ QuicByteCount prior_in_flight);
+ void MaybeIncreaseCwnd(QuicPacketNumber acked_packet_number,
+ QuicByteCount acked_bytes,
+ QuicByteCount prior_in_flight,
+ QuicTime event_time);
+ void HandleRetransmissionTimeout();
+
+ private:
+ friend class test::TcpCubicSenderBytesPeer;
+
+ HybridSlowStart hybrid_slow_start_;
+ PrrSender prr_;
+ const RttStats* rtt_stats_;
+ QuicConnectionStats* stats_;
+
+ // If true, Reno congestion control is used instead of Cubic.
+ const bool reno_;
+
+ // Number of connections to simulate.
+ uint32_t num_connections_;
+
+ // Track the largest packet that has been sent.
+ QuicPacketNumber largest_sent_packet_number_;
+
+ // Track the largest packet that has been acked.
+ QuicPacketNumber largest_acked_packet_number_;
+
+ // Track the largest packet number outstanding when a CWND cutback occurs.
+ QuicPacketNumber largest_sent_at_last_cutback_;
+
+  // When true, up to 4 packets may be in flight regardless of the CWND, while
+  // the CWND itself (and hence the pacing rate) may be as low as 1 packet.
+ bool min4_mode_;
+
+ // Whether the last loss event caused us to exit slowstart.
+  // Used for stats collection of slowstart_packets_lost.
+ bool last_cutback_exited_slowstart_;
+
+ // When true, exit slow start with large cutback of congestion window.
+ bool slow_start_large_reduction_;
+
+ // When true, use unity pacing instead of PRR.
+ bool no_prr_;
+
+ CubicBytes cubic_;
+
+ // ACK counter for the Reno implementation.
+ uint64_t num_acked_packets_;
+
+ // Congestion window in bytes.
+ QuicByteCount congestion_window_;
+
+ // Minimum congestion window in bytes.
+ QuicByteCount min_congestion_window_;
+
+ // Maximum congestion window in bytes.
+ QuicByteCount max_congestion_window_;
+
+ // Slow start congestion window in bytes, aka ssthresh.
+ QuicByteCount slowstart_threshold_;
+
+ // Initial TCP congestion window in bytes. This variable can only be set when
+ // this algorithm is created.
+ const QuicByteCount initial_tcp_congestion_window_;
+
+ // Initial maximum TCP congestion window in bytes. This variable can only be
+ // set when this algorithm is created.
+ const QuicByteCount initial_max_tcp_congestion_window_;
+
+ // The minimum window when exiting slow start with large reduction.
+ QuicByteCount min_slow_start_exit_window_;
+};
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_TCP_CUBIC_SENDER_BYTES_H_
diff --git a/quic/core/congestion_control/tcp_cubic_sender_bytes_test.cc b/quic/core/congestion_control/tcp_cubic_sender_bytes_test.cc
new file mode 100644
index 0000000..5eb6226
--- /dev/null
+++ b/quic/core/congestion_control/tcp_cubic_sender_bytes_test.cc
@@ -0,0 +1,834 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/tcp_cubic_sender_bytes.h"
+
+#include <algorithm>
+#include <cstdint>
+#include <memory>
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
+#include "net/third_party/quiche/src/quic/core/congestion_control/send_algorithm_interface.h"
+#include "net/third_party/quiche/src/quic/core/crypto/crypto_protocol.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/core/quic_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_ptr_util.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+#include "net/third_party/quiche/src/quic/test_tools/mock_clock.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_config_peer.h"
+
+namespace quic {
+namespace test {
+
+// TODO(ianswett): A number of these tests were written with the assumption of
+// an initial CWND of 10. They have carefully calculated values which should be
+// updated to be based on kInitialCongestionWindow.
+const uint32_t kInitialCongestionWindowPackets = 10;
+const uint32_t kMaxCongestionWindowPackets = 200;
+const uint32_t kDefaultWindowTCP =
+ kInitialCongestionWindowPackets * kDefaultTCPMSS;
+const float kRenoBeta = 0.7f; // Reno backoff factor.
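+// Assuming the default 1460-byte MSS, kDefaultWindowTCP is 14600 bytes. The
+// slow-start tests below rely on each AckNPackets(2) call growing the window
+// by two MSS (one per acked packet) while the sender remains CWND-limited.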
+
+class TcpCubicSenderBytesPeer : public TcpCubicSenderBytes {
+ public:
+ TcpCubicSenderBytesPeer(const QuicClock* clock, bool reno)
+ : TcpCubicSenderBytes(clock,
+ &rtt_stats_,
+ reno,
+ kInitialCongestionWindowPackets,
+ kMaxCongestionWindowPackets,
+ &stats_) {}
+
+ const HybridSlowStart& hybrid_slow_start() const {
+ return hybrid_slow_start_;
+ }
+
+ float GetRenoBeta() const { return RenoBeta(); }
+
+ RttStats rtt_stats_;
+ QuicConnectionStats stats_;
+};
+
+class TcpCubicSenderBytesTest : public QuicTest {
+ protected:
+ TcpCubicSenderBytesTest()
+ : one_ms_(QuicTime::Delta::FromMilliseconds(1)),
+ sender_(new TcpCubicSenderBytesPeer(&clock_, true)),
+ packet_number_(1),
+ acked_packet_number_(0),
+ bytes_in_flight_(0) {}
+
+ int SendAvailableSendWindow() {
+ return SendAvailableSendWindow(kDefaultTCPMSS);
+ }
+
+ int SendAvailableSendWindow(QuicPacketLength packet_length) {
+ // Send as long as TimeUntilSend returns Zero.
+ int packets_sent = 0;
+ bool can_send = sender_->CanSend(bytes_in_flight_);
+ while (can_send) {
+ sender_->OnPacketSent(clock_.Now(), bytes_in_flight_,
+ QuicPacketNumber(packet_number_++), kDefaultTCPMSS,
+ HAS_RETRANSMITTABLE_DATA);
+ ++packets_sent;
+ bytes_in_flight_ += kDefaultTCPMSS;
+ can_send = sender_->CanSend(bytes_in_flight_);
+ }
+ return packets_sent;
+ }
+
+  // TCP normally acks every other segment.
+ void AckNPackets(int n) {
+ sender_->rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(60),
+ QuicTime::Delta::Zero(), clock_.Now());
+ AckedPacketVector acked_packets;
+ LostPacketVector lost_packets;
+ for (int i = 0; i < n; ++i) {
+ ++acked_packet_number_;
+ acked_packets.push_back(
+ AckedPacket(QuicPacketNumber(acked_packet_number_), kDefaultTCPMSS,
+ QuicTime::Zero()));
+ }
+ sender_->OnCongestionEvent(true, bytes_in_flight_, clock_.Now(),
+ acked_packets, lost_packets);
+ bytes_in_flight_ -= n * kDefaultTCPMSS;
+ clock_.AdvanceTime(one_ms_);
+ }
+
+ void LoseNPackets(int n) { LoseNPackets(n, kDefaultTCPMSS); }
+
+ void LoseNPackets(int n, QuicPacketLength packet_length) {
+ AckedPacketVector acked_packets;
+ LostPacketVector lost_packets;
+ for (int i = 0; i < n; ++i) {
+ ++acked_packet_number_;
+ lost_packets.push_back(
+ LostPacket(QuicPacketNumber(acked_packet_number_), packet_length));
+ }
+ sender_->OnCongestionEvent(false, bytes_in_flight_, clock_.Now(),
+ acked_packets, lost_packets);
+ bytes_in_flight_ -= n * packet_length;
+ }
+
+ // Does not increment acked_packet_number_.
+ void LosePacket(uint64_t packet_number) {
+ AckedPacketVector acked_packets;
+ LostPacketVector lost_packets;
+ lost_packets.push_back(
+ LostPacket(QuicPacketNumber(packet_number), kDefaultTCPMSS));
+ sender_->OnCongestionEvent(false, bytes_in_flight_, clock_.Now(),
+ acked_packets, lost_packets);
+ bytes_in_flight_ -= kDefaultTCPMSS;
+ }
+
+ const QuicTime::Delta one_ms_;
+ MockClock clock_;
+ std::unique_ptr<TcpCubicSenderBytesPeer> sender_;
+ uint64_t packet_number_;
+ uint64_t acked_packet_number_;
+ QuicByteCount bytes_in_flight_;
+};
+
+TEST_F(TcpCubicSenderBytesTest, SimpleSender) {
+ // At startup make sure we are at the default.
+ EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
+ // At startup make sure we can send.
+ EXPECT_TRUE(sender_->CanSend(0));
+ // Make sure we can send.
+ EXPECT_TRUE(sender_->CanSend(0));
+ // And that window is un-affected.
+ EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
+
+ // Fill the send window with data, then verify that we can't send.
+ SendAvailableSendWindow();
+ EXPECT_FALSE(sender_->CanSend(sender_->GetCongestionWindow()));
+}
+
+TEST_F(TcpCubicSenderBytesTest, ApplicationLimitedSlowStart) {
+ // Send exactly 10 packets and ensure the CWND ends at 14 packets.
+ const int kNumberOfAcks = 5;
+ // At startup make sure we can send.
+ EXPECT_TRUE(sender_->CanSend(0));
+ // Make sure we can send.
+ EXPECT_TRUE(sender_->CanSend(0));
+
+ SendAvailableSendWindow();
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ AckNPackets(2);
+ }
+ QuicByteCount bytes_to_send = sender_->GetCongestionWindow();
+  // Only the first two ack events arrive while the sender is still
+  // CWND-limited (bytes_in_flight above half the CWND), so only those two
+  // acks grow the window, by 2 * kDefaultTCPMSS each.
+ EXPECT_EQ(kDefaultWindowTCP + kDefaultTCPMSS * 2 * 2, bytes_to_send);
+}
+
+TEST_F(TcpCubicSenderBytesTest, ExponentialSlowStart) {
+ const int kNumberOfAcks = 20;
+ // At startup make sure we can send.
+ EXPECT_TRUE(sender_->CanSend(0));
+ EXPECT_EQ(QuicBandwidth::Zero(), sender_->BandwidthEstimate());
+ // Make sure we can send.
+ EXPECT_TRUE(sender_->CanSend(0));
+
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ }
+ const QuicByteCount cwnd = sender_->GetCongestionWindow();
+ EXPECT_EQ(kDefaultWindowTCP + kDefaultTCPMSS * 2 * kNumberOfAcks, cwnd);
+ EXPECT_EQ(QuicBandwidth::FromBytesAndTimeDelta(
+ cwnd, sender_->rtt_stats_.smoothed_rtt()),
+ sender_->BandwidthEstimate());
+}
+
+TEST_F(TcpCubicSenderBytesTest, SlowStartPacketLoss) {
+ sender_->SetNumEmulatedConnections(1);
+ const int kNumberOfAcks = 10;
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ }
+ SendAvailableSendWindow();
+ QuicByteCount expected_send_window =
+ kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Lose a packet to exit slow start.
+ LoseNPackets(1);
+ size_t packets_in_recovery_window = expected_send_window / kDefaultTCPMSS;
+
+ // We should now have fallen out of slow start with a reduced window.
+ expected_send_window *= kRenoBeta;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Recovery phase. We need to ack every packet in the recovery window before
+ // we exit recovery.
+ size_t number_of_packets_in_window = expected_send_window / kDefaultTCPMSS;
+ QUIC_DLOG(INFO) << "number_packets: " << number_of_packets_in_window;
+ AckNPackets(packets_in_recovery_window);
+ SendAvailableSendWindow();
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // We need to ack an entire window before we increase CWND by 1.
+ AckNPackets(number_of_packets_in_window - 2);
+ SendAvailableSendWindow();
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Next ack should increase cwnd by 1.
+ AckNPackets(1);
+ expected_send_window += kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Now RTO and ensure slow start gets reset.
+ EXPECT_TRUE(sender_->hybrid_slow_start().started());
+ sender_->OnRetransmissionTimeout(true);
+ EXPECT_FALSE(sender_->hybrid_slow_start().started());
+}
+
+TEST_F(TcpCubicSenderBytesTest, SlowStartPacketLossWithLargeReduction) {
+ QuicConfig config;
+ QuicTagVector options;
+ options.push_back(kSSLR);
+ QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
+ sender_->SetFromConfig(config, Perspective::IS_SERVER);
+
+ sender_->SetNumEmulatedConnections(1);
+ const int kNumberOfAcks = (kDefaultWindowTCP / (2 * kDefaultTCPMSS)) - 1;
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ }
+ SendAvailableSendWindow();
+ QuicByteCount expected_send_window =
+ kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Lose a packet to exit slow start. We should now have fallen out of
+ // slow start with a window reduced by 1.
+ LoseNPackets(1);
+ expected_send_window -= kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Lose 5 packets in recovery and verify that congestion window is reduced
+ // further.
+ LoseNPackets(5);
+ expected_send_window -= 5 * kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+  // Lose another 10 packets and ensure the window falls below half the peak
+  // CWND, because we never acked the full IW.
+ LoseNPackets(10);
+ expected_send_window -= 10 * kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ size_t packets_in_recovery_window = expected_send_window / kDefaultTCPMSS;
+
+ // Recovery phase. We need to ack every packet in the recovery window before
+ // we exit recovery.
+ size_t number_of_packets_in_window = expected_send_window / kDefaultTCPMSS;
+ QUIC_DLOG(INFO) << "number_packets: " << number_of_packets_in_window;
+ AckNPackets(packets_in_recovery_window);
+ SendAvailableSendWindow();
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // We need to ack an entire window before we increase CWND by 1.
+ AckNPackets(number_of_packets_in_window - 1);
+ SendAvailableSendWindow();
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Next ack should increase cwnd by 1.
+ AckNPackets(1);
+ expected_send_window += kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Now RTO and ensure slow start gets reset.
+ EXPECT_TRUE(sender_->hybrid_slow_start().started());
+ sender_->OnRetransmissionTimeout(true);
+ EXPECT_FALSE(sender_->hybrid_slow_start().started());
+}
+
+TEST_F(TcpCubicSenderBytesTest, SlowStartHalfPacketLossWithLargeReduction) {
+ QuicConfig config;
+ QuicTagVector options;
+ options.push_back(kSSLR);
+ QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
+ sender_->SetFromConfig(config, Perspective::IS_SERVER);
+
+ sender_->SetNumEmulatedConnections(1);
+ const int kNumberOfAcks = 10;
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ // Send our full send window in half sized packets.
+ SendAvailableSendWindow(kDefaultTCPMSS / 2);
+ AckNPackets(2);
+ }
+ SendAvailableSendWindow(kDefaultTCPMSS / 2);
+ QuicByteCount expected_send_window =
+ kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Lose a packet to exit slow start. We should now have fallen out of
+ // slow start with a window reduced by 1.
+ LoseNPackets(1);
+ expected_send_window -= kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Lose 10 packets in recovery and verify that congestion window is reduced
+ // by 5 packets.
+ LoseNPackets(10, kDefaultTCPMSS / 2);
+ expected_send_window -= 5 * kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+}
+
+TEST_F(TcpCubicSenderBytesTest, SlowStartPacketLossWithMaxHalfReduction) {
+ QuicConfig config;
+ QuicTagVector options;
+ options.push_back(kSSLR);
+ QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
+ sender_->SetFromConfig(config, Perspective::IS_SERVER);
+
+ sender_->SetNumEmulatedConnections(1);
+ const int kNumberOfAcks = kInitialCongestionWindowPackets / 2;
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ }
+ SendAvailableSendWindow();
+ QuicByteCount expected_send_window =
+ kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Lose a packet to exit slow start. We should now have fallen out of
+ // slow start with a window reduced by 1.
+ LoseNPackets(1);
+ expected_send_window -= kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Lose half the outstanding packets in recovery and verify the congestion
+ // window is only reduced by a max of half.
+ LoseNPackets(kNumberOfAcks * 2);
+ expected_send_window -= (kNumberOfAcks * 2 - 1) * kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+ LoseNPackets(5);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+}
+
+TEST_F(TcpCubicSenderBytesTest, NoPRRWhenLessThanOnePacketInFlight) {
+ SendAvailableSendWindow();
+ LoseNPackets(kInitialCongestionWindowPackets - 1);
+ AckNPackets(1);
+ // PRR will allow 2 packets for every ack during recovery.
+ EXPECT_EQ(2, SendAvailableSendWindow());
+ // Simulate abandoning all packets by supplying a bytes_in_flight of 0.
+ // PRR should now allow a packet to be sent, even though prr's state variables
+ // believe it has sent enough packets.
+ EXPECT_TRUE(sender_->CanSend(0));
+}
+
+TEST_F(TcpCubicSenderBytesTest, SlowStartPacketLossPRR) {
+ sender_->SetNumEmulatedConnections(1);
+ // Test based on the first example in RFC6937.
+ // Ack 10 packets in 5 acks to raise the CWND to 20, as in the example.
+ const int kNumberOfAcks = 5;
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ }
+ SendAvailableSendWindow();
+ QuicByteCount expected_send_window =
+ kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ LoseNPackets(1);
+
+ // We should now have fallen out of slow start with a reduced window.
+ size_t send_window_before_loss = expected_send_window;
+ expected_send_window *= kRenoBeta;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Testing TCP proportional rate reduction.
+ // We should send packets paced over the received acks for the remaining
+ // outstanding packets. The number of packets before we exit recovery is the
+ // original CWND minus the packet that has been lost and the one which
+ // triggered the loss.
+ size_t remaining_packets_in_recovery =
+ send_window_before_loss / kDefaultTCPMSS - 2;
+
+ for (size_t i = 0; i < remaining_packets_in_recovery; ++i) {
+ AckNPackets(1);
+ SendAvailableSendWindow();
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+ }
+
+ // We need to ack another window before we increase CWND by 1.
+ size_t number_of_packets_in_window = expected_send_window / kDefaultTCPMSS;
+ for (size_t i = 0; i < number_of_packets_in_window; ++i) {
+ AckNPackets(1);
+ EXPECT_EQ(1, SendAvailableSendWindow());
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+ }
+
+ AckNPackets(1);
+ expected_send_window += kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+}
+
+TEST_F(TcpCubicSenderBytesTest, SlowStartBurstPacketLossPRR) {
+ sender_->SetNumEmulatedConnections(1);
+ // Test based on the second example in RFC6937, though we also implement
+ // forward acknowledgements, so the first two incoming acks will trigger
+ // PRR immediately.
+ // Ack 20 packets in 10 acks to raise the CWND to 30.
+ const int kNumberOfAcks = 10;
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ }
+ SendAvailableSendWindow();
+ QuicByteCount expected_send_window =
+ kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+  // Lose one more packet than the congestion window reduction, so that after
+  // the loss, bytes_in_flight is less than the congestion window.
+ size_t send_window_after_loss = kRenoBeta * expected_send_window;
+ size_t num_packets_to_lose =
+ (expected_send_window - send_window_after_loss) / kDefaultTCPMSS + 1;
+ LoseNPackets(num_packets_to_lose);
+ // Immediately after the loss, ensure at least one packet can be sent.
+ // Losses without subsequent acks can occur with timer based loss detection.
+ EXPECT_TRUE(sender_->CanSend(bytes_in_flight_));
+ AckNPackets(1);
+
+ // We should now have fallen out of slow start with a reduced window.
+ expected_send_window *= kRenoBeta;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Only 2 packets should be allowed to be sent, per PRR-SSRB.
+ EXPECT_EQ(2, SendAvailableSendWindow());
+
+ // Ack the next packet, which triggers another loss.
+ LoseNPackets(1);
+ AckNPackets(1);
+
+ // Send 2 packets to simulate PRR-SSRB.
+ EXPECT_EQ(2, SendAvailableSendWindow());
+
+ // Ack the next packet, which triggers another loss.
+ LoseNPackets(1);
+ AckNPackets(1);
+
+ // Send 2 packets to simulate PRR-SSRB.
+ EXPECT_EQ(2, SendAvailableSendWindow());
+
+ // Exit recovery and return to sending at the new rate.
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ AckNPackets(1);
+ EXPECT_EQ(1, SendAvailableSendWindow());
+ }
+}
+
+TEST_F(TcpCubicSenderBytesTest, RTOCongestionWindow) {
+ EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
+ // Expect the window to decrease to the minimum once the RTO fires and slow
+ // start threshold to be set to 1/2 of the CWND.
+ sender_->OnRetransmissionTimeout(true);
+ EXPECT_EQ(2 * kDefaultTCPMSS, sender_->GetCongestionWindow());
+ EXPECT_EQ(5u * kDefaultTCPMSS, sender_->GetSlowStartThreshold());
+}
+
+TEST_F(TcpCubicSenderBytesTest, RTOCongestionWindowNoRetransmission) {
+ EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
+
+ // Expect the window to remain unchanged if the RTO fires but no packets are
+ // retransmitted.
+ sender_->OnRetransmissionTimeout(false);
+ EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
+}
+
+TEST_F(TcpCubicSenderBytesTest, TcpCubicResetEpochOnQuiescence) {
+ const int kMaxCongestionWindow = 50;
+ const QuicByteCount kMaxCongestionWindowBytes =
+ kMaxCongestionWindow * kDefaultTCPMSS;
+ int num_sent = SendAvailableSendWindow();
+
+ // Make sure we fall out of slow start.
+ QuicByteCount saved_cwnd = sender_->GetCongestionWindow();
+ LoseNPackets(1);
+ EXPECT_GT(saved_cwnd, sender_->GetCongestionWindow());
+
+ // Ack the rest of the outstanding packets to get out of recovery.
+ for (int i = 1; i < num_sent; ++i) {
+ AckNPackets(1);
+ }
+ EXPECT_EQ(0u, bytes_in_flight_);
+
+ // Send a new window of data and ack all; cubic growth should occur.
+ saved_cwnd = sender_->GetCongestionWindow();
+ num_sent = SendAvailableSendWindow();
+ for (int i = 0; i < num_sent; ++i) {
+ AckNPackets(1);
+ }
+ EXPECT_LT(saved_cwnd, sender_->GetCongestionWindow());
+ EXPECT_GT(kMaxCongestionWindowBytes, sender_->GetCongestionWindow());
+ EXPECT_EQ(0u, bytes_in_flight_);
+
+  // Quiescent time of 100 seconds.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(100000));
+
+ // Send new window of data and ack one packet. Cubic epoch should have
+ // been reset; ensure cwnd increase is not dramatic.
+ saved_cwnd = sender_->GetCongestionWindow();
+ SendAvailableSendWindow();
+ AckNPackets(1);
+ EXPECT_NEAR(saved_cwnd, sender_->GetCongestionWindow(), kDefaultTCPMSS);
+ EXPECT_GT(kMaxCongestionWindowBytes, sender_->GetCongestionWindow());
+}
+
+TEST_F(TcpCubicSenderBytesTest, MultipleLossesInOneWindow) {
+ SendAvailableSendWindow();
+ const QuicByteCount initial_window = sender_->GetCongestionWindow();
+ LosePacket(acked_packet_number_ + 1);
+ const QuicByteCount post_loss_window = sender_->GetCongestionWindow();
+ EXPECT_GT(initial_window, post_loss_window);
+ LosePacket(acked_packet_number_ + 3);
+ EXPECT_EQ(post_loss_window, sender_->GetCongestionWindow());
+ LosePacket(packet_number_ - 1);
+ EXPECT_EQ(post_loss_window, sender_->GetCongestionWindow());
+
+ // Lose a later packet and ensure the window decreases.
+ LosePacket(packet_number_);
+ EXPECT_GT(post_loss_window, sender_->GetCongestionWindow());
+}
+
+TEST_F(TcpCubicSenderBytesTest, ConfigureMaxInitialWindow) {
+ SetQuicReloadableFlag(quic_unified_iw_options, false);
+ QuicConfig config;
+
+ // Verify that kCOPT: kIW10 forces the congestion window to the default of 10.
+ QuicTagVector options;
+ options.push_back(kIW10);
+ QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
+ sender_->SetFromConfig(config, Perspective::IS_SERVER);
+ EXPECT_EQ(10u * kDefaultTCPMSS, sender_->GetCongestionWindow());
+}
+
+TEST_F(TcpCubicSenderBytesTest, SetInitialCongestionWindow) {
+ EXPECT_NE(3u * kDefaultTCPMSS, sender_->GetCongestionWindow());
+ sender_->SetInitialCongestionWindowInPackets(3);
+ EXPECT_EQ(3u * kDefaultTCPMSS, sender_->GetCongestionWindow());
+}
+
+TEST_F(TcpCubicSenderBytesTest, 2ConnectionCongestionAvoidanceAtEndOfRecovery) {
+ sender_->SetNumEmulatedConnections(2);
+ // Ack 10 packets in 5 acks to raise the CWND to 20.
+ const int kNumberOfAcks = 5;
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ }
+ SendAvailableSendWindow();
+ QuicByteCount expected_send_window =
+ kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ LoseNPackets(1);
+
+ // We should now have fallen out of slow start with a reduced window.
+ expected_send_window = expected_send_window * sender_->GetRenoBeta();
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // No congestion window growth should occur in recovery phase, i.e., until the
+ // currently outstanding 20 packets are acked.
+ for (int i = 0; i < 10; ++i) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ EXPECT_TRUE(sender_->InRecovery());
+ AckNPackets(2);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+ }
+ EXPECT_FALSE(sender_->InRecovery());
+
+ // Out of recovery now. Congestion window should not grow for half an RTT.
+ size_t packets_in_send_window = expected_send_window / kDefaultTCPMSS;
+ SendAvailableSendWindow();
+ AckNPackets(packets_in_send_window / 2 - 2);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Next ack should increase congestion window by 1MSS.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ expected_send_window += kDefaultTCPMSS;
+ packets_in_send_window += 1;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Congestion window should remain steady again for half an RTT.
+ SendAvailableSendWindow();
+ AckNPackets(packets_in_send_window / 2 - 1);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Next ack should cause congestion window to grow by 1MSS.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ expected_send_window += kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+}
+
+TEST_F(TcpCubicSenderBytesTest, 1ConnectionCongestionAvoidanceAtEndOfRecovery) {
+ sender_->SetNumEmulatedConnections(1);
+ // Ack 10 packets in 5 acks to raise the CWND to 20.
+ const int kNumberOfAcks = 5;
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ }
+ SendAvailableSendWindow();
+ QuicByteCount expected_send_window =
+ kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ LoseNPackets(1);
+
+ // We should now have fallen out of slow start with a reduced window.
+ expected_send_window *= kRenoBeta;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // No congestion window growth should occur in recovery phase, i.e., until the
+ // currently outstanding 20 packets are acked.
+ for (int i = 0; i < 10; ++i) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ EXPECT_TRUE(sender_->InRecovery());
+ AckNPackets(2);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+ }
+ EXPECT_FALSE(sender_->InRecovery());
+
+ // Out of recovery now. Congestion window should not grow during RTT.
+ for (uint64_t i = 0; i < expected_send_window / kDefaultTCPMSS - 2; i += 2) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+ }
+
+ // Next ack should cause congestion window to grow by 1MSS.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ expected_send_window += kDefaultTCPMSS;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+}
+
+TEST_F(TcpCubicSenderBytesTest, BandwidthResumption) {
+  // Test that, when provided with CachedNetworkParameters and opted in to the
+  // bandwidth resumption experiment, TcpCubicSenderBytes sets the initial
+  // CWND appropriately.
+
+ // Set some common values.
+ const QuicPacketCount kNumberOfPackets = 123;
+ const QuicBandwidth kBandwidthEstimate =
+ QuicBandwidth::FromBytesPerSecond(kNumberOfPackets * kDefaultTCPMSS);
+ const QuicTime::Delta kRttEstimate = QuicTime::Delta::FromSeconds(1);
+ sender_->AdjustNetworkParameters(kBandwidthEstimate, kRttEstimate);
+ EXPECT_EQ(kNumberOfPackets * kDefaultTCPMSS, sender_->GetCongestionWindow());
+
+ // Resume with an illegal value of 0 and verify the server ignores it.
+ sender_->AdjustNetworkParameters(QuicBandwidth::Zero(), kRttEstimate);
+ EXPECT_EQ(kNumberOfPackets * kDefaultTCPMSS, sender_->GetCongestionWindow());
+
+ // Resumed CWND is limited to be in a sensible range.
+ const QuicBandwidth kUnreasonableBandwidth =
+ QuicBandwidth::FromBytesPerSecond((kMaxCongestionWindowPackets + 1) *
+ kDefaultTCPMSS);
+ sender_->AdjustNetworkParameters(kUnreasonableBandwidth,
+ QuicTime::Delta::FromSeconds(1));
+ EXPECT_EQ(kMaxCongestionWindowPackets * kDefaultTCPMSS,
+ sender_->GetCongestionWindow());
+}
+
+TEST_F(TcpCubicSenderBytesTest, PaceBelowCWND) {
+ QuicConfig config;
+
+ // Verify that kCOPT: kMIN4 forces the min CWND to 1 packet, but allows up
+ // to 4 to be sent.
+ QuicTagVector options;
+ options.push_back(kMIN4);
+ QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
+ sender_->SetFromConfig(config, Perspective::IS_SERVER);
+ sender_->OnRetransmissionTimeout(true);
+ EXPECT_EQ(kDefaultTCPMSS, sender_->GetCongestionWindow());
+ EXPECT_TRUE(sender_->CanSend(kDefaultTCPMSS));
+ EXPECT_TRUE(sender_->CanSend(2 * kDefaultTCPMSS));
+ EXPECT_TRUE(sender_->CanSend(3 * kDefaultTCPMSS));
+ EXPECT_FALSE(sender_->CanSend(4 * kDefaultTCPMSS));
+}
+
+TEST_F(TcpCubicSenderBytesTest, NoPRR) {
+ QuicTime::Delta rtt = QuicTime::Delta::FromMilliseconds(100);
+ sender_->rtt_stats_.UpdateRtt(rtt, QuicTime::Delta::Zero(), QuicTime::Zero());
+
+ sender_->SetNumEmulatedConnections(1);
+ // Verify that kCOPT: kNPRR allows all packets to be sent, even if only one
+ // ack has been received.
+ QuicTagVector options;
+ options.push_back(kNPRR);
+ QuicConfig config;
+ QuicConfigPeer::SetReceivedConnectionOptions(&config, options);
+ sender_->SetFromConfig(config, Perspective::IS_SERVER);
+ SendAvailableSendWindow();
+ LoseNPackets(9);
+ AckNPackets(1);
+
+ // We should now have fallen out of slow start with a reduced window.
+ EXPECT_EQ(kRenoBeta * kDefaultWindowTCP, sender_->GetCongestionWindow());
+ const QuicPacketCount window_in_packets =
+ kRenoBeta * kDefaultWindowTCP / kDefaultTCPMSS;
+ const QuicBandwidth expected_pacing_rate =
+ QuicBandwidth::FromBytesAndTimeDelta(kRenoBeta * kDefaultWindowTCP,
+ sender_->rtt_stats_.smoothed_rtt());
+ EXPECT_EQ(expected_pacing_rate, sender_->PacingRate(0));
+ EXPECT_EQ(window_in_packets,
+ static_cast<uint64_t>(SendAvailableSendWindow()));
+ EXPECT_EQ(expected_pacing_rate,
+ sender_->PacingRate(kRenoBeta * kDefaultWindowTCP));
+}
+
+TEST_F(TcpCubicSenderBytesTest, ResetAfterConnectionMigration) {
+ // Starts from slow start.
+ sender_->SetNumEmulatedConnections(1);
+ const int kNumberOfAcks = 10;
+ for (int i = 0; i < kNumberOfAcks; ++i) {
+ // Send our full send window.
+ SendAvailableSendWindow();
+ AckNPackets(2);
+ }
+ SendAvailableSendWindow();
+ QuicByteCount expected_send_window =
+ kDefaultWindowTCP + (kDefaultTCPMSS * 2 * kNumberOfAcks);
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+
+ // Loses a packet to exit slow start.
+ LoseNPackets(1);
+
+ // We should now have fallen out of slow start with a reduced window. Slow
+ // start threshold is also updated.
+ expected_send_window *= kRenoBeta;
+ EXPECT_EQ(expected_send_window, sender_->GetCongestionWindow());
+ EXPECT_EQ(expected_send_window, sender_->GetSlowStartThreshold());
+
+ // Resets cwnd and slow start threshold on connection migrations.
+ sender_->OnConnectionMigration();
+ EXPECT_EQ(kDefaultWindowTCP, sender_->GetCongestionWindow());
+ EXPECT_EQ(kMaxCongestionWindowPackets * kDefaultTCPMSS,
+ sender_->GetSlowStartThreshold());
+ EXPECT_FALSE(sender_->hybrid_slow_start().started());
+}
+
+TEST_F(TcpCubicSenderBytesTest, DefaultMaxCwnd) {
+ RttStats rtt_stats;
+ QuicConnectionStats stats;
+ std::unique_ptr<SendAlgorithmInterface> sender(SendAlgorithmInterface::Create(
+ &clock_, &rtt_stats, /*unacked_packets=*/nullptr, kCubicBytes,
+ QuicRandom::GetInstance(), &stats, kInitialCongestionWindow));
+
+ AckedPacketVector acked_packets;
+ LostPacketVector missing_packets;
+ for (uint64_t i = 1; i < kDefaultMaxCongestionWindowPackets; ++i) {
+ acked_packets.clear();
+ acked_packets.push_back(
+ AckedPacket(QuicPacketNumber(i), 1350, QuicTime::Zero()));
+ sender->OnCongestionEvent(true, sender->GetCongestionWindow(), clock_.Now(),
+ acked_packets, missing_packets);
+ }
+ EXPECT_EQ(kDefaultMaxCongestionWindowPackets,
+ sender->GetCongestionWindow() / kDefaultTCPMSS);
+}
+
+TEST_F(TcpCubicSenderBytesTest, LimitCwndIncreaseInCongestionAvoidance) {
+ // Enable Cubic.
+ sender_ = QuicMakeUnique<TcpCubicSenderBytesPeer>(&clock_, false);
+
+ int num_sent = SendAvailableSendWindow();
+
+ // Make sure we fall out of slow start.
+ QuicByteCount saved_cwnd = sender_->GetCongestionWindow();
+ LoseNPackets(1);
+ EXPECT_GT(saved_cwnd, sender_->GetCongestionWindow());
+
+ // Ack the rest of the outstanding packets to get out of recovery.
+ for (int i = 1; i < num_sent; ++i) {
+ AckNPackets(1);
+ }
+ EXPECT_EQ(0u, bytes_in_flight_);
+ // Send a new window of data and ack all; cubic growth should occur.
+ saved_cwnd = sender_->GetCongestionWindow();
+ num_sent = SendAvailableSendWindow();
+
+ // Ack packets until the CWND increases.
+ while (sender_->GetCongestionWindow() == saved_cwnd) {
+ AckNPackets(1);
+ SendAvailableSendWindow();
+ }
+ // Bytes in flight may be larger than the CWND if the CWND isn't an exact
+ // multiple of the packet sizes being sent.
+ EXPECT_GE(bytes_in_flight_, sender_->GetCongestionWindow());
+ saved_cwnd = sender_->GetCongestionWindow();
+
+ // Advance time 2 seconds waiting for an ack.
+ clock_.AdvanceTime(QuicTime::Delta::FromMilliseconds(2000));
+
+ // Ack two packets. The CWND should increase by only one packet.
+ AckNPackets(2);
+ EXPECT_EQ(saved_cwnd + kDefaultTCPMSS, sender_->GetCongestionWindow());
+}
+
+} // namespace test
+} // namespace quic
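The congestion-window arithmetic asserted above boils down to three Reno-mode rules: grow by one MSS per acked packet while in slow start, cut the window to kRenoBeta * cwnd on loss, and grow by one MSS per full congestion window of acked bytes in congestion avoidance. The sketch below restates that bookkeeping as a self-contained program; it is an illustration of what the expectations encode, not the TcpCubicSenderBytes implementation, and the constants (1460-byte MSS, 10-packet initial window, 0.7 backoff) are assumptions mirroring the test fixture.

// Illustrative Reno-mode congestion window bookkeeping (not production code).
#include <cstdint>
#include <iostream>

namespace {

constexpr uint64_t kMss = 1460;                 // Assumed kDefaultTCPMSS.
constexpr uint64_t kInitialWindow = 10 * kMss;  // Assumed kDefaultWindowTCP.
constexpr double kBeta = 0.7;                   // Assumed kRenoBeta.

struct RenoSketch {
  uint64_t cwnd = kInitialWindow;
  uint64_t ssthresh = UINT64_MAX;
  uint64_t acked_since_growth = 0;

  void OnAck(uint64_t bytes) {
    if (cwnd < ssthresh) {
      cwnd += kMss;  // Slow start: one MSS per acked packet.
      return;
    }
    // Congestion avoidance: one MSS per congestion window of acked bytes.
    acked_since_growth += bytes;
    if (acked_since_growth >= cwnd) {
      acked_since_growth -= cwnd;
      cwnd += kMss;
    }
  }

  void OnLoss() {
    cwnd = static_cast<uint64_t>(kBeta * cwnd);  // Multiplicative decrease.
    ssthresh = cwnd;                             // Leave slow start for good.
  }
};

}  // namespace

int main() {
  RenoSketch sender;
  for (int i = 0; i < 20; ++i) {
    sender.OnAck(kMss);  // 20 acks in slow start, as in the tests above.
  }
  std::cout << "cwnd after slow start: " << sender.cwnd << "\n";  // 30 * kMss.
  sender.OnLoss();
  std::cout << "cwnd after loss: " << sender.cwnd << "\n";  // ~0.7 * 30 * kMss.
  return 0;
}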
diff --git a/quic/core/congestion_control/uber_loss_algorithm.cc b/quic/core/congestion_control/uber_loss_algorithm.cc
new file mode 100644
index 0000000..8db151b
--- /dev/null
+++ b/quic/core/congestion_control/uber_loss_algorithm.cc
@@ -0,0 +1,86 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/uber_loss_algorithm.h"
+
+#include <algorithm>
+
+namespace quic {
+
+UberLossAlgorithm::UberLossAlgorithm() : UberLossAlgorithm(kNack) {}
+
+UberLossAlgorithm::UberLossAlgorithm(LossDetectionType loss_type)
+ : loss_type_(loss_type) {
+ SetLossDetectionType(loss_type);
+ for (int8_t i = INITIAL_DATA; i < NUM_PACKET_NUMBER_SPACES; ++i) {
+ general_loss_algorithms_[i].SetPacketNumberSpace(
+ static_cast<PacketNumberSpace>(i));
+ }
+}
+
+LossDetectionType UberLossAlgorithm::GetLossDetectionType() const {
+ return loss_type_;
+}
+
+void UberLossAlgorithm::SetLossDetectionType(LossDetectionType loss_type) {
+ loss_type_ = loss_type;
+ for (auto& loss_algorithm : general_loss_algorithms_) {
+ loss_algorithm.SetLossDetectionType(loss_type);
+ }
+}
+
+void UberLossAlgorithm::DetectLosses(
+ const QuicUnackedPacketMap& unacked_packets,
+ QuicTime time,
+ const RttStats& rtt_stats,
+ QuicPacketNumber /*largest_newly_acked*/,
+ const AckedPacketVector& packets_acked,
+ LostPacketVector* packets_lost) {
+ DCHECK(unacked_packets.use_uber_loss_algorithm());
+ for (int8_t i = INITIAL_DATA; i < NUM_PACKET_NUMBER_SPACES; ++i) {
+ const QuicPacketNumber largest_acked =
+ unacked_packets.GetLargestAckedOfPacketNumberSpace(
+ static_cast<PacketNumberSpace>(i));
+ if (!largest_acked.IsInitialized() ||
+ unacked_packets.GetLeastUnacked() > largest_acked) {
+      // Skip loss detection if no packet in this packet number space has been
+      // acked yet, or if the least unacked packet is greater than the largest
+      // acked packet in this space.
+ continue;
+ }
+
+ general_loss_algorithms_[i].DetectLosses(unacked_packets, time, rtt_stats,
+ largest_acked, packets_acked,
+ packets_lost);
+ }
+}
+
+QuicTime UberLossAlgorithm::GetLossTimeout() const {
+ QuicTime loss_timeout = QuicTime::Zero();
+ // Returns the earliest non-zero loss timeout.
+ for (int8_t i = INITIAL_DATA; i < NUM_PACKET_NUMBER_SPACES; ++i) {
+ const QuicTime timeout = general_loss_algorithms_[i].GetLossTimeout();
+ if (!loss_timeout.IsInitialized()) {
+ loss_timeout = timeout;
+ continue;
+ }
+ if (timeout.IsInitialized()) {
+ loss_timeout = std::min(loss_timeout, timeout);
+ }
+ }
+ return loss_timeout;
+}
+
+void UberLossAlgorithm::SpuriousRetransmitDetected(
+ const QuicUnackedPacketMap& unacked_packets,
+ QuicTime time,
+ const RttStats& rtt_stats,
+ QuicPacketNumber spurious_retransmission) {
+ DCHECK(unacked_packets.use_uber_loss_algorithm());
+ general_loss_algorithms_[unacked_packets.GetPacketNumberSpace(
+ spurious_retransmission)]
+ .SpuriousRetransmitDetected(unacked_packets, time, rtt_stats,
+ spurious_retransmission);
+}
+
+} // namespace quic
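GetLossTimeout() above reduces the per-packet-number-space timers to a single deadline: spaces whose timer is unset (QuicTime::Zero()) are skipped, and otherwise the earliest timeout wins. The standalone sketch below shows the same aggregation with std::optional standing in for QuicTime so it compiles without the QUIC headers; the names are illustrative, not part of the QUIC API.

#include <array>
#include <cassert>
#include <cstdint>
#include <optional>

// Hypothetical stand-in: nullopt plays the role of an unset (zero) QuicTime.
using Micros = int64_t;

std::optional<Micros> EarliestTimeout(
    const std::array<std::optional<Micros>, 3>& per_space_timeouts) {
  std::optional<Micros> earliest;
  for (const auto& timeout : per_space_timeouts) {
    if (!timeout.has_value()) {
      continue;  // This packet number space has no loss timer armed.
    }
    if (!earliest.has_value() || *timeout < *earliest) {
      earliest = timeout;  // Keep the earliest armed timer seen so far.
    }
  }
  return earliest;  // nullopt when no space has a timer armed.
}

int main() {
  assert(EarliestTimeout({std::nullopt, Micros{500}, Micros{200}}) ==
         Micros{200});
  assert(!EarliestTimeout({std::nullopt, std::nullopt, std::nullopt})
              .has_value());
  return 0;
}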
diff --git a/quic/core/congestion_control/uber_loss_algorithm.h b/quic/core/congestion_control/uber_loss_algorithm.h
new file mode 100644
index 0000000..dddbcb3
--- /dev/null
+++ b/quic/core/congestion_control/uber_loss_algorithm.h
@@ -0,0 +1,53 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_UBER_LOSS_ALGORITHM_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_UBER_LOSS_ALGORITHM_H_
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/general_loss_algorithm.h"
+
+namespace quic {
+
+// This class comprises multiple loss algorithms, one per packet number space.
+class QUIC_EXPORT_PRIVATE UberLossAlgorithm : public LossDetectionInterface {
+ public:
+ UberLossAlgorithm();
+ explicit UberLossAlgorithm(LossDetectionType loss_type);
+ UberLossAlgorithm(const UberLossAlgorithm&) = delete;
+ UberLossAlgorithm& operator=(const UberLossAlgorithm&) = delete;
+ ~UberLossAlgorithm() override {}
+
+ LossDetectionType GetLossDetectionType() const override;
+
+  // Switches the loss detection type to |loss_type| and resets the loss
+  // algorithm for all packet number spaces.
+ void SetLossDetectionType(LossDetectionType loss_type);
+
+ // Detects lost packets.
+ void DetectLosses(const QuicUnackedPacketMap& unacked_packets,
+ QuicTime time,
+ const RttStats& rtt_stats,
+ QuicPacketNumber largest_newly_acked,
+ const AckedPacketVector& packets_acked,
+ LostPacketVector* packets_lost) override;
+
+ // Returns the earliest time the early retransmit timer should be active.
+ QuicTime GetLossTimeout() const override;
+
+ // Increases the loss detection threshold for time loss detection.
+ void SpuriousRetransmitDetected(
+ const QuicUnackedPacketMap& unacked_packets,
+ QuicTime time,
+ const RttStats& rtt_stats,
+ QuicPacketNumber spurious_retransmission) override;
+
+ private:
+ LossDetectionType loss_type_;
+ // One loss algorithm per packet number space.
+ GeneralLossAlgorithm general_loss_algorithms_[NUM_PACKET_NUMBER_SPACES];
+};
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_UBER_LOSS_ALGORITHM_H_
diff --git a/quic/core/congestion_control/uber_loss_algorithm_test.cc b/quic/core/congestion_control/uber_loss_algorithm_test.cc
new file mode 100644
index 0000000..9aa2041
--- /dev/null
+++ b/quic/core/congestion_control/uber_loss_algorithm_test.cc
@@ -0,0 +1,159 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/uber_loss_algorithm.h"
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
+#include "net/third_party/quiche/src/quic/core/quic_utils.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_ptr_util.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+#include "net/third_party/quiche/src/quic/test_tools/mock_clock.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_unacked_packet_map_peer.h"
+
+namespace quic {
+namespace test {
+namespace {
+
+// Default packet length.
+const uint32_t kDefaultLength = 1000;
+
+class UberLossAlgorithmTest : public QuicTest {
+ protected:
+ UberLossAlgorithmTest() {
+ SetQuicReloadableFlag(quic_use_uber_loss_algorithm, true);
+ unacked_packets_ =
+ QuicMakeUnique<QuicUnackedPacketMap>(Perspective::IS_CLIENT);
+ rtt_stats_.UpdateRtt(QuicTime::Delta::FromMilliseconds(100),
+ QuicTime::Delta::Zero(), clock_.Now());
+ EXPECT_LT(0, rtt_stats_.smoothed_rtt().ToMicroseconds());
+ }
+
+ void SendPacket(uint64_t packet_number, EncryptionLevel encryption_level) {
+ QuicStreamFrame frame;
+ frame.stream_id =
+ encryption_level == ENCRYPTION_NONE
+ ? QuicUtils::GetCryptoStreamId(
+ CurrentSupportedVersions()[0].transport_version)
+ : QuicUtils::GetHeadersStreamId(
+ CurrentSupportedVersions()[0].transport_version);
+ SerializedPacket packet(QuicPacketNumber(packet_number),
+ PACKET_1BYTE_PACKET_NUMBER, nullptr, kDefaultLength,
+ false, false);
+ packet.encryption_level = encryption_level;
+ packet.retransmittable_frames.push_back(QuicFrame(frame));
+ unacked_packets_->AddSentPacket(&packet, QuicPacketNumber(),
+ NOT_RETRANSMISSION, clock_.Now(), true);
+ }
+
+ void AckPackets(const std::vector<uint64_t>& packets_acked) {
+ packets_acked_.clear();
+ for (uint64_t acked : packets_acked) {
+ unacked_packets_->RemoveFromInFlight(QuicPacketNumber(acked));
+ packets_acked_.push_back(AckedPacket(QuicPacketNumber(acked),
+ kMaxPacketSize, QuicTime::Zero()));
+ }
+ }
+
+ void VerifyLosses(uint64_t largest_newly_acked,
+ const AckedPacketVector& packets_acked,
+ const std::vector<uint64_t>& losses_expected) {
+ LostPacketVector lost_packets;
+ loss_algorithm_.DetectLosses(*unacked_packets_, clock_.Now(), rtt_stats_,
+ QuicPacketNumber(largest_newly_acked),
+ packets_acked, &lost_packets);
+ ASSERT_EQ(losses_expected.size(), lost_packets.size());
+ for (size_t i = 0; i < losses_expected.size(); ++i) {
+ EXPECT_EQ(lost_packets[i].packet_number,
+ QuicPacketNumber(losses_expected[i]));
+ }
+ }
+
+ MockClock clock_;
+ std::unique_ptr<QuicUnackedPacketMap> unacked_packets_;
+ RttStats rtt_stats_;
+ UberLossAlgorithm loss_algorithm_;
+ AckedPacketVector packets_acked_;
+};
+
+TEST_F(UberLossAlgorithmTest, ScenarioA) {
+  // This test mimics a scenario: the client sends 1-CHLO, 2-0RTT and 3-0RTT,
+  // times out, and retransmits the CHLO as 4-CHLO. The server acks packet 1
+  // (the ack gets lost). The server receives and buffers packets 2 and 3, then
+  // receives packet 4 and processes the handshake asynchronously, so it acks
+  // packet 4 but cannot yet process packets 2 and 3.
+ SendPacket(1, ENCRYPTION_NONE);
+ SendPacket(2, ENCRYPTION_ZERO_RTT);
+ SendPacket(3, ENCRYPTION_ZERO_RTT);
+ unacked_packets_->RemoveFromInFlight(QuicPacketNumber(1));
+ SendPacket(4, ENCRYPTION_NONE);
+
+ AckPackets({1, 4});
+ unacked_packets_->MaybeUpdateLargestAckedOfPacketNumberSpace(
+ ENCRYPTION_NONE, QuicPacketNumber(4));
+  // Verify that no packet is detected as lost.
+ VerifyLosses(4, packets_acked_, std::vector<uint64_t>{});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+}
+
+TEST_F(UberLossAlgorithmTest, ScenarioB) {
+  // This test mimics a scenario: the client sends 3-0RTT and 4-0RTT, receives
+  // the SHLO, then sends 5-1RTT and 6-1RTT.
+ SendPacket(3, ENCRYPTION_ZERO_RTT);
+ SendPacket(4, ENCRYPTION_ZERO_RTT);
+ SendPacket(5, ENCRYPTION_FORWARD_SECURE);
+ SendPacket(6, ENCRYPTION_FORWARD_SECURE);
+
+ AckPackets({4});
+ unacked_packets_->MaybeUpdateLargestAckedOfPacketNumberSpace(
+ ENCRYPTION_ZERO_RTT, QuicPacketNumber(4));
+ // No packet loss by acking 4.
+ VerifyLosses(4, packets_acked_, std::vector<uint64_t>{});
+ EXPECT_EQ(QuicTime::Zero(), loss_algorithm_.GetLossTimeout());
+
+  // Acking 6 causes packet 3 to be detected as lost.
+ AckPackets({6});
+ unacked_packets_->MaybeUpdateLargestAckedOfPacketNumberSpace(
+ ENCRYPTION_FORWARD_SECURE, QuicPacketNumber(6));
+ VerifyLosses(6, packets_acked_, std::vector<uint64_t>{3});
+ EXPECT_EQ(clock_.Now() + 1.25 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout());
+ packets_acked_.clear();
+
+ clock_.AdvanceTime(1.25 * rtt_stats_.latest_rtt());
+ // Verify 5 will be early retransmitted.
+ VerifyLosses(6, packets_acked_, {5});
+}
+
+TEST_F(UberLossAlgorithmTest, ScenarioC) {
+  // This test mimics a scenario: the server sends 1-SHLO, 2-1RTT, 3-1RTT and
+  // 4-1RTT, then retransmits the SHLO as 5-SHLO. The client receives and
+  // buffers packet 4; it receives packet 5 and then processes packet 4.
+ QuicUnackedPacketMapPeer::SetPerspective(unacked_packets_.get(),
+ Perspective::IS_SERVER);
+ SendPacket(1, ENCRYPTION_ZERO_RTT);
+ SendPacket(2, ENCRYPTION_FORWARD_SECURE);
+ SendPacket(3, ENCRYPTION_FORWARD_SECURE);
+ SendPacket(4, ENCRYPTION_FORWARD_SECURE);
+ unacked_packets_->RemoveFromInFlight(QuicPacketNumber(1));
+ SendPacket(5, ENCRYPTION_ZERO_RTT);
+
+ AckPackets({4, 5});
+ unacked_packets_->MaybeUpdateLargestAckedOfPacketNumberSpace(
+ ENCRYPTION_FORWARD_SECURE, QuicPacketNumber(4));
+ unacked_packets_->MaybeUpdateLargestAckedOfPacketNumberSpace(
+ ENCRYPTION_ZERO_RTT, QuicPacketNumber(5));
+ // No packet loss by acking 5.
+ VerifyLosses(5, packets_acked_, std::vector<uint64_t>{});
+ EXPECT_EQ(clock_.Now() + 1.25 * rtt_stats_.smoothed_rtt(),
+ loss_algorithm_.GetLossTimeout());
+ packets_acked_.clear();
+
+ clock_.AdvanceTime(1.25 * rtt_stats_.latest_rtt());
+ // Verify 2 and 3 will be early retransmitted.
+ VerifyLosses(5, packets_acked_, std::vector<uint64_t>{2, 3});
+}
+
+} // namespace
+} // namespace test
+} // namespace quic
diff --git a/quic/core/congestion_control/windowed_filter.h b/quic/core/congestion_control/windowed_filter.h
new file mode 100644
index 0000000..8729895
--- /dev/null
+++ b/quic/core/congestion_control/windowed_filter.h
@@ -0,0 +1,160 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef QUICHE_QUIC_CORE_CONGESTION_CONTROL_WINDOWED_FILTER_H_
+#define QUICHE_QUIC_CORE_CONGESTION_CONTROL_WINDOWED_FILTER_H_
+
+// Implements Kathleen Nichols' algorithm for tracking the minimum (or maximum)
+// estimate of a stream of samples over some fixed time interval. (E.g.,
+// the minimum RTT over the past five minutes.) The algorithm keeps track of
+// the best, second best, and third best min (or max) estimates, maintaining an
+// invariant that the measurement time of the n'th best >= n-1'th best.
+
+// The algorithm works as follows. On a reset, all three estimates are set to
+// the same sample. The second best estimate is then recorded in the second
+// quarter of the window, and a third best estimate is recorded in the second
+// half of the window, bounding the worst case error when the true min is
+// monotonically increasing (or true max is monotonically decreasing) over the
+// window.
+//
+// A new best sample replaces all three estimates, since the new best is lower
+// (or higher) than everything else in the window and it is the most recent.
+// The window thus effectively gets reset on every new min. The same property
+// holds true for second best and third best estimates. Specifically, when a
+// sample arrives that is better than the second best but not better than the
+// best, it replaces the second and third best estimates but not the best
+// estimate. Similarly, a sample that is better than the third best estimate
+// but not the other estimates replaces only the third best estimate.
+//
+// Finally, when the best expires, it is replaced by the second best, which in
+// turn is replaced by the third best. The newest sample replaces the third
+// best.
+
+#include "net/third_party/quiche/src/quic/core/quic_time.h"
+
+namespace quic {
+
+// Compares two values and returns true if the first is less than or equal
+// to the second.
+template <class T>
+struct MinFilter {
+ bool operator()(const T& lhs, const T& rhs) const { return lhs <= rhs; }
+};
+
+// Compares two values and returns true if the first is greater than or equal
+// to the second.
+template <class T>
+struct MaxFilter {
+ bool operator()(const T& lhs, const T& rhs) const { return lhs >= rhs; }
+};
+
+// Use the following to construct a windowed filter object of type T.
+// For example, a min filter using QuicTime as the time type:
+// WindowedFilter<T, MinFilter<T>, QuicTime, QuicTime::Delta> ObjectName;
+// A max filter using 64-bit integers as the time type:
+// WindowedFilter<T, MaxFilter<T>, uint64_t, int64_t> ObjectName;
+// Specifically, this template takes four arguments:
+// 1. T -- type of the measurement that is being filtered.
+// 2. Compare -- MinFilter<T> or MaxFilter<T>, depending on the type of filter
+// desired.
+// 3. TimeT -- the type used to represent timestamps.
+// 4. TimeDeltaT -- the type used to represent continuous time intervals between
+// two timestamps. Has to be the type of (a - b) if both |a| and |b| are
+// of type TimeT.
+template <class T, class Compare, typename TimeT, typename TimeDeltaT>
+class WindowedFilter {
+ public:
+ // |window_length| is the period after which a best estimate expires.
+ // |zero_value| is used as the uninitialized value for objects of T.
+ // Importantly, |zero_value| should be an invalid value for a true sample.
+ WindowedFilter(TimeDeltaT window_length, T zero_value, TimeT zero_time)
+ : window_length_(window_length),
+ zero_value_(zero_value),
+ estimates_{Sample(zero_value_, zero_time),
+ Sample(zero_value_, zero_time),
+ Sample(zero_value_, zero_time)} {}
+
+ // Changes the window length. Does not update any current samples.
+ void SetWindowLength(TimeDeltaT window_length) {
+ window_length_ = window_length;
+ }
+
+  // Updates the best estimates with |new_sample|, expiring and replacing
+  // estimates as necessary.
+ void Update(T new_sample, TimeT new_time) {
+ // Reset all estimates if they have not yet been initialized, if new sample
+ // is a new best, or if the newest recorded estimate is too old.
+ if (estimates_[0].sample == zero_value_ ||
+ Compare()(new_sample, estimates_[0].sample) ||
+ new_time - estimates_[2].time > window_length_) {
+ Reset(new_sample, new_time);
+ return;
+ }
+
+ if (Compare()(new_sample, estimates_[1].sample)) {
+ estimates_[1] = Sample(new_sample, new_time);
+ estimates_[2] = estimates_[1];
+ } else if (Compare()(new_sample, estimates_[2].sample)) {
+ estimates_[2] = Sample(new_sample, new_time);
+ }
+
+ // Expire and update estimates as necessary.
+ if (new_time - estimates_[0].time > window_length_) {
+ // The best estimate hasn't been updated for an entire window, so promote
+ // second and third best estimates.
+ estimates_[0] = estimates_[1];
+ estimates_[1] = estimates_[2];
+ estimates_[2] = Sample(new_sample, new_time);
+      // Check one more time: the promoted best estimate may also be outside
+      // the window, since it too may have been recorded a long time ago. A
+      // third pass is unnecessary, because if all three estimates had expired
+      // the check at the beginning of the method would have reset them.
+ if (new_time - estimates_[0].time > window_length_) {
+ estimates_[0] = estimates_[1];
+ estimates_[1] = estimates_[2];
+ }
+ return;
+ }
+ if (estimates_[1].sample == estimates_[0].sample &&
+ new_time - estimates_[1].time > window_length_ >> 2) {
+ // A quarter of the window has passed without a better sample, so the
+ // second-best estimate is taken from the second quarter of the window.
+ estimates_[2] = estimates_[1] = Sample(new_sample, new_time);
+ return;
+ }
+
+ if (estimates_[2].sample == estimates_[1].sample &&
+ new_time - estimates_[2].time > window_length_ >> 1) {
+      // Half of the window has passed without a better estimate, so take a
+      // third-best estimate from the second half of the window.
+ estimates_[2] = Sample(new_sample, new_time);
+ }
+ }
+
+ // Resets all estimates to new sample.
+ void Reset(T new_sample, TimeT new_time) {
+ estimates_[0] = estimates_[1] = estimates_[2] =
+ Sample(new_sample, new_time);
+ }
+
+ T GetBest() const { return estimates_[0].sample; }
+ T GetSecondBest() const { return estimates_[1].sample; }
+ T GetThirdBest() const { return estimates_[2].sample; }
+
+ private:
+ struct Sample {
+ T sample;
+ TimeT time;
+ Sample(T init_sample, TimeT init_time)
+ : sample(init_sample), time(init_time) {}
+ };
+
+ TimeDeltaT window_length_; // Time length of window.
+ T zero_value_; // Uninitialized value of T.
+ Sample estimates_[3]; // Best estimate is element 0.
+};
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_CORE_CONGESTION_CONTROL_WINDOWED_FILTER_H_
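As a quick orientation before the tests that follow, here is a usage sketch of the filter declared above, using the integer-based instantiation (a round-trip counter as the time type) much like the ExpireCounterBasedMax test. Only the WindowedFilter/MaxFilter names and the constructor, Update(), and Get*Best() signatures come from the header; the sample values and the expectation comments are illustrative.

#include <cstdint>
#include <iostream>

#include "net/third_party/quiche/src/quic/core/congestion_control/windowed_filter.h"

int main() {
  // Track the maximum sample seen over the last two round trips; 0 is the
  // uninitialized value and t = 0 the starting time.
  quic::WindowedFilter<uint64_t, quic::MaxFilter<uint64_t>, uint64_t, uint64_t>
      max_filter(/*window_length=*/2, /*zero_value=*/0, /*zero_time=*/0);

  max_filter.Update(50000, /*new_time=*/1);  // First sample resets all three
                                             // estimates to 50000.
  max_filter.Update(40000, /*new_time=*/2);  // Recorded as the second-best
                                             // (and third-best) estimate.
  max_filter.Update(30000, /*new_time=*/3);  // Not better than any estimate.
  std::cout << max_filter.GetBest() << "\n";        // 50000
  std::cout << max_filter.GetSecondBest() << "\n";  // 40000

  // At t = 4 the best sample from t = 1 is older than the two-round-trip
  // window, so the second-best estimate (40000) is promoted to best.
  max_filter.Update(20000, /*new_time=*/4);
  std::cout << max_filter.GetBest() << "\n";  // 40000
  return 0;
}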
diff --git a/quic/core/congestion_control/windowed_filter_test.cc b/quic/core/congestion_control/windowed_filter_test.cc
new file mode 100644
index 0000000..d9f3655
--- /dev/null
+++ b/quic/core/congestion_control/windowed_filter_test.cc
@@ -0,0 +1,387 @@
+// Copyright (c) 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/windowed_filter.h"
+
+#include "net/third_party/quiche/src/quic/core/congestion_control/rtt_stats.h"
+#include "net/third_party/quiche/src/quic/core/quic_bandwidth.h"
+#include "net/third_party/quiche/src/quic/core/quic_packets.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+
+namespace quic {
+namespace test {
+
+class WindowedFilterTest : public QuicTest {
+ public:
+  // Set the window to 99ms, so 25ms is more than a quarter of the window.
+ WindowedFilterTest()
+ : windowed_min_rtt_(QuicTime::Delta::FromMilliseconds(99),
+ QuicTime::Delta::Zero(),
+ QuicTime::Zero()),
+ windowed_max_bw_(QuicTime::Delta::FromMilliseconds(99),
+ QuicBandwidth::Zero(),
+ QuicTime::Zero()) {}
+
+ // Sets up windowed_min_rtt_ to have the following values:
+ // Best = 20ms, recorded at 25ms
+ // Second best = 40ms, recorded at 75ms
+ // Third best = 50ms, recorded at 100ms
+ void InitializeMinFilter() {
+ QuicTime now = QuicTime::Zero();
+ QuicTime::Delta rtt_sample = QuicTime::Delta::FromMilliseconds(10);
+ for (int i = 0; i < 5; ++i) {
+ windowed_min_rtt_.Update(rtt_sample, now);
+ VLOG(1) << "i: " << i << " sample: " << rtt_sample.ToMilliseconds()
+ << " mins: "
+ << " " << windowed_min_rtt_.GetBest().ToMilliseconds() << " "
+ << windowed_min_rtt_.GetSecondBest().ToMilliseconds() << " "
+ << windowed_min_rtt_.GetThirdBest().ToMilliseconds();
+ now = now + QuicTime::Delta::FromMilliseconds(25);
+ rtt_sample = rtt_sample + QuicTime::Delta::FromMilliseconds(10);
+ }
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(20),
+ windowed_min_rtt_.GetBest());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(40),
+ windowed_min_rtt_.GetSecondBest());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(50),
+ windowed_min_rtt_.GetThirdBest());
+ }
+
+ // Sets up windowed_max_bw_ to have the following values:
+ // Best = 900 bps, recorded at 25ms
+ // Second best = 700 bps, recorded at 75ms
+ // Third best = 600 bps, recorded at 100ms
+ void InitializeMaxFilter() {
+ QuicTime now = QuicTime::Zero();
+ QuicBandwidth bw_sample = QuicBandwidth::FromBitsPerSecond(1000);
+ for (int i = 0; i < 5; ++i) {
+ windowed_max_bw_.Update(bw_sample, now);
+ VLOG(1) << "i: " << i << " sample: " << bw_sample.ToBitsPerSecond()
+ << " maxs: "
+ << " " << windowed_max_bw_.GetBest().ToBitsPerSecond() << " "
+ << windowed_max_bw_.GetSecondBest().ToBitsPerSecond() << " "
+ << windowed_max_bw_.GetThirdBest().ToBitsPerSecond();
+ now = now + QuicTime::Delta::FromMilliseconds(25);
+ bw_sample = bw_sample - QuicBandwidth::FromBitsPerSecond(100);
+ }
+ EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(900),
+ windowed_max_bw_.GetBest());
+ EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(700),
+ windowed_max_bw_.GetSecondBest());
+ EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(600),
+ windowed_max_bw_.GetThirdBest());
+ }
+
+ protected:
+ WindowedFilter<QuicTime::Delta,
+ MinFilter<QuicTime::Delta>,
+ QuicTime,
+ QuicTime::Delta>
+ windowed_min_rtt_;
+ WindowedFilter<QuicBandwidth,
+ MaxFilter<QuicBandwidth>,
+ QuicTime,
+ QuicTime::Delta>
+ windowed_max_bw_;
+};
+
+namespace {
+// Test helper function: updates the filter with a lot of small values in order
+// to ensure that it is not susceptible to noise.
+void UpdateWithIrrelevantSamples(
+ WindowedFilter<uint64_t, MaxFilter<uint64_t>, uint64_t, uint64_t>* filter,
+ uint64_t max_value,
+ uint64_t time) {
+ for (uint64_t i = 0; i < 1000; i++) {
+ filter->Update(i % max_value, time);
+ }
+}
+} // namespace
+
+TEST_F(WindowedFilterTest, UninitializedEstimates) {
+ EXPECT_EQ(QuicTime::Delta::Zero(), windowed_min_rtt_.GetBest());
+ EXPECT_EQ(QuicTime::Delta::Zero(), windowed_min_rtt_.GetSecondBest());
+ EXPECT_EQ(QuicTime::Delta::Zero(), windowed_min_rtt_.GetThirdBest());
+ EXPECT_EQ(QuicBandwidth::Zero(), windowed_max_bw_.GetBest());
+ EXPECT_EQ(QuicBandwidth::Zero(), windowed_max_bw_.GetSecondBest());
+ EXPECT_EQ(QuicBandwidth::Zero(), windowed_max_bw_.GetThirdBest());
+}
+
+TEST_F(WindowedFilterTest, MonotonicallyIncreasingMin) {
+ QuicTime now = QuicTime::Zero();
+ QuicTime::Delta rtt_sample = QuicTime::Delta::FromMilliseconds(10);
+ windowed_min_rtt_.Update(rtt_sample, now);
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10), windowed_min_rtt_.GetBest());
+
+ // Gradually increase the rtt samples and ensure the windowed min rtt starts
+ // rising.
+ for (int i = 0; i < 6; ++i) {
+ now = now + QuicTime::Delta::FromMilliseconds(25);
+ rtt_sample = rtt_sample + QuicTime::Delta::FromMilliseconds(10);
+ windowed_min_rtt_.Update(rtt_sample, now);
+ VLOG(1) << "i: " << i << " sample: " << rtt_sample.ToMilliseconds()
+ << " mins: "
+ << " " << windowed_min_rtt_.GetBest().ToMilliseconds() << " "
+ << windowed_min_rtt_.GetSecondBest().ToMilliseconds() << " "
+ << windowed_min_rtt_.GetThirdBest().ToMilliseconds();
+ if (i < 3) {
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(10),
+ windowed_min_rtt_.GetBest());
+ } else if (i == 3) {
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(20),
+ windowed_min_rtt_.GetBest());
+ } else if (i < 6) {
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(40),
+ windowed_min_rtt_.GetBest());
+ }
+ }
+}
+
+TEST_F(WindowedFilterTest, MonotonicallyDecreasingMax) {
+ QuicTime now = QuicTime::Zero();
+ QuicBandwidth bw_sample = QuicBandwidth::FromBitsPerSecond(1000);
+ windowed_max_bw_.Update(bw_sample, now);
+ EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(1000), windowed_max_bw_.GetBest());
+
+ // Gradually decrease the bw samples and ensure the windowed max bw starts
+ // decreasing.
+ for (int i = 0; i < 6; ++i) {
+ now = now + QuicTime::Delta::FromMilliseconds(25);
+ bw_sample = bw_sample - QuicBandwidth::FromBitsPerSecond(100);
+ windowed_max_bw_.Update(bw_sample, now);
+ VLOG(1) << "i: " << i << " sample: " << bw_sample.ToBitsPerSecond()
+ << " maxs: "
+ << " " << windowed_max_bw_.GetBest().ToBitsPerSecond() << " "
+ << windowed_max_bw_.GetSecondBest().ToBitsPerSecond() << " "
+ << windowed_max_bw_.GetThirdBest().ToBitsPerSecond();
+ if (i < 3) {
+ EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(1000),
+ windowed_max_bw_.GetBest());
+ } else if (i == 3) {
+ EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(900),
+ windowed_max_bw_.GetBest());
+ } else if (i < 6) {
+ EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(700),
+ windowed_max_bw_.GetBest());
+ }
+ }
+}
+
+TEST_F(WindowedFilterTest, SampleChangesThirdBestMin) {
+ InitializeMinFilter();
+ // RTT sample lower than the third-choice min-rtt sets that, but nothing else.
+ QuicTime::Delta rtt_sample =
+ windowed_min_rtt_.GetThirdBest() - QuicTime::Delta::FromMilliseconds(5);
+ // This assert is necessary to avoid triggering -Wstrict-overflow
+ // See crbug/616957
+ ASSERT_GT(windowed_min_rtt_.GetThirdBest(),
+ QuicTime::Delta::FromMilliseconds(5));
+ // Latest sample was recorded at 100ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(101);
+ windowed_min_rtt_.Update(rtt_sample, now);
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetThirdBest());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(40),
+ windowed_min_rtt_.GetSecondBest());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(20), windowed_min_rtt_.GetBest());
+}
+
+TEST_F(WindowedFilterTest, SampleChangesThirdBestMax) {
+ InitializeMaxFilter();
+ // BW sample higher than the third-choice max sets that, but nothing else.
+ QuicBandwidth bw_sample =
+ windowed_max_bw_.GetThirdBest() + QuicBandwidth::FromBitsPerSecond(50);
+ // Latest sample was recorded at 100ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(101);
+ windowed_max_bw_.Update(bw_sample, now);
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetThirdBest());
+ EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(700),
+ windowed_max_bw_.GetSecondBest());
+ EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(900), windowed_max_bw_.GetBest());
+}
+
+TEST_F(WindowedFilterTest, SampleChangesSecondBestMin) {
+ InitializeMinFilter();
+ // RTT sample lower than the second-choice min sets that and also
+ // the third-choice min.
+ QuicTime::Delta rtt_sample =
+ windowed_min_rtt_.GetSecondBest() - QuicTime::Delta::FromMilliseconds(5);
+ // This assert is necessary to avoid triggering -Wstrict-overflow
+ // See crbug/616957
+ ASSERT_GT(windowed_min_rtt_.GetSecondBest(),
+ QuicTime::Delta::FromMilliseconds(5));
+ // Latest sample was recorded at 100ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(101);
+ windowed_min_rtt_.Update(rtt_sample, now);
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetThirdBest());
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetSecondBest());
+ EXPECT_EQ(QuicTime::Delta::FromMilliseconds(20), windowed_min_rtt_.GetBest());
+}
+
+TEST_F(WindowedFilterTest, SampleChangesSecondBestMax) {
+ InitializeMaxFilter();
+ // BW sample higher than the second-choice max sets that and also
+ // the third-choice max.
+ QuicBandwidth bw_sample =
+ windowed_max_bw_.GetSecondBest() + QuicBandwidth::FromBitsPerSecond(50);
+ // Latest sample was recorded at 100ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(101);
+ windowed_max_bw_.Update(bw_sample, now);
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetThirdBest());
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetSecondBest());
+ EXPECT_EQ(QuicBandwidth::FromBitsPerSecond(900), windowed_max_bw_.GetBest());
+}
+
+TEST_F(WindowedFilterTest, SampleChangesAllMins) {
+ InitializeMinFilter();
+ // RTT sample lower than the first-choice min-rtt sets that and also
+ // the second and third-choice mins.
+ QuicTime::Delta rtt_sample =
+ windowed_min_rtt_.GetBest() - QuicTime::Delta::FromMilliseconds(5);
+ // This assert is necessary to avoid triggering -Wstrict-overflow
+ // See crbug/616957
+ ASSERT_GT(windowed_min_rtt_.GetBest(), QuicTime::Delta::FromMilliseconds(5));
+ // Latest sample was recorded at 100ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(101);
+ windowed_min_rtt_.Update(rtt_sample, now);
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetThirdBest());
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetSecondBest());
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetBest());
+}
+
+TEST_F(WindowedFilterTest, SampleChangesAllMaxs) {
+ InitializeMaxFilter();
+ // BW sample higher than the first-choice max sets that and also
+ // the second and third-choice maxs.
+ QuicBandwidth bw_sample =
+ windowed_max_bw_.GetBest() + QuicBandwidth::FromBitsPerSecond(50);
+ // Latest sample was recorded at 100ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(101);
+ windowed_max_bw_.Update(bw_sample, now);
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetThirdBest());
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetSecondBest());
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetBest());
+}
+
+TEST_F(WindowedFilterTest, ExpireBestMin) {
+ InitializeMinFilter();
+ QuicTime::Delta old_third_best = windowed_min_rtt_.GetThirdBest();
+ QuicTime::Delta old_second_best = windowed_min_rtt_.GetSecondBest();
+ QuicTime::Delta rtt_sample =
+ old_third_best + QuicTime::Delta::FromMilliseconds(5);
+ // Best min sample was recorded at 25ms, so expiry time is 124ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(125);
+ windowed_min_rtt_.Update(rtt_sample, now);
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetThirdBest());
+ EXPECT_EQ(old_third_best, windowed_min_rtt_.GetSecondBest());
+ EXPECT_EQ(old_second_best, windowed_min_rtt_.GetBest());
+}
+
+TEST_F(WindowedFilterTest, ExpireBestMax) {
+ InitializeMaxFilter();
+ QuicBandwidth old_third_best = windowed_max_bw_.GetThirdBest();
+ QuicBandwidth old_second_best = windowed_max_bw_.GetSecondBest();
+ QuicBandwidth bw_sample =
+ old_third_best - QuicBandwidth::FromBitsPerSecond(50);
+ // Best max sample was recorded at 25ms, so expiry time is 124ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(125);
+ windowed_max_bw_.Update(bw_sample, now);
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetThirdBest());
+ EXPECT_EQ(old_third_best, windowed_max_bw_.GetSecondBest());
+ EXPECT_EQ(old_second_best, windowed_max_bw_.GetBest());
+}
+
+TEST_F(WindowedFilterTest, ExpireSecondBestMin) {
+ InitializeMinFilter();
+ QuicTime::Delta old_third_best = windowed_min_rtt_.GetThirdBest();
+ QuicTime::Delta rtt_sample =
+ old_third_best + QuicTime::Delta::FromMilliseconds(5);
+ // Second best min sample was recorded at 75ms, so expiry time is 174ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(175);
+ windowed_min_rtt_.Update(rtt_sample, now);
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetThirdBest());
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetSecondBest());
+ EXPECT_EQ(old_third_best, windowed_min_rtt_.GetBest());
+}
+
+TEST_F(WindowedFilterTest, ExpireSecondBestMax) {
+ InitializeMaxFilter();
+ QuicBandwidth old_third_best = windowed_max_bw_.GetThirdBest();
+ QuicBandwidth bw_sample =
+ old_third_best - QuicBandwidth::FromBitsPerSecond(50);
+ // Second best max sample was recorded at 75ms, so expiry time is 174ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(175);
+ windowed_max_bw_.Update(bw_sample, now);
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetThirdBest());
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetSecondBest());
+ EXPECT_EQ(old_third_best, windowed_max_bw_.GetBest());
+}
+
+TEST_F(WindowedFilterTest, ExpireAllMins) {
+ InitializeMinFilter();
+ QuicTime::Delta rtt_sample =
+ windowed_min_rtt_.GetThirdBest() + QuicTime::Delta::FromMilliseconds(5);
+ // This assert is necessary to avoid triggering -Wstrict-overflow
+ // See crbug/616957
+ ASSERT_LT(windowed_min_rtt_.GetThirdBest(),
+ QuicTime::Delta::Infinite() - QuicTime::Delta::FromMilliseconds(5));
+ // Third best min sample was recorded at 100ms, so expiry time is 199ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(200);
+ windowed_min_rtt_.Update(rtt_sample, now);
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetThirdBest());
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetSecondBest());
+ EXPECT_EQ(rtt_sample, windowed_min_rtt_.GetBest());
+}
+
+TEST_F(WindowedFilterTest, ExpireAllMaxs) {
+ InitializeMaxFilter();
+ QuicBandwidth bw_sample =
+ windowed_max_bw_.GetThirdBest() - QuicBandwidth::FromBitsPerSecond(50);
+ // Third best max sample was recorded at 100ms, so expiry time is 199ms.
+ QuicTime now = QuicTime::Zero() + QuicTime::Delta::FromMilliseconds(200);
+ windowed_max_bw_.Update(bw_sample, now);
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetThirdBest());
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetSecondBest());
+ EXPECT_EQ(bw_sample, windowed_max_bw_.GetBest());
+}
+
+// Test the windowed filter where the time used is an exact counter instead of a
+// timestamp. This is useful if, for example, the time is measured in round
+// trips.
+TEST_F(WindowedFilterTest, ExpireCounterBasedMax) {
+ // Create a window which starts at t = 0 and expires after two cycles.
+ WindowedFilter<uint64_t, MaxFilter<uint64_t>, uint64_t, uint64_t> max_filter(
+ 2, 0, 0);
+
+ const uint64_t kBest = 50000;
+ // Insert 50000 at t = 1.
+ max_filter.Update(50000, 1);
+ EXPECT_EQ(kBest, max_filter.GetBest());
+ UpdateWithIrrelevantSamples(&max_filter, 20, 1);
+ EXPECT_EQ(kBest, max_filter.GetBest());
+
+ // Insert 40000 at t = 2. Nothing is expected to expire.
+ max_filter.Update(40000, 2);
+ EXPECT_EQ(kBest, max_filter.GetBest());
+ UpdateWithIrrelevantSamples(&max_filter, 20, 2);
+ EXPECT_EQ(kBest, max_filter.GetBest());
+
+ // Insert 30000 at t = 3. Nothing is expected to expire yet.
+ max_filter.Update(30000, 3);
+ EXPECT_EQ(kBest, max_filter.GetBest());
+ UpdateWithIrrelevantSamples(&max_filter, 20, 3);
+ EXPECT_EQ(kBest, max_filter.GetBest());
+ VLOG(0) << max_filter.GetSecondBest();
+ VLOG(0) << max_filter.GetThirdBest();
+
+ // Insert 20000 at t = 4. 50000 at t = 1 expires, so 40000 becomes the new
+ // maximum.
+ const uint64_t kNewBest = 40000;
+ max_filter.Update(20000, 4);
+ EXPECT_EQ(kNewBest, max_filter.GetBest());
+ UpdateWithIrrelevantSamples(&max_filter, 20, 4);
+ EXPECT_EQ(kNewBest, max_filter.GetBest());
+}
+
+} // namespace test
+} // namespace quic