// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "quiche/quic/core/congestion_control/tcp_cubic_sender_bytes.h"

#include <algorithm>
#include <cstdint>
#include <string>

#include "quiche/quic/core/congestion_control/prr_sender.h"
#include "quiche/quic/core/congestion_control/rtt_stats.h"
#include "quiche/quic/core/crypto/crypto_protocol.h"
#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/platform/api/quic_bug_tracker.h"
#include "quiche/quic/platform/api/quic_flags.h"
#include "quiche/quic/platform/api/quic_logging.h"

namespace quic {

namespace {
// Constants based on TCP defaults.
const QuicByteCount kMaxBurstBytes = 3 * kDefaultTCPMSS;
const float kRenoBeta = 0.7f;  // Reno backoff factor.
// The minimum cwnd based on RFC 3782 (TCP NewReno) for cwnd reductions on a
// fast retransmission.
const QuicByteCount kDefaultMinimumCongestionWindow = 2 * kDefaultTCPMSS;
}  // namespace

TcpCubicSenderBytes::TcpCubicSenderBytes(
    const QuicClock* clock, const RttStats* rtt_stats, bool reno,
    QuicPacketCount initial_tcp_congestion_window,
    QuicPacketCount max_congestion_window, QuicConnectionStats* stats)
    : rtt_stats_(rtt_stats),
      stats_(stats),
      reno_(reno),
      num_connections_(kDefaultNumConnections),
      min4_mode_(false),
      last_cutback_exited_slowstart_(false),
      slow_start_large_reduction_(false),
      no_prr_(false),
      cubic_(clock),
      num_acked_packets_(0),
      congestion_window_(initial_tcp_congestion_window * kDefaultTCPMSS),
      min_congestion_window_(kDefaultMinimumCongestionWindow),
      max_congestion_window_(max_congestion_window * kDefaultTCPMSS),
      slowstart_threshold_(max_congestion_window * kDefaultTCPMSS),
      initial_tcp_congestion_window_(initial_tcp_congestion_window *
                                     kDefaultTCPMSS),
      initial_max_tcp_congestion_window_(max_congestion_window *
                                         kDefaultTCPMSS),
      min_slow_start_exit_window_(min_congestion_window_) {}

TcpCubicSenderBytes::~TcpCubicSenderBytes() {}

void TcpCubicSenderBytes::SetFromConfig(const QuicConfig& config,
                                        Perspective perspective) {
  if (perspective == Perspective::IS_SERVER &&
      config.HasReceivedConnectionOptions()) {
    if (ContainsQuicTag(config.ReceivedConnectionOptions(), kMIN4)) {
      // Min CWND of 4 experiment.
      min4_mode_ = true;
      SetMinCongestionWindowInPackets(1);
    }
    if (ContainsQuicTag(config.ReceivedConnectionOptions(), kSSLR)) {
      // Slow Start Fast Exit experiment.
      slow_start_large_reduction_ = true;
    }
    if (ContainsQuicTag(config.ReceivedConnectionOptions(), kNPRR)) {
      // Use unity pacing instead of PRR.
      no_prr_ = true;
    }
  }
}

void TcpCubicSenderBytes::AdjustNetworkParameters(const NetworkParams& params) {
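  // Ignore network parameter hints that lack either a bandwidth or an RTT
  // estimate.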
  if (params.bandwidth.IsZero() || params.rtt.IsZero()) {
    return;
  }
  SetCongestionWindowFromBandwidthAndRtt(params.bandwidth, params.rtt);
}

float TcpCubicSenderBytes::RenoBeta() const {
  // kNConnectionBeta is the backoff factor after loss for our N-connection
  // emulation, which emulates the effective backoff of an ensemble of N
  // TCP-Reno connections on a single loss event. The effective multiplier is
  // computed as (N - 1 + kRenoBeta) / N.
  return (num_connections_ - 1 + kRenoBeta) / num_connections_;
}

void TcpCubicSenderBytes::OnCongestionEvent(
    bool rtt_updated, QuicByteCount prior_in_flight, QuicTime event_time,
    const AckedPacketVector& acked_packets,
    const LostPacketVector& lost_packets) {
  if (rtt_updated && InSlowStart() &&
      hybrid_slow_start_.ShouldExitSlowStart(
          rtt_stats_->latest_rtt(), rtt_stats_->min_rtt(),
          GetCongestionWindow() / kDefaultTCPMSS)) {
    ExitSlowstart();
  }
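  // Handle losses before acks so that any window reduction triggered by this
  // event is already in effect when the acked packets are processed.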
  for (const LostPacket& lost_packet : lost_packets) {
    OnPacketLost(lost_packet.packet_number, lost_packet.bytes_lost,
                 prior_in_flight);
  }
  for (const AckedPacket& acked_packet : acked_packets) {
    OnPacketAcked(acked_packet.packet_number, acked_packet.bytes_acked,
                  prior_in_flight, event_time);
  }
}

void TcpCubicSenderBytes::OnPacketAcked(QuicPacketNumber acked_packet_number,
                                        QuicByteCount acked_bytes,
                                        QuicByteCount prior_in_flight,
                                        QuicTime event_time) {
  largest_acked_packet_number_.UpdateMax(acked_packet_number);
  if (InRecovery()) {
    if (!no_prr_) {
      // PRR is used when in recovery.
      prr_.OnPacketAcked(acked_bytes);
    }
    return;
  }
  MaybeIncreaseCwnd(acked_packet_number, acked_bytes, prior_in_flight,
                    event_time);
  if (InSlowStart()) {
    hybrid_slow_start_.OnPacketAcked(acked_packet_number);
  }
}

void TcpCubicSenderBytes::OnPacketSent(
    QuicTime /*sent_time*/, QuicByteCount /*bytes_in_flight*/,
    QuicPacketNumber packet_number, QuicByteCount bytes,
    HasRetransmittableData is_retransmittable) {
  if (InSlowStart()) {
    ++(stats_->slowstart_packets_sent);
  }

  if (is_retransmittable != HAS_RETRANSMITTABLE_DATA) {
    return;
  }
  if (InRecovery()) {
    // PRR is used when in recovery.
    prr_.OnPacketSent(bytes);
  }
  QUICHE_DCHECK(!largest_sent_packet_number_.IsInitialized() ||
                largest_sent_packet_number_ < packet_number);
  largest_sent_packet_number_ = packet_number;
  hybrid_slow_start_.OnPacketSent(packet_number);
}

bool TcpCubicSenderBytes::CanSend(QuicByteCount bytes_in_flight) {
  if (!no_prr_ && InRecovery()) {
    // PRR is used when in recovery.
    return prr_.CanSend(GetCongestionWindow(), bytes_in_flight,
                        GetSlowStartThreshold());
  }
  if (GetCongestionWindow() > bytes_in_flight) {
    return true;
  }
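  // In the min CWND of 4 experiment, sending is allowed whenever fewer than
  // four packets are in flight, even if the congestion window is already full.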
  if (min4_mode_ && bytes_in_flight < 4 * kDefaultTCPMSS) {
    return true;
  }
  return false;
}

QuicBandwidth TcpCubicSenderBytes::PacingRate(
    QuicByteCount /* bytes_in_flight */) const {
  // We pace at twice the rate of the underlying sender's bandwidth estimate
  // during slow start and 1.25x during congestion avoidance to ensure pacing
  // doesn't prevent us from filling the window. When PRR is disabled and the
  // sender is in recovery, pace at exactly the estimated bandwidth.
  QuicTime::Delta srtt = rtt_stats_->SmoothedOrInitialRtt();
  const QuicBandwidth bandwidth =
      QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
  return bandwidth * (InSlowStart() ? 2 : (no_prr_ && InRecovery() ? 1 : 1.25));
}

QuicBandwidth TcpCubicSenderBytes::BandwidthEstimate() const {
  QuicTime::Delta srtt = rtt_stats_->smoothed_rtt();
  if (srtt.IsZero()) {
    // If we haven't measured an rtt, the bandwidth estimate is unknown.
    return QuicBandwidth::Zero();
  }
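  // Estimate bandwidth as one congestion window delivered per smoothed RTT.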
  return QuicBandwidth::FromBytesAndTimeDelta(GetCongestionWindow(), srtt);
}

bool TcpCubicSenderBytes::InSlowStart() const {
  return GetCongestionWindow() < GetSlowStartThreshold();
}

bool TcpCubicSenderBytes::IsCwndLimited(QuicByteCount bytes_in_flight) const {
  const QuicByteCount congestion_window = GetCongestionWindow();
  if (bytes_in_flight >= congestion_window) {
    return true;
  }
  const QuicByteCount available_bytes = congestion_window - bytes_in_flight;
  const bool slow_start_limited =
      InSlowStart() && bytes_in_flight > congestion_window / 2;
  return slow_start_limited || available_bytes <= kMaxBurstBytes;
}

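// The sender is in recovery while the largest acked packet is no newer than
// the largest packet that had been sent when the last CWND cutback occurred.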
bool TcpCubicSenderBytes::InRecovery() const {
  return largest_acked_packet_number_.IsInitialized() &&
         largest_sent_at_last_cutback_.IsInitialized() &&
         largest_acked_packet_number_ <= largest_sent_at_last_cutback_;
}

void TcpCubicSenderBytes::OnRetransmissionTimeout(bool packets_retransmitted) {
  largest_sent_at_last_cutback_.Clear();
  if (!packets_retransmitted) {
    return;
  }
  hybrid_slow_start_.Restart();
  HandleRetransmissionTimeout();
}

std::string TcpCubicSenderBytes::GetDebugState() const { return ""; }

void TcpCubicSenderBytes::OnApplicationLimited(
    QuicByteCount /*bytes_in_flight*/) {}

void TcpCubicSenderBytes::SetCongestionWindowFromBandwidthAndRtt(
    QuicBandwidth bandwidth, QuicTime::Delta rtt) {
  QuicByteCount new_congestion_window = bandwidth.ToBytesPerPeriod(rtt);
  // Clamp the new CWND to [min_congestion_window_,
  // kMaxResumptionCongestionWindow * kDefaultTCPMSS].
  congestion_window_ =
      std::max(min_congestion_window_,
               std::min(new_congestion_window,
                        kMaxResumptionCongestionWindow * kDefaultTCPMSS));
}

void TcpCubicSenderBytes::SetInitialCongestionWindowInPackets(
    QuicPacketCount congestion_window) {
  congestion_window_ = congestion_window * kDefaultTCPMSS;
}

void TcpCubicSenderBytes::SetMinCongestionWindowInPackets(
    QuicPacketCount congestion_window) {
  min_congestion_window_ = congestion_window * kDefaultTCPMSS;
}

void TcpCubicSenderBytes::SetNumEmulatedConnections(int num_connections) {
  num_connections_ = std::max(1, num_connections);
  cubic_.SetNumConnections(num_connections_);
}

void TcpCubicSenderBytes::ExitSlowstart() {
  slowstart_threshold_ = congestion_window_;
}

void TcpCubicSenderBytes::OnPacketLost(QuicPacketNumber packet_number,
                                       QuicByteCount lost_bytes,
                                       QuicByteCount prior_in_flight) {
  // TCP NewReno (RFC6582) says that once a loss occurs, any losses in packets
  // already sent should be treated as part of the same loss event, since
  // further losses among them are expected.
  if (largest_sent_at_last_cutback_.IsInitialized() &&
      packet_number <= largest_sent_at_last_cutback_) {
    if (last_cutback_exited_slowstart_) {
      ++stats_->slowstart_packets_lost;
      stats_->slowstart_bytes_lost += lost_bytes;
      if (slow_start_large_reduction_) {
        // Reduce congestion window by lost_bytes for every loss.
        congestion_window_ = std::max(congestion_window_ - lost_bytes,
                                      min_slow_start_exit_window_);
        slowstart_threshold_ = congestion_window_;
      }
    }
    QUIC_DVLOG(1) << "Ignoring loss for largest_missing:" << packet_number
                  << " because it was sent prior to the last CWND cutback.";
    return;
  }
  ++stats_->tcp_loss_events;
  last_cutback_exited_slowstart_ = InSlowStart();
  if (InSlowStart()) {
    ++stats_->slowstart_packets_lost;
  }

  if (!no_prr_) {
    prr_.OnPacketLost(prior_in_flight);
  }

  // TODO(b/77268641): Separate out all of slow start into a separate class.
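  // With slow start large reduction, exit slow start by shrinking the window
  // one MSS per lost packet instead of applying a multiplicative cut; half of
  // a sufficiently grown window is remembered as the floor for further
  // reductions attributed to this loss event.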
  if (slow_start_large_reduction_ && InSlowStart()) {
    QUICHE_DCHECK_LT(kDefaultTCPMSS, congestion_window_);
    if (congestion_window_ >= 2 * initial_tcp_congestion_window_) {
      min_slow_start_exit_window_ = congestion_window_ / 2;
    }
    congestion_window_ = congestion_window_ - kDefaultTCPMSS;
  } else if (reno_) {
    congestion_window_ = congestion_window_ * RenoBeta();
  } else {
    congestion_window_ =
        cubic_.CongestionWindowAfterPacketLoss(congestion_window_);
  }
  if (congestion_window_ < min_congestion_window_) {
    congestion_window_ = min_congestion_window_;
  }
  slowstart_threshold_ = congestion_window_;
  largest_sent_at_last_cutback_ = largest_sent_packet_number_;
  // Reset packet count from congestion avoidance mode. We start counting again
  // when we're out of recovery.
  num_acked_packets_ = 0;
  QUIC_DVLOG(1) << "Incoming loss; congestion window: " << congestion_window_
                << " slowstart threshold: " << slowstart_threshold_;
}

QuicByteCount TcpCubicSenderBytes::GetCongestionWindow() const {
  return congestion_window_;
}

QuicByteCount TcpCubicSenderBytes::GetSlowStartThreshold() const {
  return slowstart_threshold_;
}

// Called when we receive an ack. Normal TCP tracks how many packets one ack
// represents, but QUIC acknowledges each packet individually.
void TcpCubicSenderBytes::MaybeIncreaseCwnd(
    QuicPacketNumber /*acked_packet_number*/, QuicByteCount acked_bytes,
    QuicByteCount prior_in_flight, QuicTime event_time) {
  QUIC_BUG_IF(quic_bug_10439_1, InRecovery())
      << "Never increase the CWND during recovery.";
  // Do not increase the congestion window unless the sender is close to using
  // the current window.
  if (!IsCwndLimited(prior_in_flight)) {
    cubic_.OnApplicationLimited();
    return;
  }
  if (congestion_window_ >= max_congestion_window_) {
    return;
  }
  if (InSlowStart()) {
    // TCP slow start: exponential growth. Increase the window by one MSS for
    // each acked packet, doubling it every round trip.
    congestion_window_ += kDefaultTCPMSS;
    QUIC_DVLOG(1) << "Slow start; congestion window: " << congestion_window_
                  << " slowstart threshold: " << slowstart_threshold_;
    return;
  }
  // Congestion avoidance.
  if (reno_) {
    // Classic Reno congestion avoidance.
    ++num_acked_packets_;
    // Multiplying the ack count by num_connections (equivalently, dividing the
    // per-increase threshold by it) grows the CWND num_connections times
    // faster than conventional Reno.
    if (num_acked_packets_ * num_connections_ >=
        congestion_window_ / kDefaultTCPMSS) {
      congestion_window_ += kDefaultTCPMSS;
      num_acked_packets_ = 0;
    }

    QUIC_DVLOG(1) << "Reno; congestion window: " << congestion_window_
                  << " slowstart threshold: " << slowstart_threshold_
                  << " congestion window count: " << num_acked_packets_;
  } else {
    congestion_window_ = std::min(
        max_congestion_window_,
        cubic_.CongestionWindowAfterAck(acked_bytes, congestion_window_,
                                        rtt_stats_->min_rtt(), event_time));
    QUIC_DVLOG(1) << "Cubic; congestion window: " << congestion_window_
                  << " slowstart threshold: " << slowstart_threshold_;
  }
}

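// After a retransmission timeout that actually retransmitted data, reset the
// cubic state, set the slow start threshold to half of the current window, and
// collapse the window to its minimum.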
void TcpCubicSenderBytes::HandleRetransmissionTimeout() {
  cubic_.ResetCubicState();
  slowstart_threshold_ = congestion_window_ / 2;
  congestion_window_ = min_congestion_window_;
}

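// Connection migration may move the connection onto a different network path,
// so discard all learned congestion control state and start over from the
// initial configuration.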
void TcpCubicSenderBytes::OnConnectionMigration() {
  hybrid_slow_start_.Restart();
  prr_ = PrrSender();
  largest_sent_packet_number_.Clear();
  largest_acked_packet_number_.Clear();
  largest_sent_at_last_cutback_.Clear();
  last_cutback_exited_slowstart_ = false;
  cubic_.ResetCubicState();
  num_acked_packets_ = 0;
  congestion_window_ = initial_tcp_congestion_window_;
  max_congestion_window_ = initial_max_tcp_congestion_window_;
  slowstart_threshold_ = initial_max_tcp_congestion_window_;
}

CongestionControlType TcpCubicSenderBytes::GetCongestionControlType() const {
  return reno_ ? kRenoBytes : kCubicBytes;
}

}  // namespace quic