Use GetQuicFlag()/SetQuicFlag() accessors for QUIC flag reads and writes

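Reading and writing QUIC flag globals directly bypasses the platform's
flag machinery, so this change routes the remaining direct accesses
through the GetQuicFlag()/SetQuicFlag() accessors. Where a flag was
read repeatedly in one function (quic_time_wait_list_manager.cc and its
test), the value is now fetched once into a local and reused.

As a rough illustration of the pattern only (the real definitions live
in the QUIC platform layer and vary by embedder), on a platform whose
flags are plain globals the accessors could be as thin as:

  // Hypothetical pass-through definitions for a minimal platform;
  // not the actual QUICHE implementation.
  #define GetQuicFlag(flag) (flag)
  #define SetQuicFlag(flag, value) ((flag) = (value))
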
PiperOrigin-RevId: 253074929
Change-Id: I7c0b265ae4ba000bec3b4ea6936a93bc92b73ddc
diff --git a/quic/core/congestion_control/bbr_sender.cc b/quic/core/congestion_control/bbr_sender.cc
index aa81d5a..c455751 100644
--- a/quic/core/congestion_control/bbr_sender.cc
+++ b/quic/core/congestion_control/bbr_sender.cc
@@ -109,7 +109,7 @@
       pacing_gain_(1),
       congestion_window_gain_(1),
       congestion_window_gain_constant_(
-          static_cast<float>(FLAGS_quic_bbr_cwnd_gain)),
+          static_cast<float>(GetQuicFlag(FLAGS_quic_bbr_cwnd_gain))),
       num_startup_rtts_(kRoundTripsWithoutGrowthBeforeExitingStartup),
       exit_startup_on_loss_(false),
       cycle_current_offset_(0),
diff --git a/quic/core/congestion_control/bbr_sender_test.cc b/quic/core/congestion_control/bbr_sender_test.cc
index be4f291..5a7ee40 100644
--- a/quic/core/congestion_control/bbr_sender_test.cc
+++ b/quic/core/congestion_control/bbr_sender_test.cc
@@ -374,7 +374,7 @@
 // Test a simple long data transfer with 2 rtts of aggregation.
 TEST_F(BbrSenderTest, SimpleTransferAckDecimation) {
   // Decrease the CWND gain so extra CWND is required with stretch acks.
-  FLAGS_quic_bbr_cwnd_gain = 1.0;
+  SetQuicFlag(FLAGS_quic_bbr_cwnd_gain, 1.0);
   sender_ = new BbrSender(
       bbr_sender_.connection()->clock()->Now(), rtt_stats_,
       QuicSentPacketManagerPeer::GetUnackedPacketMap(
diff --git a/quic/core/quic_dispatcher.cc b/quic/core/quic_dispatcher.cc
index 8a91711..562cf47 100644
--- a/quic/core/quic_dispatcher.cc
+++ b/quic/core/quic_dispatcher.cc
@@ -1153,7 +1153,7 @@
                                                form != GOOGLE_QUIC_PACKET)) {
     return;
   }
-  if (FLAGS_quic_allow_chlo_buffering &&
+  if (GetQuicFlag(FLAGS_quic_allow_chlo_buffering) &&
       new_sessions_allowed_per_event_loop_ <= 0) {
    // Can't create new sessions anymore. Wait until the next event loop.
     QUIC_BUG_IF(
@@ -1225,7 +1225,7 @@
   }
 
   ChloAlpnExtractor alpn_extractor;
-  if (FLAGS_quic_allow_chlo_buffering &&
+  if (GetQuicFlag(FLAGS_quic_allow_chlo_buffering) &&
       !ChloExtractor::Extract(*current_packet_, GetSupportedVersions(),
                               config_->create_session_tag_indicators(),
                               &alpn_extractor, server_connection_id.length())) {
diff --git a/quic/core/quic_packet_creator.cc b/quic/core/quic_packet_creator.cc
index 20566de..cc59cc7 100644
--- a/quic/core/quic_packet_creator.cc
+++ b/quic/core/quic_packet_creator.cc
@@ -193,7 +193,7 @@
   }
   CreateStreamFrame(id, data_size, offset, fin, frame);
   // Explicitly disallow multi-packet CHLOs.
-  if (FLAGS_quic_enforce_single_packet_chlo &&
+  if (GetQuicFlag(FLAGS_quic_enforce_single_packet_chlo) &&
       StreamFrameIsClientHello(frame->stream_frame) &&
       frame->stream_frame.data_length < data_size) {
     const std::string error_details =
diff --git a/quic/core/quic_packet_creator_test.cc b/quic/core/quic_packet_creator_test.cc
index 665dbac..d960342 100644
--- a/quic/core/quic_packet_creator_test.cc
+++ b/quic/core/quic_packet_creator_test.cc
@@ -714,7 +714,7 @@
 TEST_P(QuicPacketCreatorTest, CryptoStreamFramePacketPadding) {
   // This test serializes crypto payloads slightly larger than a packet, which
  // causes the multi-packet ClientHello check to fail.
-  FLAGS_quic_enforce_single_packet_chlo = false;
+  SetQuicFlag(FLAGS_quic_enforce_single_packet_chlo, false);
   // Compute the total overhead for a single frame in packet.
   size_t overhead =
       GetPacketHeaderOverhead(client_framer_.transport_version()) +
diff --git a/quic/core/quic_sent_packet_manager.cc b/quic/core/quic_sent_packet_manager.cc
index 204c020..c8ae53b 100644
--- a/quic/core/quic_sent_packet_manager.cc
+++ b/quic/core/quic_sent_packet_manager.cc
@@ -203,7 +203,7 @@
     }
   }
 
-  using_pacing_ = !FLAGS_quic_disable_pacing_for_perf_tests;
+  using_pacing_ = !GetQuicFlag(FLAGS_quic_disable_pacing_for_perf_tests);
 
   if (config.HasClientSentConnectionOption(k1CON, perspective)) {
     send_algorithm_->SetNumEmulatedConnections(1);
diff --git a/quic/core/quic_time_wait_list_manager.cc b/quic/core/quic_time_wait_list_manager.cc
index f1f6dcf..8bcb162 100644
--- a/quic/core/quic_time_wait_list_manager.cc
+++ b/quic/core/quic_time_wait_list_manager.cc
@@ -51,8 +51,8 @@
     Visitor* visitor,
     const QuicClock* clock,
     QuicAlarmFactory* alarm_factory)
-    : time_wait_period_(
-          QuicTime::Delta::FromSeconds(FLAGS_quic_time_wait_list_seconds)),
+    : time_wait_period_(QuicTime::Delta::FromSeconds(
+          GetQuicFlag(FLAGS_quic_time_wait_list_seconds))),
       connection_id_clean_up_alarm_(
           alarm_factory->CreateAlarm(new ConnectionIdCleanUpAlarm(this))),
       clock_(clock),
@@ -81,8 +81,9 @@
     connection_id_map_.erase(it);
   }
   TrimTimeWaitListIfNeeded();
-  DCHECK_LT(num_connections(),
-            static_cast<size_t>(FLAGS_quic_time_wait_list_max_connections));
+  int64_t max_connections =
+      GetQuicFlag(FLAGS_quic_time_wait_list_max_connections);
+  DCHECK_LT(num_connections(), static_cast<size_t>(max_connections));
   ConnectionIdData data(num_packets, ietf_quic, clock_->ApproximateNow(),
                         action);
   if (termination_packets != nullptr) {
@@ -371,11 +372,12 @@
 }
 
 void QuicTimeWaitListManager::TrimTimeWaitListIfNeeded() {
-  if (FLAGS_quic_time_wait_list_max_connections < 0) {
+  const int64_t kMaxConnections =
+      GetQuicFlag(FLAGS_quic_time_wait_list_max_connections);
+  if (kMaxConnections < 0) {
     return;
   }
-  while (num_connections() >=
-         static_cast<size_t>(FLAGS_quic_time_wait_list_max_connections)) {
+  while (num_connections() >= static_cast<size_t>(kMaxConnections)) {
     MaybeExpireOldestConnection(QuicTime::Infinite());
   }
 }
diff --git a/quic/core/quic_time_wait_list_manager_test.cc b/quic/core/quic_time_wait_list_manager_test.cc
index 209f26f..3fdd454 100644
--- a/quic/core/quic_time_wait_list_manager_test.cc
+++ b/quic/core/quic_time_wait_list_manager_test.cc
@@ -542,12 +542,14 @@
 
 TEST_F(QuicTimeWaitListManagerTest, MaxConnectionsTest) {
   // Basically, shut off time-based eviction.
-  FLAGS_quic_time_wait_list_seconds = 10000000000;
-  FLAGS_quic_time_wait_list_max_connections = 5;
+  SetQuicFlag(FLAGS_quic_time_wait_list_seconds, 10000000000);
+  SetQuicFlag(FLAGS_quic_time_wait_list_max_connections, 5);
 
   uint64_t current_conn_id = 0;
+  const int64_t kMaxConnections =
+      GetQuicFlag(FLAGS_quic_time_wait_list_max_connections);
  // Add exactly the maximum number of connections.
-  for (int64_t i = 0; i < FLAGS_quic_time_wait_list_max_connections; ++i) {
+  for (int64_t i = 0; i < kMaxConnections; ++i) {
     ++current_conn_id;
     QuicConnectionId current_connection_id = TestConnectionId(current_conn_id);
     EXPECT_FALSE(IsConnectionIdInTimeWait(current_connection_id));
@@ -560,17 +562,17 @@
 
   // Now keep adding.  Since we're already at the max, every new connection-id
   // will evict the oldest one.
-  for (int64_t i = 0; i < FLAGS_quic_time_wait_list_max_connections; ++i) {
+  for (int64_t i = 0; i < kMaxConnections; ++i) {
     ++current_conn_id;
     QuicConnectionId current_connection_id = TestConnectionId(current_conn_id);
-    const QuicConnectionId id_to_evict = TestConnectionId(
-        current_conn_id - FLAGS_quic_time_wait_list_max_connections);
+    const QuicConnectionId id_to_evict =
+        TestConnectionId(current_conn_id - kMaxConnections);
     EXPECT_TRUE(IsConnectionIdInTimeWait(id_to_evict));
     EXPECT_FALSE(IsConnectionIdInTimeWait(current_connection_id));
     EXPECT_CALL(visitor_,
                 OnConnectionAddedToTimeWaitList(current_connection_id));
     AddConnectionId(current_connection_id, QuicTimeWaitListManager::DO_NOTHING);
-    EXPECT_EQ(static_cast<size_t>(FLAGS_quic_time_wait_list_max_connections),
+    EXPECT_EQ(static_cast<size_t>(kMaxConnections),
               time_wait_list_manager_.num_connections());
     EXPECT_FALSE(IsConnectionIdInTimeWait(id_to_evict));
     EXPECT_TRUE(IsConnectionIdInTimeWait(current_connection_id));