Each QUIC server session only accepts up to a certain number of new requests in each event loop. The rest are postponed to the following event loop.
The behavior is off by default and can be enabled by calling set_max_streams_accepted_per_loop().
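
For illustration, a subclass can opt in from its constructor. The sketch below is hypothetical (MyServerSession and the limit of 8 are example values, and the remaining pure-virtual overrides such as CreateIncomingStream() are omitted); it mirrors the QuicSimpleServerSession changes in this CL:

  class MyServerSession : public QuicServerSessionBase {
   public:
    MyServerSession(const QuicConfig& config, QuicConnection* connection,
                    QuicSession::Visitor* visitor,
                    QuicCryptoServerStreamBase::Helper* helper,
                    const QuicCryptoServerConfig* crypto_config,
                    QuicCompressedCertsCache* compressed_certs_cache)
        : QuicServerSessionBase(config, CurrentSupportedVersions(), connection,
                                visitor, helper, crypto_config,
                                compressed_certs_cache) {
      // Accept at most 8 new incoming streams per event loop; further
      // stream-opening frames are buffered as PendingStreams until the
      // stream count reset alarm fires in a later loop.
      set_max_streams_accepted_per_loop(8u);
    }

   protected:
    // With the per-loop limit enabled, leftover pending bidirectional
    // streams are converted into real streams on a later event loop.
    QuicStream* ProcessBidirectionalPendingStream(
        PendingStream* pending) override {
      return CreateIncomingStream(pending);
    }
  };
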
PiperOrigin-RevId: 590959802
diff --git a/quiche/quic/core/http/end_to_end_test.cc b/quiche/quic/core/http/end_to_end_test.cc
index 78eeffc..2775fc1 100644
--- a/quiche/quic/core/http/end_to_end_test.cc
+++ b/quiche/quic/core/http/end_to_end_test.cc
@@ -7493,6 +7493,37 @@
EXPECT_LE(duration, QuicTime::Delta::FromSeconds(25));
}
+TEST_P(EndToEndTest, RequestsBurstMitigation) {
+ ASSERT_TRUE(Initialize());
+ if (!version_.HasIetfQuicFrames()) {
+ return;
+ }
+
+ // Send 50 requests simultaneously and wait for their responses. Hopefully
+ // more than 5 of these requests will arrive at the server in the same
+ // event loop and cause some of them to be pending until the next loop.
+ for (int i = 0; i < 50; ++i) {
+ EXPECT_LT(0, client_->SendRequest("/foo"));
+ }
+
+ while (50 > client_->num_responses()) {
+ client_->ClearPerRequestState();
+ client_->WaitForResponse();
+ CheckResponseHeaders(client_.get());
+ }
+ EXPECT_TRUE(client_->connected());
+
+ server_thread_->Pause();
+ QuicConnection* server_connection = GetServerConnection();
+ if (server_connection != nullptr) {
+ const QuicConnectionStats& server_stats = server_connection->GetStats();
+ EXPECT_LT(0u, server_stats.num_total_pending_streams);
+ } else {
+ ADD_FAILURE() << "Missing server connection";
+ }
+ server_thread_->Resume();
+}
+
} // namespace
} // namespace test
} // namespace quic
diff --git a/quiche/quic/core/http/quic_server_session_base_test.cc b/quiche/quic/core/http/quic_server_session_base_test.cc
index 7a1c952..c1ecce9 100644
--- a/quiche/quic/core/http/quic_server_session_base_test.cc
+++ b/quiche/quic/core/http/quic_server_session_base_test.cc
@@ -67,13 +67,19 @@
: QuicServerSessionBase(config, CurrentSupportedVersions(), connection,
visitor, helper, crypto_config,
compressed_certs_cache),
- quic_simple_server_backend_(quic_simple_server_backend) {}
+ quic_simple_server_backend_(quic_simple_server_backend) {
+ // Change the limit to be smaller than kMaxStreamsForTest to test pending
+ // stream handling across multiple loops.
+ set_max_streams_accepted_per_loop(4u);
+ }
~TestServerSession() override { DeleteConnection(); }
MOCK_METHOD(bool, WriteControlFrame,
(const QuicFrame& frame, TransmissionType type), (override));
+ using QuicServerSessionBase::pending_streams_size;
+
protected:
QuicSpdyStream* CreateIncomingStream(QuicStreamId id) override {
if (!ShouldCreateIncomingStream(id)) {
@@ -116,6 +122,11 @@
stream_helper());
}
+ QuicStream* ProcessBidirectionalPendingStream(
+ PendingStream* pending) override {
+ return CreateIncomingStream(pending);
+ }
+
private:
QuicSimpleServerBackend*
quic_simple_server_backend_; // Owned by QuicServerSessionBaseTest
@@ -370,6 +381,12 @@
EXPECT_TRUE(QuicServerSessionBasePeer::GetOrCreateStream(session_.get(),
stream_id));
stream_id += QuicUtils::StreamIdDelta(transport_version());
+ // Reset the stream count to make it not a bottleneck.
+ QuicAlarm* alarm =
+ QuicSessionPeer::GetStreamCountResetAlarm(session_.get());
+ if (alarm->IsSet()) {
+ alarm_factory_.FireAlarm(alarm);
+ }
}
if (!VersionHasIetfQuicFrames(transport_version())) {
@@ -494,6 +511,8 @@
MOCK_METHOD(std::string, GetAddressToken, (const CachedNetworkParameters*),
(const, override));
+
+ MOCK_METHOD(bool, encryption_established, (), (const, override));
};
TEST_P(QuicServerSessionBaseTest, BandwidthEstimates) {
@@ -720,6 +739,69 @@
QuicServerSessionBasePeer::IsBandwidthResumptionEnabled(session_.get()));
}
+TEST_P(QuicServerSessionBaseTest, OpenStreamLimitPerEventLoop) {
+ if (!VersionHasIetfQuicFrames(transport_version())) {
+ // Only needed for version 99/IETF QUIC. Noop otherwise.
+ return;
+ }
+ MockTlsServerHandshaker* crypto_stream =
+ new MockTlsServerHandshaker(session_.get(), &crypto_config_);
+ QuicServerSessionBasePeer::SetCryptoStream(session_.get(), crypto_stream);
+ EXPECT_CALL(*crypto_stream, encryption_established())
+ .WillRepeatedly(testing::Return(true));
+ connection_->SetDefaultEncryptionLevel(ENCRYPTION_FORWARD_SECURE);
+ session_->OnConfigNegotiated();
+
+ size_t i = 0u;
+ QuicStreamFrame data(GetNthClientInitiatedBidirectionalId(i), false, 0,
+ kStreamData);
+ session_->OnStreamFrame(data);
+ EXPECT_EQ(1u, session_->GetNumActiveStreams());
+ ++i;
+
+ // Start another loop.
+ QuicAlarm* alarm = QuicSessionPeer::GetStreamCountResetAlarm(session_.get());
+ EXPECT_TRUE(alarm->IsSet());
+ alarm_factory_.FireAlarm(alarm);
+ // Receive data on a read uni stream with an incomplete type; the stream
+ // should become pending.
+ QuicStreamId control_stream_id =
+ GetNthClientInitiatedUnidirectionalStreamId(transport_version(), 3);
+ QuicStreamFrame data1(control_stream_id, false, 1, "aaaa");
+ session_->OnStreamFrame(data1);
+ EXPECT_EQ(1u, session_->pending_streams_size());
+ // Receive data on 9 more bidi streams. Only the first 4 should open new
+ // streams.
+ for (; i < 10u; ++i) {
+ QuicStreamFrame more_data(GetNthClientInitiatedBidirectionalId(i), false, 0,
+ kStreamData);
+ session_->OnStreamFrame(more_data);
+ }
+ EXPECT_EQ(5u, session_->GetNumActiveStreams());
+ EXPECT_EQ(6u, session_->pending_streams_size());
+ EXPECT_EQ(
+ GetNthClientInitiatedBidirectionalId(i - 1),
+ QuicSessionPeer::GetLargestPeerCreatedStreamId(session_.get(), false));
+
+ // Starting another loop should cause 4 more pending bidi streams to open.
+ helper_.GetClock()->AdvanceTime(QuicTime::Delta::FromMicroseconds(100));
+ EXPECT_TRUE(alarm->IsSet());
+ alarm_factory_.FireAlarm(alarm);
+ EXPECT_EQ(9u, session_->GetNumActiveStreams());
+ // The control stream and the 10th bidi stream should remain pending.
+ EXPECT_EQ(2u, session_->pending_streams_size());
+ EXPECT_EQ(nullptr, session_->GetActiveStream(control_stream_id));
+ EXPECT_EQ(nullptr, session_->GetActiveStream(
+ GetNthClientInitiatedBidirectionalId(i - 1)));
+
+ // Receiving 1 more new stream should violate the max stream limit even though
+ // the stream would have become pending.
+ EXPECT_CALL(*connection_, CloseConnection(QUIC_INVALID_STREAM_ID, _, _));
+ QuicStreamFrame bad_stream(GetNthClientInitiatedBidirectionalId(i), false, 0,
+ kStreamData);
+ session_->OnStreamFrame(bad_stream);
+}
+
// Tests which check the lifetime management of data members of
// QuicCryptoServerStream objects when async GetProof is in use.
class StreamMemberLifetimeTest : public QuicServerSessionBaseTest {
diff --git a/quiche/quic/core/quic_session.cc b/quiche/quic/core/quic_session.cc
index fe48e83..48977c2 100644
--- a/quiche/quic/core/quic_session.cc
+++ b/quiche/quic/core/quic_session.cc
@@ -56,6 +56,26 @@
QuicSession* session_;
};
+class StreamCountResetAlarmDelegate : public QuicAlarm::Delegate {
+ public:
+ explicit StreamCountResetAlarmDelegate(QuicSession* session)
+ : session_(session) {}
+ StreamCountResetAlarmDelegate(const StreamCountResetAlarmDelegate&) = delete;
+ StreamCountResetAlarmDelegate& operator=(
+ const StreamCountResetAlarmDelegate&) = delete;
+
+ QuicConnectionContext* GetConnectionContext() override {
+ return (session_->connection() == nullptr)
+ ? nullptr
+ : session_->connection()->context();
+ }
+
+ void OnAlarm() override { session_->OnStreamCountReset(); }
+
+ private:
+ QuicSession* session_;
+};
+
} // namespace
#define ENDPOINT \
@@ -111,7 +131,10 @@
was_zero_rtt_rejected_(false),
liveness_testing_in_progress_(false),
limit_sending_max_streams_(
- GetQuicReloadableFlag(quic_limit_sending_max_streams2)) {
+ GetQuicReloadableFlag(quic_limit_sending_max_streams2)),
+ stream_count_reset_alarm_(
+ absl::WrapUnique<QuicAlarm>(connection->alarm_factory()->CreateAlarm(
+ new StreamCountResetAlarmDelegate(this)))) {
closed_streams_clean_up_alarm_ =
absl::WrapUnique<QuicAlarm>(connection_->alarm_factory()->CreateAlarm(
new ClosedStreamsCleanUpDelegate(this)));
@@ -160,6 +183,9 @@
if (closed_streams_clean_up_alarm_ != nullptr) {
closed_streams_clean_up_alarm_->PermanentCancel();
}
+ if (stream_count_reset_alarm_ != nullptr) {
+ stream_count_reset_alarm_->PermanentCancel();
+ }
}
PendingStream* QuicSession::PendingStreamOnStreamFrame(
@@ -185,10 +211,19 @@
}
bool QuicSession::MaybeProcessPendingStream(PendingStream* pending) {
- QUICHE_DCHECK(pending != nullptr);
+ QUICHE_DCHECK(pending != nullptr && connection()->connected());
+
+ if (ExceedsPerLoopStreamLimit()) {
+ QUIC_DLOG(INFO) << "Skip processing pending stream " << pending->id()
+ << " because it exceeds per loop limit.";
+ QUIC_CODE_COUNT_N(quic_pending_stream, 1, 3);
+ return false;
+ }
+
QuicStreamId stream_id = pending->id();
std::optional<QuicResetStreamError> stop_sending_error_code =
pending->GetStopSendingErrorCode();
+ QUIC_DLOG(INFO) << "Process pending stream " << pending->id();
QuicStream* stream = ProcessPendingStream(pending);
if (stream != nullptr) {
// The pending stream should now be in the scope of normal streams.
@@ -475,6 +510,7 @@
});
closed_streams_clean_up_alarm_->Cancel();
+ stream_count_reset_alarm_->Cancel();
if (visitor_) {
visitor_->OnConnectionClosed(connection_->GetOneActiveServerConnectionId(),
@@ -1103,8 +1139,9 @@
bool QuicSession::ShouldProcessFrameByPendingStream(QuicFrameType type,
QuicStreamId id) const {
- return UsesPendingStreamForFrame(type, id) &&
- stream_map_.find(id) == stream_map_.end();
+ return stream_map_.find(id) == stream_map_.end() &&
+ ((version().HasIetfQuicFrames() && ExceedsPerLoopStreamLimit()) ||
+ UsesPendingStreamForFrame(type, id));
}
void QuicSession::OnFinalByteOffsetReceived(
@@ -1894,6 +1931,15 @@
++num_static_streams_;
return;
}
+ if (version().HasIetfQuicFrames() && IsIncomingStream(stream_id) &&
+ max_streams_accepted_per_loop_ != kMaxQuicStreamCount) {
+ QUICHE_DCHECK(!ExceedsPerLoopStreamLimit());
+ // Per-loop stream limit is imposed.
+ ++new_incoming_streams_in_current_loop_;
+ if (!stream_count_reset_alarm_->IsSet()) {
+ stream_count_reset_alarm_->Set(connection()->clock()->ApproximateNow());
+ }
+ }
if (!VersionHasIetfQuicFrames(transport_version())) {
// Do not inform stream ID manager of static streams.
stream_id_manager_.ActivateStream(
@@ -2723,6 +2769,7 @@
}
for (auto* pending_stream : pending_streams) {
if (!MaybeProcessPendingStream(pending_stream)) {
+ // Defer any further pending stream processing to the next event loop.
return;
}
}
@@ -2806,5 +2853,22 @@
// (potentially undefined behavior)
}
+bool QuicSession::ExceedsPerLoopStreamLimit() const {
+ QUICHE_DCHECK(version().HasIetfQuicFrames());
+ return new_incoming_streams_in_current_loop_ >=
+ max_streams_accepted_per_loop_;
+}
+
+void QuicSession::OnStreamCountReset() {
+ const bool exceeded_per_loop_stream_limit = ExceedsPerLoopStreamLimit();
+ new_incoming_streams_in_current_loop_ = 0;
+ if (exceeded_per_loop_stream_limit) {
+ QUIC_CODE_COUNT_N(quic_pending_stream, 2, 3);
+ // Convert as many leftover pending streams from the last loop to active
+ // streams as allowed.
+ ProcessAllPendingStreams();
+ }
+}
+
#undef ENDPOINT // undef for jumbo builds
} // namespace quic
diff --git a/quiche/quic/core/quic_session.h b/quiche/quic/core/quic_session.h
index af86bbe..ab7ba05 100644
--- a/quiche/quic/core/quic_session.h
+++ b/quiche/quic/core/quic_session.h
@@ -26,6 +26,7 @@
#include "quiche/quic/core/legacy_quic_stream_id_manager.h"
#include "quiche/quic/core/proto/cached_network_parameters_proto.h"
#include "quiche/quic/core/quic_connection.h"
+#include "quiche/quic/core/quic_constants.h"
#include "quiche/quic/core/quic_control_frame_manager.h"
#include "quiche/quic/core/quic_crypto_stream.h"
#include "quiche/quic/core/quic_datagram_queue.h"
@@ -639,7 +640,8 @@
virtual QuicSSLConfig GetSSLConfig() const { return QuicSSLConfig(); }
- // Try converting all pending streams to normal streams.
+ // Start converting all pending streams to normal streams in the same order
+ // as they were created, which may take several event loops to finish.
void ProcessAllPendingStreams();
const ParsedQuicVersionVector& client_original_supported_versions() const {
@@ -674,6 +676,10 @@
// streams.
QuicStream* GetActiveStream(QuicStreamId id) const;
+ // Called in the following event loop to reset
+ // |new_incoming_streams_in_current_loop_| and process any pending streams.
+ void OnStreamCountReset();
+
// Returns the priority type used by the streams in the session.
QuicPriorityType priority_type() const { return QuicPriorityType::kHttp; }
@@ -681,6 +687,8 @@
using StreamMap =
absl::flat_hash_map<QuicStreamId, std::unique_ptr<QuicStream>>;
+ // Use a linked hash map for pending streams so that they will be processed
+ // in FIFO order to avoid starvation.
using PendingStreamMap =
quiche::QuicheLinkedHashMap<QuicStreamId, std::unique_ptr<PendingStream>>;
@@ -804,7 +812,8 @@
size_t num_draining_streams() const { return num_draining_streams_; }
// How a pending stream is converted to a full QuicStream depends on subclass
- // implementations. Here as UsesPendingStreamForFrame() returns false, this
+ // implementations. As the default value of max_streams_accepted_per_loop_ is
+ // kMaxQuicStreamCount and UsesPendingStreamForFrame() returns false, this
// method is not supposed to be called at all.
virtual QuicStream* ProcessReadUnidirectionalPendingStream(
PendingStream* /*pending*/) {
@@ -813,7 +822,7 @@
}
virtual QuicStream* ProcessBidirectionalPendingStream(
PendingStream* /*pending*/) {
- QUICHE_BUG(received unexpected bidirectional pending stream);
+ QUICHE_BUG(received unexpected pending bidirectional stream);
return nullptr;
}
@@ -860,6 +869,14 @@
// it has to be in the write blocked list.
virtual bool CheckStreamWriteBlocked(QuicStream* stream) const;
+ // Sets the limit on the maximum number of new streams that can be created in
+ // a single event loop. Any additional stream data will be stored in a
+ // PendingStream until a subsequent event loop.
+ void set_max_streams_accepted_per_loop(
+ QuicStreamCount max_streams_accepted_per_loop) {
+ max_streams_accepted_per_loop_ = max_streams_accepted_per_loop;
+ }
+
private:
friend class test::QuicSessionPeer;
@@ -943,6 +960,8 @@
// pointer to the new stream; otherwise, returns nullptr.
QuicStream* ProcessPendingStream(PendingStream* pending);
+ bool ExceedsPerLoopStreamLimit() const;
+
// Keep track of highest received byte offset of locally closed streams, while
// waiting for a definitive final highest offset from the peer.
absl::flat_hash_map<QuicStreamId, QuicStreamOffset>
@@ -1062,6 +1081,14 @@
// If true, then do not send MAX_STREAM frames if there are already two
// outstanding. Latched value of flag quic_limit_sending_max_streams.
bool limit_sending_max_streams_;
+
+ // Counts the newly created non-static incoming streams in the current event
+ // loop; reset for each event loop.
+ QuicStreamCount new_incoming_streams_in_current_loop_ = 0u;
+ // Defaults to the max stream count so that there is no stream creation limit
+ // per event loop.
+ QuicStreamCount max_streams_accepted_per_loop_ = kMaxQuicStreamCount;
+ std::unique_ptr<QuicAlarm> stream_count_reset_alarm_;
};
} // namespace quic
diff --git a/quiche/quic/core/quic_session_test.cc b/quiche/quic/core/quic_session_test.cc
index 7539658..3042d23 100644
--- a/quiche/quic/core/quic_session_test.cc
+++ b/quiche/quic/core/quic_session_test.cc
@@ -246,6 +246,7 @@
writev_consumes_all_data_(false),
uses_pending_streams_(false),
num_incoming_streams_created_(0) {
+ set_max_streams_accepted_per_loop(5);
Initialize();
this->connection()->SetEncrypter(
ENCRYPTION_FORWARD_SECURE,
@@ -1318,6 +1319,10 @@
// Opening and closing the next max streams window should NOT result
// in any MAX_STREAMS frames being sent.
+ QuicAlarm* alarm = QuicSessionPeer::GetStreamCountResetAlarm(&session_);
+ if (alarm->IsSet()) {
+ alarm_factory_.FireAlarm(alarm);
+ }
for (size_t i = 0; i < kMaxStreams; ++i) {
QuicStreamId stream_id =
GetNthClientInitiatedBidirectionalId(i + kMaxStreams);
@@ -1337,6 +1342,9 @@
->advertised_max_incoming_bidirectional_streams());
// Open (but do not close) all available streams to consume the full window.
+ if (alarm->IsSet()) {
+ alarm_factory_.FireAlarm(alarm);
+ }
for (size_t i = 0; i < kMaxStreams; ++i) {
QuicStreamId stream_id =
GetNthClientInitiatedBidirectionalId(i + 2 * kMaxStreams);
@@ -2178,6 +2186,10 @@
EXPECT_EQ(1u, QuicSessionPeer::GetNumOpenDynamicStreams(&session_));
session_.StreamDraining(i, /*unidirectional=*/false);
EXPECT_EQ(0u, QuicSessionPeer::GetNumOpenDynamicStreams(&session_));
+ QuicAlarm* alarm = QuicSessionPeer::GetStreamCountResetAlarm(&session_);
+ if (alarm->IsSet()) {
+ alarm_factory_.FireAlarm(alarm);
+ }
}
}
@@ -3350,6 +3362,71 @@
.IsInitialized());
}
+TEST_P(QuicSessionTestServer, OpenStreamLimitPerEventLoop) {
+ if (!VersionHasIetfQuicFrames(transport_version())) {
+ // Only needed for version 99/IETF QUIC. Noop otherwise.
+ return;
+ }
+ session_.set_uses_pending_streams(true);
+ CompleteHandshake();
+
+ // Receive data on a read uni stream without the 1st byte; the stream
+ // should become pending.
+ QuicStreamId unidirectional_stream_id =
+ QuicUtils::GetFirstUnidirectionalStreamId(transport_version(),
+ Perspective::IS_CLIENT);
+ QuicStreamFrame data1(unidirectional_stream_id, false, 10,
+ absl::string_view("HT"));
+ session_.OnStreamFrame(data1);
+ EXPECT_TRUE(
+ QuicSessionPeer::GetPendingStream(&session_, unidirectional_stream_id));
+ EXPECT_EQ(0, session_.num_incoming_streams_created());
+ // Receive data on 10 more bidi streams. Only the first 5 should open new
+ // streams.
+ size_t i = 0u;
+ for (; i < 10u; ++i) {
+ QuicStreamId bidi_stream_id = GetNthClientInitiatedBidirectionalId(i);
+ QuicStreamFrame data(bidi_stream_id, false, 0, "aaaa");
+ session_.OnStreamFrame(data);
+ if (i > 4u) {
+ EXPECT_TRUE(QuicSessionPeer::GetPendingStream(&session_, bidi_stream_id));
+ }
+ }
+ EXPECT_EQ(5u, session_.num_incoming_streams_created());
+ EXPECT_EQ(GetNthClientInitiatedBidirectionalId(i - 1),
+ QuicSessionPeer::GetLargestPeerCreatedStreamId(&session_, false));
+ EXPECT_TRUE(session_.GetActiveStream(GetNthClientInitiatedBidirectionalId(4))
+ ->pending_duration()
+ .IsZero());
+ // Receive the 1st byte on the read uni stream. The stream should still be
+ // pending due to the stream limit.
+ QuicStreamFrame data2(unidirectional_stream_id, false, 0,
+ absl::string_view("HT"));
+ session_.OnStreamFrame(data2);
+ EXPECT_TRUE(
+ QuicSessionPeer::GetPendingStream(&session_, unidirectional_stream_id));
+
+ // Starting another loop should cause 5 more pending streams to open,
+ // including the unidirectional stream.
+ helper_.GetClock()->AdvanceTime(QuicTime::Delta::FromMicroseconds(100));
+ QuicAlarm* alarm = QuicSessionPeer::GetStreamCountResetAlarm(&session_);
+ EXPECT_TRUE(alarm->IsSet());
+ alarm_factory_.FireAlarm(alarm);
+ EXPECT_EQ(10u, session_.num_incoming_streams_created());
+ EXPECT_NE(nullptr, session_.GetActiveStream(unidirectional_stream_id));
+ EXPECT_EQ(100, session_.GetActiveStream(unidirectional_stream_id)
+ ->pending_duration()
+ .ToMicroseconds());
+ EXPECT_EQ(
+ 100,
+ session_.GetActiveStream(GetNthClientInitiatedBidirectionalId(i - 2))
+ ->pending_duration()
+ .ToMicroseconds());
+ // The 10th bidi stream should remain pending.
+ EXPECT_EQ(nullptr, session_.GetActiveStream(
+ GetNthClientInitiatedBidirectionalId(i - 1)));
+}
+
// A client test class that can be used when the automatic configuration is not
// desired.
class QuicSessionTestClientUnconfigured : public QuicSessionTestBase {
diff --git a/quiche/quic/core/quic_stream.cc b/quiche/quic/core/quic_stream.cc
index 0da973d..1db3cd3 100644
--- a/quiche/quic/core/quic_stream.cc
+++ b/quiche/quic/core/quic_stream.cc
@@ -127,7 +127,11 @@
session->flow_controller()->auto_tune_receive_window(),
session->flow_controller()),
sequencer_(this),
- creation_time_(session->GetClock()->ApproximateNow()) {}
+ creation_time_(session->GetClock()->ApproximateNow()) {
+ if (is_bidirectional_) {
+ QUIC_CODE_COUNT_N(quic_pending_stream, 3, 3);
+ }
+}
void PendingStream::OnDataAvailable() {
// Data should be kept in the sequencer so that
diff --git a/quiche/quic/test_tools/quic_session_peer.cc b/quiche/quic/test_tools/quic_session_peer.cc
index 3986a0e..981af9f 100644
--- a/quiche/quic/test_tools/quic_session_peer.cc
+++ b/quiche/quic/test_tools/quic_session_peer.cc
@@ -242,5 +242,10 @@
return session->num_draining_streams_;
}
+// static
+QuicAlarm* QuicSessionPeer::GetStreamCountResetAlarm(QuicSession* session) {
+ return session->stream_count_reset_alarm_.get();
+}
+
} // namespace test
} // namespace quic
diff --git a/quiche/quic/test_tools/quic_session_peer.h b/quiche/quic/test_tools/quic_session_peer.h
index f0e83c9..d4963c6 100644
--- a/quiche/quic/test_tools/quic_session_peer.h
+++ b/quiche/quic/test_tools/quic_session_peer.h
@@ -87,6 +87,7 @@
bool unidirectional) {
return session->GetLargestPeerCreatedStreamId(unidirectional);
}
+ static QuicAlarm* GetStreamCountResetAlarm(QuicSession* session);
};
} // namespace test
diff --git a/quiche/quic/tools/quic_simple_server_session.cc b/quiche/quic/tools/quic_simple_server_session.cc
index 7e49470..1f82e11 100644
--- a/quiche/quic/tools/quic_simple_server_session.cc
+++ b/quiche/quic/tools/quic_simple_server_session.cc
@@ -30,6 +30,7 @@
helper, crypto_config, compressed_certs_cache),
quic_simple_server_backend_(quic_simple_server_backend) {
QUICHE_DCHECK(quic_simple_server_backend_);
+ set_max_streams_accepted_per_loop(5u);
}
QuicSimpleServerSession::~QuicSimpleServerSession() { DeleteConnection(); }
@@ -103,4 +104,10 @@
return stream;
}
+QuicStream* QuicSimpleServerSession::ProcessBidirectionalPendingStream(
+ PendingStream* pending) {
+ QUICHE_DCHECK(IsEncryptionEstablished());
+ return CreateIncomingStream(pending);
+}
+
} // namespace quic
diff --git a/quiche/quic/tools/quic_simple_server_session.h b/quiche/quic/tools/quic_simple_server_session.h
index 5585214..14516f5 100644
--- a/quiche/quic/tools/quic_simple_server_session.h
+++ b/quiche/quic/tools/quic_simple_server_session.h
@@ -62,6 +62,10 @@
const QuicCryptoServerConfig* crypto_config,
QuicCompressedCertsCache* compressed_certs_cache) override;
+ // Overridden to handle conversion from bidi pending stream.
+ QuicStream* ProcessBidirectionalPendingStream(
+ PendingStream* pending) override;
+
QuicSimpleServerBackend* server_backend() {
return quic_simple_server_backend_;
}