Created and integrated QuicIntervalDeque class for index management. Improves code readability.
gfe-relnote: Integrated QuicIntervalDeque class into QuicStreamSendBuffer affecting the QUIC GFE path. Protected by --gfe2_reloadable_flag_quic_interval_deque
PiperOrigin-RevId: 286471930
Change-Id: Ida91b6c7d066d9710d06932c9f4726946bfbd430
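
A minimal usage sketch of the new container follows; the Payload type is
hypothetical and only illustrates the element contract (the real integration
stores BufferedSlice, see quic_stream_send_buffer.cc below). It assumes
quic_interval_deque.h and <string> are included and runs inside namespace quic.

    // Hypothetical element type: exposes the half-open interval it covers.
    struct Payload {
      std::size_t offset;
      std::string data;
      QuicInterval<std::size_t> interval() const {
        return QuicInterval<std::size_t>(offset, offset + data.size());
      }
    };

    QuicIntervalDeque<Payload> deque;
    deque.PushBack(Payload{0, std::string(10, 'a')});   // covers [0, 10)
    deque.PushBack(Payload{10, std::string(20, 'b')});  // covers [10, 30)

    // Sequential lookups hit the cached index and cost O(1).
    auto it = deque.DataAt(0);     // item covering offset 0
    ++it;                          // advances the cached index to [10, 30)
    auto next = deque.DataAt(10);  // O(1); non-sequential offsets are O(log n)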
diff --git a/quic/core/quic_interval_deque.h b/quic/core/quic_interval_deque.h
new file mode 100644
index 0000000..ed77d68
--- /dev/null
+++ b/quic/core/quic_interval_deque.h
@@ -0,0 +1,389 @@
+// Copyright (c) 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef QUICHE_QUIC_CORE_QUIC_INTERVAL_DEQUE_H_
+#define QUICHE_QUIC_CORE_QUIC_INTERVAL_DEQUE_H_
+
+#include <algorithm>
+#include "net/third_party/quiche/src/quic/core/quic_interval.h"
+#include "net/third_party/quiche/src/quic/core/quic_types.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_bug_tracker.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_containers.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_logging.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_optional.h"
+
+namespace quic {
+
+namespace test {
+class QuicIntervalDequePeer;
+} // namespace test
+
+// QuicIntervalDeque<T, C> is a templated container wrapper for random-access
+// data structures. The wrapper allows items to be added to the underlying
+// container with an interval associated with each item. The intervals _should_
+// be added already sorted and represent searchable indices. The search is
+// optimized for sequential usage.
+//
+// As the intervals are to be searched sequentially, the search for the next
+// interval can be achieved in O(1) by simply remembering the last interval
+// consumed. The structure also checks for an "off-by-one" use case wherein the
+// |cached_index_| is off by one index because the caller didn't call operator
+// |++| to increment the index. Other intervals can be found in O(log(n)) as
+// they are binary searchable. A use case for this structure is packet
+// buffering: packets are sent sequentially but may sometimes be needed for
+// retransmission. The
+// packets and their payloads are added with associated intervals representing
+// data ranges they carry. When a user asks for a particular interval it's very
+// likely they are requesting the next sequential interval, receiving it in O(1)
+// time. Updating the sequential index is done automatically through the
+// |DataAt| method and its iterator operator |++|.
+//
+// The intervals are represented using the usual C++ STL convention, namely as
+// the half-open QuicInterval [min, max). A point p is considered to be
+// contained in the QuicInterval iff p >= min && p < max. One consequence of
+// this definition is that for any non-empty QuicInterval, min is contained in
+// the QuicInterval but max is not. There is no canonical representation for the
+// empty QuicInterval, and empty intervals are forbidden from being added to
+// this container as they would be unsearchable.
+//
+// The type T is required to be copy-constructible or move-constructible. The
+// type T is also expected to have an |interval()| method returning a
+// QuicInterval<std::size_t> for the particular value. The type C is required
+// to be a random access container supporting the methods |pop_front|,
+// |push_back|, |operator[]|, |size|, and iterator support for
+// |std::lower_bound|, e.g. a |deque| or |vector|.
+//
+// The QuicIntervalDeque<T, C>, like other C++ STL random access containers,
+// doesn't have any explicit support for any equality operators.
+//
+//
+// Examples with internal state:
+//
+// // An example class to be stored inside the Interval Deque.
+// struct IntervalVal {
+// const int32_t val;
+// const size_t interval_begin, interval_end;
+// QuicInterval<size_t> interval();
+// };
+// typedef IntervalVal IV;
+// QuicIntervalDeque<IV> deque;
+//
+// // State:
+// // cached_index -> None
+// // container -> {}
+//
+// // Add interval items
+// deque.PushBack(IV(val: 0, interval_begin: 0, interval_end: 10));
+// deque.PushBack(IV(val: 1, interval_begin: 20, interval_end: 25));
+// deque.PushBack(IV(val: 2, interval_begin: 25, interval_end: 30));
+//
+// // State:
+// // cached_index -> 0
+// // container -> {{0, [0, 10)}, {1, [20, 25)}, {2, [25, 30)}}
+//
+// // Look for 0 and return [0, 10). Time: O(1)
+// auto it = deque.DataAt(0);
+// assert(it->val == 0);
+// it++; // Increment and move the |cached_index_| over [0, 10) to [20, 25).
+// assert(it->val == 1);
+//
+// // State:
+// // cached_index -> 1
+// // container -> {{0, [0, 10)}, {1, [20, 25)}, {2, [25, 30)}}
+//
+// // Look for 20 and return [20, 25). Time: O(1)
+// auto it = deque.DataAt(20); // |cached_index_| remains unchanged.
+// assert(it->val == 1);
+//
+// // State:
+// // cached_index -> 1
+// // container -> {{0, [0, 10)}, {1, [20, 25)}, {2, [25, 30)}}
+//
+// // Look for 15 and return deque.DataEnd(). Time: O(log(n))
+// auto it = deque.DataAt(15); // |cached_index_| remains unchanged.
+// assert(it == deque.DataEnd());
+//
+// // Look for 25 and return [25, 30). Time: O(1) with off-by-one.
+// auto it = deque.DataAt(25); // |cached_index_| is updated to 2.
+// assert(it->val == 2);
+// it++; // |cached_index_| is set to |None| as all data has been iterated.
+//
+//
+// // State:
+// // cached_index -> None
+// // container -> {{0, [0, 10)}, {1, [20, 25)}, {2, [25, 30)}}
+//
+// // Look again for 0 and return [0, 10). Time: O(log(n))
+// auto it = deque.DataAt(0);
+//
+//
+// deque.PopFront(); // Pop -> {0, [0, 10)}
+//
+// // State:
+// // cached_index -> None
+// // container -> {{1, [20, 25)}, {2, [25, 30)}}
+//
+// deque.PopFront(); // Pop -> {1, [20, 25)}
+//
+// // State:
+// // cached_index -> None
+// // container -> {{2, [25, 30)}}
+//
+// deque.PushBack(IV(val: 3, interval_begin: 35, interval_end: 50));
+//
+// // State:
+// // cached_index -> 1
+// // container -> {{2, [25, 30)}, {3, [35, 50)}}
+
+template <class T, class C = QUIC_NO_EXPORT QuicDeque<T>>
+class QUIC_EXPORT_PRIVATE QuicIntervalDeque {
+ public:
+ class QUIC_EXPORT_PRIVATE Iterator {
+ public:
+ // Used by |std::lower_bound|
+ using iterator_category = std::forward_iterator_tag;
+ using value_type = T;
+ using difference_type = std::ptrdiff_t;
+ using pointer = T*;
+ using reference = T&;
+
+ // Every increment of the iterator will increment the |cached_index_| if
+ // |index_| is larger than the current |cached_index_|. |index_| is bounded
+ // at |deque_.size()| and any attempt to increment above that will be
+ // ignored. Once an iterator has iterated all elements the |cached_index_|
+ // will be reset.
+ Iterator(std::size_t index, QuicIntervalDeque* deque)
+ : index_(index), deque_(deque) {}
+ // Only the ++ operator attempts to update the cached index. Other operators
+ // are used by |lower_bound| to binary search and are thus private.
+ Iterator& operator++() {
+ // Don't increment when we are at the end.
+ const std::size_t container_size = deque_->container_.size();
+ if (index_ >= container_size) {
+ QUIC_BUG << "Iterator out of bounds.";
+ return *this;
+ }
+ index_++;
+ if (deque_->cached_index_.has_value()) {
+ const std::size_t cached_index = deque_->cached_index_.value();
+ // If all items are iterated then reset the |cached_index_|
+ if (index_ == container_size) {
+ deque_->cached_index_.reset();
+ } else {
+ // Otherwise the new |cached_index_| is the max of itself and |index_|
+ if (cached_index < index_) {
+ deque_->cached_index_ = index_;
+ }
+ }
+ }
+ return *this;
+ }
+ Iterator operator++(int) {
+ Iterator copy = *this;
+ ++(*this);
+ return copy;
+ }
+ reference operator*() { return deque_->container_[index_]; }
+ pointer operator->() { return &deque_->container_[index_]; }
+ bool operator==(const Iterator& rhs) const {
+ return index_ == rhs.index_ && deque_ == rhs.deque_;
+ }
+ bool operator!=(const Iterator& rhs) const { return !(*this == rhs); }
+
+ private:
+ // A set of private operators for |std::lower_bound|
+ Iterator operator+(difference_type amount) const {
+ Iterator copy = *this;
+ copy.index_ += amount;
+      DCHECK(copy.index_ < copy.deque_->Size());
+ return copy;
+ }
+ Iterator& operator+=(difference_type amount) {
+ index_ += amount;
+      DCHECK(index_ < deque_->Size());
+ return *this;
+ }
+ difference_type operator-(const Iterator& rhs) const {
+ return static_cast<difference_type>(index_) -
+ static_cast<difference_type>(rhs.index_);
+ }
+
+ // |index_| is the index of the item in |*deque_|.
+ std::size_t index_;
+ // |deque_| is a pointer to the container the iterator came from.
+ QuicIntervalDeque* deque_;
+
+ friend class QuicIntervalDeque;
+ };
+
+ QuicIntervalDeque();
+
+ // Adds an item to the underlying container. The |item|'s interval _should_ be
+ // strictly greater than the last interval added.
+ void PushBack(T&& item);
+ void PushBack(const T& item);
+ // Removes the front/top of the underlying container and the associated
+ // interval.
+ void PopFront();
+ // Returns an iterator to the beginning of the data. The iterator will move
+ // the |cached_index_| as the iterator moves.
+ Iterator DataBegin();
+ // Returns an iterator to the end of the data.
+ Iterator DataEnd();
+  // Returns an iterator pointing to the item whose interval contains
+  // |interval_begin|. The iterator will move the |cached_index_| as the
+  // iterator moves.
+ Iterator DataAt(const std::size_t interval_begin);
+
+ // Returns the number of items contained inside the structure.
+ std::size_t Size() const;
+ // Returns whether the structure is empty.
+ bool Empty() const;
+
+ private:
+ struct QUIC_EXPORT_PRIVATE IntervalCompare {
+ bool operator()(const T& item, std::size_t interval_begin) const {
+ return item.interval().max() <= interval_begin;
+ }
+ };
+
+ template <class U>
+ void PushBackUniversal(U&& item);
+
+ Iterator Search(const std::size_t interval_begin,
+ const std::size_t begin_index,
+ const std::size_t end_index);
+
+ // For accessing the |cached_index_|
+ friend class test::QuicIntervalDequePeer;
+
+ C container_;
+ QuicOptional<std::size_t> cached_index_;
+};
+
+template <class T, class C>
+QuicIntervalDeque<T, C>::QuicIntervalDeque() {}
+
+template <class T, class C>
+void QuicIntervalDeque<T, C>::PushBack(T&& item) {
+ PushBackUniversal(std::move(item));
+}
+
+template <class T, class C>
+void QuicIntervalDeque<T, C>::PushBack(const T& item) {
+ PushBackUniversal(item);
+}
+
+template <class T, class C>
+void QuicIntervalDeque<T, C>::PopFront() {
+ if (container_.size() == 0) {
+ QUIC_BUG << "Trying to pop from an empty container.";
+ return;
+ }
+ container_.pop_front();
+ if (container_.size() == 0) {
+ cached_index_.reset();
+ }
+ if (cached_index_.value_or(0) > 0) {
+ cached_index_ = cached_index_.value() - 1;
+ }
+}
+
+template <class T, class C>
+typename QuicIntervalDeque<T, C>::Iterator
+QuicIntervalDeque<T, C>::DataBegin() {
+ return Iterator(0, this);
+}
+
+template <class T, class C>
+typename QuicIntervalDeque<T, C>::Iterator QuicIntervalDeque<T, C>::DataEnd() {
+ return Iterator(container_.size(), this);
+}
+
+template <class T, class C>
+typename QuicIntervalDeque<T, C>::Iterator QuicIntervalDeque<T, C>::DataAt(
+ const std::size_t interval_begin) {
+ // No |cached_index_| value means all items can be searched.
+ if (!cached_index_.has_value()) {
+ return Search(interval_begin, 0, container_.size());
+ }
+
+ const std::size_t cached_index = cached_index_.value();
+ DCHECK(cached_index < container_.size());
+
+ const QuicInterval<size_t> cached_interval =
+ container_[cached_index].interval();
+ // Does our cached index point directly to what we want?
+ if (cached_interval.Contains(interval_begin)) {
+ return Iterator(cached_index, this);
+ }
+
+ // Are we off-by-one?
+ const std::size_t next_index = cached_index + 1;
+ if (next_index < container_.size()) {
+ if (container_[next_index].interval().Contains(interval_begin)) {
+ cached_index_ = next_index;
+ return Iterator(next_index, this);
+ }
+ }
+
+ // Small optimization:
+ // Determine if we should binary search above or below the cached interval.
+ const std::size_t cached_begin = cached_interval.min();
+ bool looking_below = interval_begin < cached_begin;
+ const std::size_t lower = looking_below ? 0 : cached_index + 1;
+ const std::size_t upper = looking_below ? cached_index : container_.size();
+ Iterator ret = Search(interval_begin, lower, upper);
+ if (ret == DataEnd()) {
+ return ret;
+ }
+ // Update the |cached_index_| to point to the higher index.
+ if (!looking_below) {
+ cached_index_ = ret.index_;
+ }
+ return ret;
+}
+
+template <class T, class C>
+std::size_t QuicIntervalDeque<T, C>::Size() const {
+ return container_.size();
+}
+
+template <class T, class C>
+bool QuicIntervalDeque<T, C>::Empty() const {
+ return container_.size() == 0;
+}
+
+template <class T, class C>
+template <class U>
+void QuicIntervalDeque<T, C>::PushBackUniversal(U&& item) {
+ QuicInterval<std::size_t> interval = item.interval();
+ // Adding an empty interval is a bug.
+ if (interval.Empty()) {
+ QUIC_BUG << "Trying to save empty interval to QuicDeque.";
+ return;
+ }
+ container_.push_back(std::forward<U>(item));
+ if (!cached_index_.has_value()) {
+ cached_index_ = container_.size() - 1;
+ }
+}
+
+template <class T, class C>
+typename QuicIntervalDeque<T, C>::Iterator QuicIntervalDeque<T, C>::Search(
+ const std::size_t interval_begin,
+ const std::size_t begin_index,
+ const std::size_t end_index) {
+ auto begin = container_.begin() + begin_index;
+ auto end = container_.begin() + end_index;
+ auto res = std::lower_bound(begin, end, interval_begin, IntervalCompare());
+ // Just because we run |lower_bound| and it didn't return |container_.end()|
+ // doesn't mean we found our desired interval.
+ if (res != end && res->interval().Contains(interval_begin)) {
+ return Iterator(std::distance(begin, res) + begin_index, this);
+ }
+ return DataEnd();
+}
+
+} // namespace quic
+
+#endif  // QUICHE_QUIC_CORE_QUIC_INTERVAL_DEQUE_H_
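
Note on the search above: Search() delegates to std::lower_bound with
IntervalCompare, which orders items by the exclusive upper bound of their
interval, so the first item whose max() exceeds the search point is the only
candidate that can contain it; a final Contains() check rejects points that
fall into a gap between intervals. A standalone sketch of the same predicate
over a plain std::vector (the Item type is hypothetical, for illustration
only):

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    struct Item {
      std::size_t begin, end;  // Half-open interval [begin, end).
    };

    // Mirrors QuicIntervalDeque::IntervalCompare: an item sorts before the
    // search point iff its interval ends at or before that point.
    bool EndsAtOrBefore(const Item& item, std::size_t point) {
      return item.end <= point;
    }

    int main() {
      std::vector<Item> items = {{0, 10}, {20, 25}, {25, 30}};
      // First item whose end is > 22, i.e. the only one that can contain 22.
      auto it = std::lower_bound(items.begin(), items.end(), 22u, EndsAtOrBefore);
      // |it| points at {20, 25}. Searching for 15 would land on {20, 25} too,
      // which is why DataAt()/Search() still verify Contains() before a hit.
      return (it != items.end() && it->begin <= 22 && 22 < it->end) ? 0 : 1;
    }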
diff --git a/quic/core/quic_interval_deque_test.cc b/quic/core/quic_interval_deque_test.cc
new file mode 100644
index 0000000..c31bebe
--- /dev/null
+++ b/quic/core/quic_interval_deque_test.cc
@@ -0,0 +1,364 @@
+// Copyright (c) 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "net/third_party/quiche/src/quic/core/quic_interval_deque.h"
+#include <cstdint>
+#include <ostream>
+#include "net/third_party/quiche/src/quic/core/quic_interval.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_expect_bug.h"
+#include "net/third_party/quiche/src/quic/platform/api/quic_test.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_interval_deque_peer.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_test_utils.h"
+
+namespace quic {
+namespace test {
+namespace {
+
+struct IntervalItem {
+ int32_t val;
+ std::size_t interval_start, interval_end;
+ QuicInterval<std::size_t> interval() const {
+ return QuicInterval<std::size_t>(interval_start, interval_end);
+ }
+ IntervalItem(int32_t val,
+ std::size_t interval_start,
+ std::size_t interval_end)
+ : val(val), interval_start(interval_start), interval_end(interval_end) {}
+};
+
+namespace {
+
+typedef QuicIntervalDeque<IntervalItem> QID;
+
+const int32_t kSize = 100;
+const std::size_t kIntervalStep = 10;
+
+} // namespace
+
+class QuicIntervalDequeTest : public QuicTest {
+ public:
+ QuicIntervalDequeTest() {
+ // Add items with intervals of |kIntervalStep| size.
+ for (int32_t i = 0; i < kSize; ++i) {
+ const std::size_t interval_begin = kIntervalStep * i;
+ const std::size_t interval_end = interval_begin + kIntervalStep;
+ qid_.PushBack(IntervalItem(i, interval_begin, interval_end));
+ }
+ }
+
+ QID qid_;
+};
+
+// The goal of this test is to show insertion/push_back, iteration, and
+// deletion/pop_front from the container.
+TEST_F(QuicIntervalDequeTest, QuicDequeInsertRemoveSize) {
+ QID qid;
+
+ EXPECT_EQ(qid.Size(), std::size_t(0));
+ qid.PushBack(IntervalItem(0, 0, 10));
+ EXPECT_EQ(qid.Size(), std::size_t(1));
+ qid.PushBack(IntervalItem(1, 10, 20));
+ EXPECT_EQ(qid.Size(), std::size_t(2));
+ qid.PushBack(IntervalItem(2, 20, 30));
+ EXPECT_EQ(qid.Size(), std::size_t(3));
+ qid.PushBack(IntervalItem(3, 30, 40));
+ EXPECT_EQ(qid.Size(), std::size_t(4));
+
+ // Advance the index all the way...
+ int32_t i = 0;
+ for (auto it = qid.DataAt(0); it != qid.DataEnd(); ++it, ++i) {
+ const int32_t index = QuicIntervalDequePeer::GetCachedIndex(&qid);
+ EXPECT_EQ(index, i);
+ EXPECT_EQ(it->val, i);
+ }
+ const int32_t index = QuicIntervalDequePeer::GetCachedIndex(&qid);
+ EXPECT_EQ(index, -1);
+
+ qid.PopFront();
+ EXPECT_EQ(qid.Size(), std::size_t(3));
+ qid.PopFront();
+ EXPECT_EQ(qid.Size(), std::size_t(2));
+ qid.PopFront();
+ EXPECT_EQ(qid.Size(), std::size_t(1));
+ qid.PopFront();
+ EXPECT_EQ(qid.Size(), std::size_t(0));
+
+ EXPECT_QUIC_DEBUG_DEATH(qid.PopFront(),
+ "Trying to pop from an empty container.");
+}
+
+// The goal of this test is to push data into the container at specific
+// intervals and show how the |DataAt| method can move the |cached_index| as the
+// iterator moves through the data.
+TEST_F(QuicIntervalDequeTest, QuicDequeInsertIterateWhole) {
+ // The write index should point to the beginning of the container.
+ const int32_t cached_index = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(cached_index, 0);
+
+ auto it = qid_.DataBegin();
+ auto end = qid_.DataEnd();
+ for (int32_t i = 0; i < kSize; ++i, ++it) {
+ EXPECT_EQ(it->val, i);
+    const std::size_t current_interval_begin = i * kIntervalStep;
+    // The |DataAt| method should find the correct interval.
+    auto lookup = qid_.DataAt(current_interval_begin);
+ EXPECT_EQ(i, lookup->val);
+ // Make sure the index hasn't changed just from using |DataAt|
+ const int32_t index_before = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(index_before, i);
+ // This increment should move the index forward.
+ lookup++;
+ // Check that the index has changed.
+ const int32_t index_after = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ const int32_t after_i = (i + 1) == kSize ? -1 : (i + 1);
+ EXPECT_EQ(index_after, after_i);
+ EXPECT_NE(it, end);
+ }
+}
+
+// The goal of this test is to push data into the container at specific
+// intervals and show how the |DataAt| method can move the |cached_index| using
+// the off-by-one logic.
+TEST_F(QuicIntervalDequeTest, QuicDequeOffByOne) {
+ // The write index should point to the beginning of the container.
+ const int32_t cached_index = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(cached_index, 0);
+
+ auto it = qid_.DataBegin();
+ auto end = qid_.DataEnd();
+ for (int32_t i = 0; i < kSize - 1; ++i, ++it) {
+ EXPECT_EQ(it->val, i);
+ const int32_t off_by_one_i = i + 1;
+    const std::size_t current_interval_begin = off_by_one_i * kIntervalStep;
+ // Make sure the index has changed just from using |DataAt|
+ const int32_t index_before = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(index_before, i);
+ // The |DataAt| method should find the correct interval.
+    auto lookup = qid_.DataAt(current_interval_begin);
+ EXPECT_EQ(off_by_one_i, lookup->val);
+ // Check that the index has changed.
+ const int32_t index_after = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ const int32_t after_i = off_by_one_i == kSize ? -1 : off_by_one_i;
+ EXPECT_EQ(index_after, after_i);
+ EXPECT_NE(it, end);
+ }
+}
+
+// The goal of this test is to push data into the container at specific
+// intervals and show the effect of modifying the structure while holding a
+// live iterator.
+TEST_F(QuicIntervalDequeTest, QuicDequeIteratorInvalidation) {
+ // The write index should point to the beginning of the container.
+ const int32_t cached_index = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(cached_index, 0);
+
+  const std::size_t interval_begin = (kSize - 1) * kIntervalStep;
+  auto lookup = qid_.DataAt(interval_begin);
+  EXPECT_EQ((*lookup).val, (kSize - 1));
+  qid_.PopFront();
+  EXPECT_QUIC_BUG(lookup++, "Iterator out of bounds.");
+  auto lookup_end = qid_.DataAt(interval_begin + kIntervalStep);
+ EXPECT_EQ(lookup_end, qid_.DataEnd());
+}
+
+// The goal of this test is the same as |QuicDequeInsertIterateWhole| but it
+// skips certain intervals and shows the |cached_index| is updated properly.
+TEST_F(QuicIntervalDequeTest, QuicDequeInsertIterateSkip) {
+ // The write index should point to the beginning of the container.
+ const int32_t cached_index = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(cached_index, 0);
+
+ const std::size_t step = 4;
+ for (int32_t i = 0; i < kSize; i += 4) {
+ if (i != 0) {
+ const int32_t before_i = (i - (step - 1));
+ EXPECT_EQ(QuicIntervalDequePeer::GetCachedIndex(&qid_), before_i);
+ }
+    const std::size_t current_interval_begin = i * kIntervalStep;
+ // The |DataAt| method should find the correct interval.
+    auto lookup = qid_.DataAt(current_interval_begin);
+ EXPECT_EQ(i, lookup->val);
+ // Make sure the index _has_ changed just from using |DataAt| since we're
+ // skipping data.
+ const int32_t index_before = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(index_before, i);
+ // This increment should move the index forward.
+ lookup++;
+ // Check that the index has changed.
+ const int32_t index_after = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ const int32_t after_i = (i + 1) == kSize ? -1 : (i + 1);
+ EXPECT_EQ(index_after, after_i);
+ }
+}
+
+// The goal of this test is the same as |QuicDequeInsertIterateWhole| but it has
+// |PopFront| calls interleaved to show the |cached_index| updates correctly.
+TEST_F(QuicIntervalDequeTest, QuicDequeInsertDeleteIterate) {
+ // The write index should point to the beginning of the container.
+ const int32_t index = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(index, 0);
+
+ std::size_t limit = 0;
+ for (int32_t i = 0; limit < qid_.Size(); ++i, ++limit) {
+ // Always point to the beginning of the container.
+ auto it = qid_.DataBegin();
+ EXPECT_EQ(it->val, i);
+
+ // Get an iterator.
+    const std::size_t current_interval_begin = i * kIntervalStep;
+    auto lookup = qid_.DataAt(current_interval_begin);
+ const int32_t index_before = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ // The index should always point to 0.
+ EXPECT_EQ(index_before, 0);
+    // This iterator increment should affect the index.
+ lookup++;
+ const int32_t index_after = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(index_after, 1);
+    // Pop the fully-iterated front element from the container.
+ qid_.PopFront();
+ // Show the index has been updated to point to 0 again (from 1).
+ const int32_t index_after_pop =
+ QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(index_after_pop, 0);
+ }
+}
+
+// The goal of this test is to move the index to the end and then add more data
+// to show it can be reset to a valid index.
+TEST_F(QuicIntervalDequeTest, QuicDequeInsertIterateInsert) {
+ // The write index should point to the beginning of the container.
+ const int32_t index = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(index, 0);
+
+ int32_t iterated_elements = 0;
+ for (int32_t i = 0; i < kSize; ++i, ++iterated_elements) {
+ // Get an iterator.
+    const std::size_t current_interval_begin = i * kIntervalStep;
+    auto lookup = qid_.DataAt(current_interval_begin);
+ const int32_t index_before = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ // The index should always point to i.
+ EXPECT_EQ(index_before, i);
+    // This iterator increment should affect the index.
+ lookup++;
+ // Show the index has been updated to point to i + 1 or -1 if at the end.
+ const int32_t index_after = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ const int32_t after_i = (i + 1) == kSize ? -1 : (i + 1);
+ EXPECT_EQ(index_after, after_i);
+ }
+ const int32_t invalid_index = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(invalid_index, -1);
+
+ // Add more data to the container, making the index valid.
+ const std::size_t offset = qid_.Size();
+ for (int32_t i = 0; i < kSize; ++i) {
+ const std::size_t interval_begin = offset + (kIntervalStep * i);
+ const std::size_t interval_end = offset + interval_begin + kIntervalStep;
+ qid_.PushBack(IntervalItem(i + offset, interval_begin, interval_end));
+ const int32_t index_current = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ // Index should now be valid and equal to the size of the container before
+ // adding more items to it.
+ EXPECT_EQ(index_current, iterated_elements);
+ }
+ // Show the index is still valid and hasn't changed since the first iteration
+ // of the loop.
+ const int32_t index_after_add = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(index_after_add, iterated_elements);
+
+ // Iterate over all the data in the container and eventually reset the index
+ // as we did before.
+ for (int32_t i = 0; i < kSize; ++i, ++iterated_elements) {
+ const std::size_t interval_begin = offset + (kIntervalStep * i);
+ const int32_t index_current = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(index_current, iterated_elements);
+ auto lookup = qid_.DataAt(interval_begin);
+ const int32_t expected_value = i + offset;
+ EXPECT_EQ(lookup->val, expected_value);
+ lookup++;
+ const int32_t after_inc =
+ (iterated_elements + 1) == (kSize * 2) ? -1 : (iterated_elements + 1);
+ const int32_t after_index = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(after_index, after_inc);
+ }
+ // Show the index is now invalid.
+ const int32_t invalid_index_again =
+ QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(invalid_index_again, -1);
+}
+
+// The goal of this test is to push data into the container at specific
+// intervals and show how the |DataAt| can iterate over already scanned data.
+TEST_F(QuicIntervalDequeTest, QuicDequeRescanData) {
+ // The write index should point to the beginning of the container.
+ const int32_t index = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(index, 0);
+
+ auto it = qid_.DataBegin();
+ auto end = qid_.DataEnd();
+ for (int32_t i = 0; i < kSize - 1; ++i, ++it) {
+ EXPECT_EQ(it->val, i);
+    const std::size_t current_interval_begin = i * kIntervalStep;
+    // The |DataAt| method should find the correct interval.
+    auto lookup = qid_.DataAt(current_interval_begin);
+ EXPECT_EQ(i, lookup->val);
+    // Make sure the cached index hasn't changed just from using |DataAt|.
+ const int32_t cached_index_before =
+ QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(cached_index_before, i);
+    // Read the cached index again; it should still equal |i|.
+ const int32_t index_before = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ const int32_t before_i = i;
+ EXPECT_EQ(index_before, before_i);
+ // This increment should move the cached index forward.
+ lookup++;
+    // Check that the cached index has moved forward.
+ const int32_t cached_index_after =
+ QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ const int32_t after_i = (i + 1);
+ EXPECT_EQ(cached_index_after, after_i);
+ EXPECT_NE(it, end);
+ }
+
+ // Iterate over items which have been consumed before.
+ int32_t expected_index = static_cast<int32_t>(kSize - 1);
+ for (int32_t i = 0; i < kSize - 1; ++i) {
+    const std::size_t current_interval_begin = i * kIntervalStep;
+    // The |DataAt| method should find the correct interval.
+    auto lookup = qid_.DataAt(current_interval_begin);
+ EXPECT_EQ(i, lookup->val);
+ // This increment shouldn't move the index forward as the index is currently
+ // ahead.
+ lookup++;
+    // Check that the index hasn't moved forward.
+ const int32_t index_after = QuicIntervalDequePeer::GetCachedIndex(&qid_);
+ EXPECT_EQ(index_after, expected_index);
+ EXPECT_NE(it, end);
+ }
+}
+
+// The goal of this test is to show that popping from an empty container is a
+// bug.
+TEST_F(QuicIntervalDequeTest, QuicDequePopEmpty) {
+ QID qid;
+ EXPECT_TRUE(qid.Empty());
+ EXPECT_QUIC_BUG(qid.PopFront(), "Trying to pop from an empty container.");
+}
+
+// The goal of this test is to show that adding a zero-sized interval is a bug.
+TEST_F(QuicIntervalDequeTest, QuicDequeZeroSizedInterval) {
+ QID qid;
+ EXPECT_QUIC_BUG(qid.PushBack(IntervalItem(0, 0, 0)),
+ "Trying to save empty interval to QuicDeque.");
+}
+
+// The goal of this test is to show that an iterator to an empty container
+// returns |DataEnd|.
+TEST_F(QuicIntervalDequeTest, QuicDequeIteratorEmpty) {
+ QID qid;
+ auto it = qid.DataAt(0);
+ EXPECT_EQ(it, qid.DataEnd());
+}
+
+} // namespace
+} // namespace test
+} // namespace quic
diff --git a/quic/core/quic_stream_send_buffer.cc b/quic/core/quic_stream_send_buffer.cc
index 158afd8..2008cf2 100644
--- a/quic/core/quic_stream_send_buffer.cc
+++ b/quic/core/quic_stream_send_buffer.cc
@@ -35,13 +35,21 @@
BufferedSlice::~BufferedSlice() {}
+QuicInterval<std::size_t> BufferedSlice::interval() const {
+ const std::size_t length = slice.length();
+ return QuicInterval<std::size_t>(offset, offset + length);
+}
+
bool StreamPendingRetransmission::operator==(
const StreamPendingRetransmission& other) const {
return offset == other.offset && length == other.length;
}
QuicStreamSendBuffer::QuicStreamSendBuffer(QuicBufferAllocator* allocator)
- : stream_offset_(0),
+  // TODO(b/144690240): Remove this variable once quic_interval_deque is
+  // deprecated.
+ : interval_deque_active_(GetQuicReloadableFlag(quic_interval_deque)),
+ current_end_offset_(0),
+ stream_offset_(0),
allocator_(allocator),
stream_bytes_written_(0),
stream_bytes_outstanding_(0),
@@ -76,9 +84,20 @@
return;
}
size_t length = slice.length();
- buffered_slices_.emplace_back(std::move(slice), stream_offset_);
- if (write_index_ == -1) {
- write_index_ = buffered_slices_.size() - 1;
+ if (interval_deque_active_) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_interval_deque, 1, 5);
+ // Need to start the offsets at the right interval.
+ if (interval_deque_.Empty()) {
+ const QuicStreamOffset end = stream_offset_ + length;
+ current_end_offset_ = std::max(current_end_offset_, end);
+ }
+ BufferedSlice bs = BufferedSlice(std::move(slice), stream_offset_);
+ interval_deque_.PushBack(std::move(bs));
+ } else {
+ buffered_slices_.emplace_back(std::move(slice), stream_offset_);
+ if (write_index_ == -1) {
+ write_index_ = buffered_slices_.size() - 1;
+ }
}
stream_offset_ += length;
}
@@ -96,6 +115,38 @@
bool QuicStreamSendBuffer::WriteStreamData(QuicStreamOffset offset,
QuicByteCount data_length,
QuicDataWriter* writer) {
+ if (interval_deque_active_) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_interval_deque, 2, 5);
+ QUIC_BUG_IF(current_end_offset_ < offset)
+ << "Tried to write data out of sequence. last_offset_end:"
+ << current_end_offset_ << ", offset:" << offset;
+ // The iterator returned from |interval_deque_| will automatically advance
+ // the internal write index for the QuicIntervalDeque. The incrementing is
+ // done in operator++.
+ for (auto slice_it = interval_deque_.DataAt(offset);
+ slice_it != interval_deque_.DataEnd(); ++slice_it) {
+ if (data_length == 0 || offset < slice_it->offset) {
+ break;
+ }
+
+ QuicByteCount slice_offset = offset - slice_it->offset;
+ QuicByteCount available_bytes_in_slice =
+ slice_it->slice.length() - slice_offset;
+ QuicByteCount copy_length =
+ std::min(data_length, available_bytes_in_slice);
+ if (!writer->WriteBytes(slice_it->slice.data() + slice_offset,
+ copy_length)) {
+ QUIC_BUG << "Writer fails to write.";
+ return false;
+ }
+ offset += copy_length;
+ data_length -= copy_length;
+ const QuicStreamOffset new_end =
+ slice_it->offset + slice_it->slice.length();
+ current_end_offset_ = std::max(current_end_offset_, new_end);
+ }
+ return data_length == 0;
+ }
// TODO(renjietang): Remove this variable once quic_coalesce_stream_frames_2
// is deprecated.
bool write_index_hit = false;
@@ -164,7 +215,7 @@
}
} else if (write_index_hit &&
static_cast<size_t>(write_index_) == buffered_slices_.size()) {
- // Already write to the end off buffer.
+      // Already wrote to the end of the buffer.
QUIC_DVLOG(2) << "Finish writing out all buffered data.";
write_index_ = -1;
}
@@ -264,6 +315,39 @@
bool QuicStreamSendBuffer::FreeMemSlices(QuicStreamOffset start,
QuicStreamOffset end) {
+ if (interval_deque_active_) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_interval_deque, 3, 5);
+ auto it = interval_deque_.DataBegin();
+ if (it == interval_deque_.DataEnd() || it->slice.empty()) {
+ QUIC_BUG << "Trying to ack stream data [" << start << ", " << end << "), "
+ << (it == interval_deque_.DataEnd()
+ ? "and there is no outstanding data."
+ : "and the first slice is empty.");
+ return false;
+ }
+ if (!it->interval().Contains(start)) {
+      // Slow path: the acked data is not the earliest outstanding data.
+ it = std::lower_bound(interval_deque_.DataBegin(),
+ interval_deque_.DataEnd(), start, CompareOffset());
+ }
+ if (it == interval_deque_.DataEnd() || it->slice.empty()) {
+ QUIC_BUG << "Offset " << start << " with iterator offset: " << it->offset
+ << (it == interval_deque_.DataEnd()
+ ? " does not exist."
+ : " has already been acked.");
+ return false;
+ }
+ for (; it != interval_deque_.DataEnd(); ++it) {
+ if (it->offset >= end) {
+ break;
+ }
+ if (!it->slice.empty() &&
+ bytes_acked_.Contains(it->offset, it->offset + it->slice.length())) {
+ it->slice.Reset();
+ }
+ }
+ return true;
+ }
auto it = buffered_slices_.begin();
// Find it, such that buffered_slices_[it - 1].end < start <=
// buffered_slices_[it].end.
@@ -297,6 +381,19 @@
}
void QuicStreamSendBuffer::CleanUpBufferedSlices() {
+ if (interval_deque_active_) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_interval_deque, 4, 5);
+ while (!interval_deque_.Empty() &&
+ interval_deque_.DataBegin()->slice.empty()) {
+ QUIC_BUG_IF(interval_deque_.DataBegin()->offset > current_end_offset_)
+ << "Fail to pop front from interval_deque_. Front element contained "
+ "a slice whose data has not all be written. Front offset "
+ << interval_deque_.DataBegin()->offset << " length "
+ << interval_deque_.DataBegin()->slice.length();
+ interval_deque_.PopFront();
+ }
+ return;
+ }
while (!buffered_slices_.empty() && buffered_slices_.front().slice.empty()) {
// Remove data which stops waiting for acks. Please note, mem slices can
// be released out of order, but send buffer is cleaned up in order.
@@ -323,7 +420,12 @@
}
size_t QuicStreamSendBuffer::size() const {
- return buffered_slices_.size();
+ if (interval_deque_active_) {
+ QUIC_RELOADABLE_FLAG_COUNT_N(quic_interval_deque, 5, 5);
+ return interval_deque_.Size();
+ } else {
+ return buffered_slices_.size();
+ }
}
} // namespace quic
diff --git a/quic/core/quic_stream_send_buffer.h b/quic/core/quic_stream_send_buffer.h
index 51a10e9..57be51d 100644
--- a/quic/core/quic_stream_send_buffer.h
+++ b/quic/core/quic_stream_send_buffer.h
@@ -6,6 +6,7 @@
#define QUICHE_QUIC_CORE_QUIC_STREAM_SEND_BUFFER_H_
#include "net/third_party/quiche/src/quic/core/frames/quic_stream_frame.h"
+#include "net/third_party/quiche/src/quic/core/quic_interval_deque.h"
#include "net/third_party/quiche/src/quic/core/quic_interval_set.h"
#include "net/third_party/quiche/src/quic/core/quic_types.h"
#include "net/third_party/quiche/src/quic/platform/api/quic_containers.h"
@@ -35,6 +36,9 @@
BufferedSlice& operator=(const BufferedSlice& other) = delete;
~BufferedSlice();
+  // Returns the half-open interval [offset, offset + slice.length()) that this
+  // slice covers in the stream.
+ QuicInterval<std::size_t> interval() const;
+
// Stream data of this data slice.
QuicMemSlice slice;
// Location of this data slice in the stream.
@@ -141,7 +145,12 @@
// Cleanup empty slices in order from buffered_slices_.
void CleanUpBufferedSlices();
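+  // Latched value of --quic_interval_deque. When true, |interval_deque_| and
+  // |current_end_offset_| are used instead of |buffered_slices_| and
+  // |write_index_|.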
+ bool interval_deque_active_;
QuicDeque<BufferedSlice> buffered_slices_;
+ // |current_end_offset_| stores the end offset of the current slice to ensure
+ // data isn't being written out of order when using the |interval_deque_|.
+ QuicStreamOffset current_end_offset_;
+ QuicIntervalDeque<BufferedSlice> interval_deque_;
// Offset of next inserted byte.
QuicStreamOffset stream_offset_;
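
With the interval() accessor added above, BufferedSlice satisfies the element
contract that QuicIntervalDeque expects, so the send buffer can look up the
slice covering a given stream offset instead of tracking |write_index_|
manually (the pre-flag path). Roughly, with illustrative values:

    // A 512-byte QuicMemSlice saved at stream offset 1024:
    BufferedSlice bs(std::move(slice), /*offset=*/1024);
    bs.interval();  // == QuicInterval<std::size_t>(1024, 1536), i.e. [1024, 1536)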
diff --git a/quic/core/quic_stream_send_buffer_test.cc b/quic/core/quic_stream_send_buffer_test.cc
index 5c1bff3..8f864c6 100644
--- a/quic/core/quic_stream_send_buffer_test.cc
+++ b/quic/core/quic_stream_send_buffer_test.cc
@@ -44,9 +44,14 @@
QuicMemSlice slice2(&allocator_, 768);
memset(const_cast<char*>(slice2.data()), 'd', 768);
- // Index starts from not pointing to any slice.
- EXPECT_EQ(nullptr,
- QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_));
+ if (!GetQuicReloadableFlag(quic_interval_deque)) {
+ // Index starts from not pointing to any slice.
+ EXPECT_EQ(nullptr,
+ QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_));
+ } else {
+ // The stream offset should be 0 since nothing is written.
+ EXPECT_EQ(0u, QuicStreamSendBufferPeer::EndOffset(&send_buffer_));
+ }
// Save all data.
SetQuicFlag(FLAGS_quic_send_buffer_max_data_slice_size, 1024);
@@ -136,11 +141,12 @@
EXPECT_EQ(copy1 + copy2, quiche::QuicheStringPiece(buf + 1024, 2048));
// Write new data.
- if (!GetQuicRestartFlag(quic_coalesce_stream_frames_2)) {
+ if (!GetQuicRestartFlag(quic_coalesce_stream_frames_2) &&
+ !GetQuicReloadableFlag(quic_interval_deque)) {
EXPECT_EQ(1, QuicStreamSendBufferPeer::write_index(&send_buffer_));
EXPECT_QUIC_DEBUG_DEATH(send_buffer_.WriteStreamData(2048, 50, &writer),
"Tried to write data out of sequence.");
- } else {
+ } else if (!GetQuicReloadableFlag(quic_interval_deque)) {
EXPECT_EQ(2, QuicStreamSendBufferPeer::write_index(&send_buffer_));
ASSERT_TRUE(send_buffer_.WriteStreamData(2048, 50, &writer));
EXPECT_EQ(std::string(50, 'c'),
@@ -149,6 +155,17 @@
ASSERT_TRUE(send_buffer_.WriteStreamData(2048, 1124, &writer));
EXPECT_EQ(copy3, quiche::QuicheStringPiece(buf + 1024 + 2048 + 50, 1124));
EXPECT_EQ(3, QuicStreamSendBufferPeer::write_index(&send_buffer_));
+ } else {
+    // If |quic_interval_deque| is true then |write_index_| has no value.
+    // Instead we ensure the |current_end_offset_| has an appropriate value.
+ EXPECT_EQ(2048u, QuicStreamSendBufferPeer::EndOffset(&send_buffer_));
+ ASSERT_TRUE(send_buffer_.WriteStreamData(2048, 50, &writer));
+ EXPECT_EQ(std::string(50, 'c'),
+ quiche::QuicheStringPiece(buf + 1024 + 2048, 50));
+ EXPECT_EQ(3072u, QuicStreamSendBufferPeer::EndOffset(&send_buffer_));
+ ASSERT_TRUE(send_buffer_.WriteStreamData(2048, 1124, &writer));
+ EXPECT_EQ(copy3, quiche::QuicheStringPiece(buf + 1024 + 2048 + 50, 1124));
+ EXPECT_EQ(3840u, QuicStreamSendBufferPeer::EndOffset(&send_buffer_));
}
}
@@ -290,39 +307,72 @@
EXPECT_TRUE(send_buffer_.IsStreamDataOutstanding(400, 800));
}
+// TODO(b/144690240): Rename to EndOffset when deprecating --quic_interval_deque
TEST_F(QuicStreamSendBufferTest, CurrentWriteIndex) {
char buf[4000];
QuicDataWriter writer(4000, buf, quiche::HOST_BYTE_ORDER);
- // With data buffered, index points to the 1st slice of data.
- EXPECT_EQ(0u,
- QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_)->offset);
+ if (!GetQuicReloadableFlag(quic_interval_deque)) {
+ // With data buffered, index points to the 1st slice of data.
+ EXPECT_EQ(
+ 0u, QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_)->offset);
+ } else {
+ EXPECT_EQ(1024u, QuicStreamSendBufferPeer::EndOffset(&send_buffer_));
+ }
ASSERT_TRUE(send_buffer_.WriteStreamData(0, 1024, &writer));
// Wrote all data on 1st slice, index points to next slice.
- EXPECT_EQ(1024u,
- QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_)->offset);
+ if (!GetQuicReloadableFlag(quic_interval_deque)) {
+ EXPECT_EQ(
+ 1024u,
+ QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_)->offset);
+ } else {
+ // Last offset we've seen is 1024
+ EXPECT_EQ(1024u, QuicStreamSendBufferPeer::EndOffset(&send_buffer_));
+ }
ASSERT_TRUE(send_buffer_.WriteStreamData(1024, 512, &writer));
// Last write didn't finish a whole slice. Index remains.
- EXPECT_EQ(1024u,
- QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_)->offset);
+ if (!GetQuicReloadableFlag(quic_interval_deque)) {
+ EXPECT_EQ(
+ 1024u,
+ QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_)->offset);
+ } else {
+ // Last offset is now 2048 as that's the end of the next slice.
+ EXPECT_EQ(2048u, QuicStreamSendBufferPeer::EndOffset(&send_buffer_));
+ }
send_buffer_.OnStreamDataConsumed(1024);
// If data in 1st slice gets ACK'ed, it shouldn't change the indexed slice
QuicByteCount newly_acked_length;
EXPECT_TRUE(send_buffer_.OnStreamDataAcked(0, 1024, &newly_acked_length));
- EXPECT_EQ(1024u,
- QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_)->offset);
+ if (!GetQuicReloadableFlag(quic_interval_deque)) {
+ EXPECT_EQ(
+ 1024u,
+ QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_)->offset);
+ } else {
+ // Last offset is still 2048.
+ EXPECT_EQ(2048u, QuicStreamSendBufferPeer::EndOffset(&send_buffer_));
+ }
ASSERT_TRUE(
send_buffer_.WriteStreamData(1024 + 512, 3840 - 1024 - 512, &writer));
// After writing all buffered data, index become invalid again.
- EXPECT_EQ(nullptr,
- QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_));
+ if (!GetQuicReloadableFlag(quic_interval_deque)) {
+ EXPECT_EQ(nullptr,
+ QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_));
+ } else {
+ // Last offset is end offset of last slice.
+ EXPECT_EQ(3840u, QuicStreamSendBufferPeer::EndOffset(&send_buffer_));
+ }
QuicMemSlice slice(&allocator_, 60);
memset(const_cast<char*>(slice.data()), 'e', 60);
send_buffer_.SaveMemSlice(std::move(slice));
- // With new data, index points to the new data.
- EXPECT_EQ(3840u,
- QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_)->offset);
+ if (!GetQuicReloadableFlag(quic_interval_deque)) {
+ // With new data, index points to the new data.
+ EXPECT_EQ(
+ 3840u,
+ QuicStreamSendBufferPeer::CurrentWriteSlice(&send_buffer_)->offset);
+ } else {
+ EXPECT_EQ(3840u, QuicStreamSendBufferPeer::EndOffset(&send_buffer_));
+ }
}
TEST_F(QuicStreamSendBufferTest, SaveMemSliceSpan) {
diff --git a/quic/test_tools/quic_interval_deque_peer.h b/quic/test_tools/quic_interval_deque_peer.h
new file mode 100644
index 0000000..e9a99ba
--- /dev/null
+++ b/quic/test_tools/quic_interval_deque_peer.h
@@ -0,0 +1,35 @@
+// Copyright 2019 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef QUICHE_QUIC_TEST_TOOLS_QUIC_INTERVAL_DEQUE_PEER_H_
+#define QUICHE_QUIC_TEST_TOOLS_QUIC_INTERVAL_DEQUE_PEER_H_
+
+#include "net/third_party/quiche/src/quic/core/quic_interval_deque.h"
+
+namespace quic {
+
+namespace test {
+
+class QuicIntervalDequePeer {
+ public:
+ template <class T, class C>
+ static int32_t GetCachedIndex(QuicIntervalDeque<T, C>* interval_deque) {
+ if (!interval_deque->cached_index_.has_value()) {
+ return -1;
+ }
+ return interval_deque->cached_index_.value();
+ }
+
+ template <class T, class C>
+ static T* GetItem(QuicIntervalDeque<T, C>* interval_deque,
+ const std::size_t index) {
+ return &interval_deque->container_[index];
+ }
+};
+
+} // namespace test
+
+} // namespace quic
+
+#endif // QUICHE_QUIC_TEST_TOOLS_QUIC_INTERVAL_DEQUE_PEER_H_
diff --git a/quic/test_tools/quic_stream_send_buffer_peer.cc b/quic/test_tools/quic_stream_send_buffer_peer.cc
index 22e5e56..2135d3f 100644
--- a/quic/test_tools/quic_stream_send_buffer_peer.cc
+++ b/quic/test_tools/quic_stream_send_buffer_peer.cc
@@ -3,6 +3,7 @@
// found in the LICENSE file.
#include "net/third_party/quiche/src/quic/test_tools/quic_stream_send_buffer_peer.h"
+#include "net/third_party/quiche/src/quic/test_tools/quic_interval_deque_peer.h"
namespace quic {
@@ -15,25 +16,58 @@
send_buffer->stream_offset_ = stream_offset;
}
+// TODO(b/144690240): Remove CurrentWriteSlice when deprecating
+// --quic_interval_deque
// static
const BufferedSlice* QuicStreamSendBufferPeer::CurrentWriteSlice(
QuicStreamSendBuffer* send_buffer) {
- if (send_buffer->write_index_ == -1) {
+ auto wi = write_index(send_buffer);
+
+ if (wi == -1) {
return nullptr;
}
- return &send_buffer->buffered_slices_[send_buffer->write_index_];
+ if (GetQuicReloadableFlag(quic_interval_deque)) {
+ return QuicIntervalDequePeer::GetItem(&send_buffer->interval_deque_, wi);
+ } else {
+ return &send_buffer->buffered_slices_[wi];
+ }
+}
+
+QuicStreamOffset QuicStreamSendBufferPeer::EndOffset(
+ QuicStreamSendBuffer* send_buffer) {
+ if (GetQuicReloadableFlag(quic_interval_deque)) {
+ return send_buffer->current_end_offset_;
+ }
+ return 0;
}
// static
QuicByteCount QuicStreamSendBufferPeer::TotalLength(
QuicStreamSendBuffer* send_buffer) {
QuicByteCount length = 0;
- for (const auto& slice : send_buffer->buffered_slices_) {
- length += slice.slice.length();
+ if (GetQuicReloadableFlag(quic_interval_deque)) {
+ for (auto slice = send_buffer->interval_deque_.DataBegin();
+ slice != send_buffer->interval_deque_.DataEnd(); ++slice) {
+ length += slice->slice.length();
+ }
+ } else {
+ for (const auto& slice : send_buffer->buffered_slices_) {
+ length += slice.slice.length();
+ }
}
return length;
}
+// static
+int32_t QuicStreamSendBufferPeer::write_index(
+ QuicStreamSendBuffer* send_buffer) {
+ if (send_buffer->interval_deque_active_) {
+ return QuicIntervalDequePeer::GetCachedIndex(&send_buffer->interval_deque_);
+ } else {
+ return send_buffer->write_index_;
+ }
+}
+
} // namespace test
} // namespace quic
diff --git a/quic/test_tools/quic_stream_send_buffer_peer.h b/quic/test_tools/quic_stream_send_buffer_peer.h
index 3adb173..310cda9 100644
--- a/quic/test_tools/quic_stream_send_buffer_peer.h
+++ b/quic/test_tools/quic_stream_send_buffer_peer.h
@@ -19,11 +19,11 @@
static const BufferedSlice* CurrentWriteSlice(
QuicStreamSendBuffer* send_buffer);
+ static QuicStreamOffset EndOffset(QuicStreamSendBuffer* send_buffer);
+
static QuicByteCount TotalLength(QuicStreamSendBuffer* send_buffer);
- static int32_t write_index(QuicStreamSendBuffer* send_buffer) {
- return send_buffer->write_index_;
- }
+ static int32_t write_index(QuicStreamSendBuffer* send_buffer);
};
} // namespace test