summaryrefslogtreecommitdiff
path: root/chromium/third_party/blink/renderer/platform/scheduler/base
diff options
context:
space:
mode:
Diffstat (limited to 'chromium/third_party/blink/renderer/platform/scheduler/base')
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/DEPS5
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/enqueue_order.cc25
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/enqueue_order.h52
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.cc40
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.h47
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/intrusive_heap.h227
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/intrusive_heap_unittest.cc374
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/lazy_now.cc18
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/lazy_now.h40
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/moveable_auto_lock.h41
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/real_time_domain.cc70
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/real_time_domain.h45
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/sequenced_task_source.h40
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue.cc279
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue.h307
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_impl.cc1039
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h451
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager.h103
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_delegate_for_test.cc65
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_delegate_for_test.h59
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.cc669
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.h334
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl_unittest.cc3312
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_perftest.cc230
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector.cc388
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector.h225
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector_logic.h35
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector_unittest.cc714
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/task_time_observer.h36
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/test_count_uses_time_source.cc22
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/test_count_uses_time_source.h31
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/test_task_time_observer.h23
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller.h94
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller_impl.cc255
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller_impl.h132
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/time_domain.cc119
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/time_domain.h142
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/time_domain_unittest.cc348
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.cc69
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.h55
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/work_queue.cc234
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/work_queue.h156
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets.cc172
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h104
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets_unittest.cc329
-rw-r--r--chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_unittest.cc474
46 files changed, 12029 insertions, 0 deletions
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/DEPS b/chromium/third_party/blink/renderer/platform/scheduler/base/DEPS
new file mode 100644
index 00000000000..53701134c2e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/DEPS
@@ -0,0 +1,5 @@
+specific_include_rules = {
+ ".*test\.cc": [
+ "+components/viz/test",
+ ],
+}
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/enqueue_order.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/enqueue_order.cc
new file mode 100644
index 00000000000..5d262f4956a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/enqueue_order.cc
@@ -0,0 +1,25 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/enqueue_order.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+// Note we set the first |enqueue_order_| to a specific non-zero value, because
+// first N values of EnqueueOrder have special meaning (see EnqueueOrderValues).
+EnqueueOrderGenerator::EnqueueOrderGenerator()
+ : enqueue_order_(static_cast<EnqueueOrder>(EnqueueOrderValues::kFirst)) {}
+
+EnqueueOrderGenerator::~EnqueueOrderGenerator() = default;
+
+EnqueueOrder EnqueueOrderGenerator::GenerateNext() {
+ base::AutoLock lock(lock_);
+ return enqueue_order_++;
+}
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/enqueue_order.h b/chromium/third_party/blink/renderer/platform/scheduler/base/enqueue_order.h
new file mode 100644
index 00000000000..9caea897ade
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/enqueue_order.h
@@ -0,0 +1,52 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_ENQUEUE_ORDER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_ENQUEUE_ORDER_H_
+
+#include <stdint.h>
+
+#include "base/synchronization/lock.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+using EnqueueOrder = uint64_t;
+
+// TODO(scheduler-dev): Remove explicit casts when c++17 comes.
+enum class EnqueueOrderValues : EnqueueOrder {
+ // Invalid EnqueueOrder.
+ kNone = 0,
+
+ // Earliest possible EnqueueOrder, to be used for fence blocking.
+ kBlockingFence = 1,
+ kFirst = 2,
+};
+
+// Generates 64-bit integers used to provide ordering of tasks. NOTE The
+// scheduler assumes these values will not overflow.
+class EnqueueOrderGenerator {
+ public:
+ EnqueueOrderGenerator();
+ ~EnqueueOrderGenerator();
+
+ // Returns a monotonically increasing integer, starting from
+ // EnqueueOrderValues::kFirst. Can be called from any thread.
+ EnqueueOrder GenerateNext();
+
+ static bool IsValidEnqueueOrder(EnqueueOrder enqueue_order) {
+ return enqueue_order != 0ull;
+ }
+
+ private:
+ base::Lock lock_;
+ EnqueueOrder enqueue_order_;
+};
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_ENQUEUE_ORDER_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.cc
new file mode 100644
index 00000000000..c2fc97aafdf
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.cc
@@ -0,0 +1,40 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+GracefulQueueShutdownHelper::GracefulQueueShutdownHelper()
+ : task_queue_manager_deleted_(false) {}
+
+GracefulQueueShutdownHelper::~GracefulQueueShutdownHelper() = default;
+
+void GracefulQueueShutdownHelper::GracefullyShutdownTaskQueue(
+ std::unique_ptr<internal::TaskQueueImpl> task_queue) {
+ base::AutoLock lock(lock_);
+ if (task_queue_manager_deleted_)
+ return;
+ queues_.push_back(std::move(task_queue));
+}
+
+void GracefulQueueShutdownHelper::OnTaskQueueManagerDeleted() {
+ base::AutoLock lock(lock_);
+ task_queue_manager_deleted_ = true;
+ queues_.clear();
+}
+
+std::vector<std::unique_ptr<internal::TaskQueueImpl>>
+GracefulQueueShutdownHelper::TakeQueues() {
+ base::AutoLock lock(lock_);
+ std::vector<std::unique_ptr<internal::TaskQueueImpl>> result;
+ result.swap(queues_);
+ return result;
+}
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.h b/chromium/third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.h
new file mode 100644
index 00000000000..976feb1ae5d
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.h
@@ -0,0 +1,47 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_GRACEFUL_QUEUE_SHUTDOWN_HELPER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_GRACEFUL_QUEUE_SHUTDOWN_HELPER_H_
+
+#include <memory>
+#include <vector>
+
+#include "base/macros.h"
+#include "base/memory/ref_counted.h"
+#include "base/synchronization/lock.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+class TaskQueueImpl;
+
+// Thread-safe helper to shutdown queues from any thread.
+class GracefulQueueShutdownHelper
+ : public base::RefCountedThreadSafe<GracefulQueueShutdownHelper> {
+ public:
+ GracefulQueueShutdownHelper();
+ ~GracefulQueueShutdownHelper();
+
+ void GracefullyShutdownTaskQueue(
+ std::unique_ptr<internal::TaskQueueImpl> queue);
+
+ void OnTaskQueueManagerDeleted();
+
+ std::vector<std::unique_ptr<internal::TaskQueueImpl>> TakeQueues();
+
+ private:
+ base::Lock lock_;
+ bool task_queue_manager_deleted_;
+ std::vector<std::unique_ptr<internal::TaskQueueImpl>> queues_;
+
+ DISALLOW_COPY_AND_ASSIGN(GracefulQueueShutdownHelper);
+};
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_GRACEFUL_QUEUE_SHUTDOWN_HELPER_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/intrusive_heap.h b/chromium/third_party/blink/renderer/platform/scheduler/base/intrusive_heap.h
new file mode 100644
index 00000000000..5d5acabdcc5
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/intrusive_heap.h
@@ -0,0 +1,227 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_INTRUSIVE_HEAP_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_INTRUSIVE_HEAP_H_
+
+#include <algorithm>
+#include <vector>
+
+#include "base/logging.h"
+
+namespace blink {
+namespace scheduler {
+
+template <typename T>
+class IntrusiveHeap;
+
+// Intended as an opaque wrapper around |index_|.
+class HeapHandle {
+ public:
+ HeapHandle() : index_(0u) {}
+
+ bool IsValid() const { return index_ != 0u; }
+
+ private:
+ template <typename T>
+ friend class IntrusiveHeap;
+
+ HeapHandle(size_t index) : index_(index) {}
+
+ size_t index_;
+};
+
+// A standard min-heap with the following assumptions:
+// 1. T has operator <=
+// 2. T has method void SetHeapHandle(HeapHandle handle)
+// 3. T has method void ClearHeapHandle()
+// 4. T is moveable
+// 5. T is default constructible
+// 6. The heap size never gets terribly big so reclaiming memory on pop/erase
+// isn't a priority.
+//
+// The reason IntrusiveHeap exists is to provide similar performance to
+// std::priority_queue while allowing removal of arbitrary elements.
+template <typename T>
+class IntrusiveHeap {
+ public:
+ IntrusiveHeap() : nodes_(kMinimumHeapSize), size_(0) {}
+
+ ~IntrusiveHeap() {
+ for (size_t i = 1; i <= size_; i++) {
+ MakeHole(i);
+ }
+ }
+
+ bool empty() const { return size_ == 0; }
+
+ size_t size() const { return size_; }
+
+ void Clear() {
+ for (size_t i = 1; i <= size_; i++) {
+ MakeHole(i);
+ }
+ nodes_.resize(kMinimumHeapSize);
+ size_ = 0;
+ }
+
+ const T& Min() const {
+ DCHECK_GE(size_, 1u);
+ return nodes_[1];
+ }
+
+ void Pop() {
+ DCHECK_GE(size_, 1u);
+ MakeHole(1u);
+ size_t top_index = size_--;
+ if (!empty())
+ MoveHoleDownAndFillWithLeafElement(1u, std::move(nodes_[top_index]));
+ }
+
+ void insert(T&& element) {
+ size_++;
+ if (size_ >= nodes_.size())
+ nodes_.resize(nodes_.size() * 2);
+ // Notionally we have a hole in the tree at index |size_|, move this up
+ // to find the right insertion point.
+ MoveHoleUpAndFillWithElement(size_, std::move(element));
+ }
+
+ void erase(HeapHandle handle) {
+ DCHECK_GT(handle.index_, 0u);
+ DCHECK_LE(handle.index_, size_);
+ MakeHole(handle.index_);
+ size_t top_index = size_--;
+ if (empty() || top_index == handle.index_)
+ return;
+ if (nodes_[handle.index_] <= nodes_[top_index]) {
+ MoveHoleDownAndFillWithLeafElement(handle.index_,
+ std::move(nodes_[top_index]));
+ } else {
+ MoveHoleUpAndFillWithElement(handle.index_, std::move(nodes_[top_index]));
+ }
+ }
+
+ void ReplaceMin(T&& element) {
+ // Note |element| might not be a leaf node so we can't use
+ // MoveHoleDownAndFillWithLeafElement.
+ MoveHoleDownAndFillWithElement(1u, std::move(element));
+ }
+
+ void ChangeKey(HeapHandle handle, T&& element) {
+ if (nodes_[handle.index_] <= element) {
+ MoveHoleDownAndFillWithLeafElement(handle.index_, std::move(element));
+ } else {
+ MoveHoleUpAndFillWithElement(handle.index_, std::move(element));
+ }
+ }
+
+ // Caution mutating the heap invalidates the iterators.
+ const T* begin() const { return &nodes_[1u]; }
+ const T* end() const { return begin() + size_; }
+
+ private:
+ enum {
+ // The majority of sets in the scheduler have 0-3 items in them (a few will
+ // have perhaps up to 100), so this means we usually only have to allocate
+ // memory once.
+ kMinimumHeapSize = 4u
+ };
+
+ friend class IntrusiveHeapTest;
+
+ size_t MoveHole(size_t new_hole_pos, size_t old_hole_pos) {
+ DCHECK_GT(new_hole_pos, 0u);
+ DCHECK_LE(new_hole_pos, size_);
+ DCHECK_GT(old_hole_pos, 0u);
+ DCHECK_LE(old_hole_pos, size_);
+ DCHECK_NE(old_hole_pos, new_hole_pos);
+ nodes_[old_hole_pos] = std::move(nodes_[new_hole_pos]);
+ nodes_[old_hole_pos].SetHeapHandle(HeapHandle(old_hole_pos));
+ return new_hole_pos;
+ }
+
+ // Notionally creates a hole in the tree at |index|.
+ void MakeHole(size_t index) {
+ DCHECK_GT(index, 0u);
+ DCHECK_LE(index, size_);
+ nodes_[index].ClearHeapHandle();
+ }
+
+ void FillHole(size_t hole, T&& element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ nodes_[hole] = std::move(element);
+ nodes_[hole].SetHeapHandle(HeapHandle(hole));
+ DCHECK(std::is_heap(begin(), end(), CompareNodes));
+ }
+
+ // is_heap requires a strict comparator.
+ static bool CompareNodes(const T& a, const T& b) { return !(a <= b); }
+
+ // Moves the |hole| up the tree and when the right position has been found
+ // |element| is moved in.
+ void MoveHoleUpAndFillWithElement(size_t hole, T&& element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ while (hole >= 2u) {
+ size_t parent_pos = hole / 2;
+ if (nodes_[parent_pos] <= element)
+ break;
+
+ hole = MoveHole(parent_pos, hole);
+ }
+ FillHole(hole, std::move(element));
+ }
+
+ // Moves the |hole| down the tree and when the right position has been found
+ // |element| is moved in.
+ void MoveHoleDownAndFillWithElement(size_t hole, T&& element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ size_t child_pos = hole * 2;
+ while (child_pos < size_) {
+ if (nodes_[child_pos + 1] <= nodes_[child_pos])
+ child_pos++;
+
+ if (element <= nodes_[child_pos])
+ break;
+
+ hole = MoveHole(child_pos, hole);
+ child_pos *= 2;
+ }
+ if (child_pos == size_ && !(element <= nodes_[child_pos]))
+ hole = MoveHole(child_pos, hole);
+ FillHole(hole, std::move(element));
+ }
+
+ // Moves the |hole| down the tree and when the right position has been found
+ // |leaf_element| is moved in. Faster than MoveHoleDownAndFillWithElement
+ // (it does one key comparison per level instead of two) but only valid for
+ // leaf elements (i.e. one of the max values).
+ void MoveHoleDownAndFillWithLeafElement(size_t hole, T&& leaf_element) {
+ DCHECK_GT(hole, 0u);
+ DCHECK_LE(hole, size_);
+ size_t child_pos = hole * 2;
+ while (child_pos < size_) {
+ size_t second_child = child_pos + 1;
+ if (nodes_[second_child] <= nodes_[child_pos])
+ child_pos = second_child;
+
+ hole = MoveHole(child_pos, hole);
+ child_pos *= 2;
+ }
+ if (child_pos == size_)
+ hole = MoveHole(child_pos, hole);
+ MoveHoleUpAndFillWithElement(hole, std::move(leaf_element));
+ }
+
+ std::vector<T> nodes_; // NOTE we use 1-based indexing
+ size_t size_;
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_INTRUSIVE_HEAP_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/intrusive_heap_unittest.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/intrusive_heap_unittest.cc
new file mode 100644
index 00000000000..d64250ffaf7
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/intrusive_heap_unittest.cc
@@ -0,0 +1,374 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/intrusive_heap.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace blink {
+namespace scheduler {
+namespace {
+
+struct TestElement {
+ int key;
+ HeapHandle* handle;
+
+ bool operator<=(const TestElement& other) const { return key <= other.key; }
+
+ void SetHeapHandle(HeapHandle h) {
+ if (handle)
+ *handle = h;
+ }
+
+ void ClearHeapHandle() {
+ if (handle)
+ *handle = HeapHandle();
+ }
+};
+
+} // namespace
+
+class IntrusiveHeapTest : public testing::Test {
+ protected:
+ static bool CompareNodes(const TestElement& a, const TestElement& b) {
+ return IntrusiveHeap<TestElement>::CompareNodes(a, b);
+ }
+};
+
+TEST_F(IntrusiveHeapTest, Basic) {
+ IntrusiveHeap<TestElement> heap;
+
+ EXPECT_TRUE(heap.empty());
+ EXPECT_EQ(0u, heap.size());
+}
+
+TEST_F(IntrusiveHeapTest, Clear) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index1;
+
+ heap.insert({11, &index1});
+ EXPECT_EQ(1u, heap.size());
+ EXPECT_TRUE(index1.IsValid());
+
+ heap.Clear();
+ EXPECT_EQ(0u, heap.size());
+ EXPECT_FALSE(index1.IsValid());
+}
+
+TEST_F(IntrusiveHeapTest, Destructor) {
+ HeapHandle index1;
+
+ {
+ IntrusiveHeap<TestElement> heap;
+
+ heap.insert({11, &index1});
+ EXPECT_EQ(1u, heap.size());
+ EXPECT_TRUE(index1.IsValid());
+ }
+
+ EXPECT_FALSE(index1.IsValid());
+}
+
+TEST_F(IntrusiveHeapTest, Min) {
+ IntrusiveHeap<TestElement> heap;
+
+ heap.insert({9, nullptr});
+ heap.insert({10, nullptr});
+ heap.insert({8, nullptr});
+ heap.insert({2, nullptr});
+ heap.insert({7, nullptr});
+ heap.insert({15, nullptr});
+ heap.insert({22, nullptr});
+ heap.insert({3, nullptr});
+
+ EXPECT_FALSE(heap.empty());
+ EXPECT_EQ(8u, heap.size());
+ EXPECT_EQ(2, heap.Min().key);
+}
+
+TEST_F(IntrusiveHeapTest, InsertAscending) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index1;
+
+ for (int i = 0; i < 50; i++)
+ heap.insert({i, nullptr});
+
+ EXPECT_EQ(0, heap.Min().key);
+ EXPECT_EQ(50u, heap.size());
+}
+
+TEST_F(IntrusiveHeapTest, InsertDescending) {
+ IntrusiveHeap<TestElement> heap;
+
+ for (int i = 0; i < 50; i++)
+ heap.insert({50 - i, nullptr});
+
+ EXPECT_EQ(1, heap.Min().key);
+ EXPECT_EQ(50u, heap.size());
+}
+
+TEST_F(IntrusiveHeapTest, HeapIndex) {
+ HeapHandle index5;
+ HeapHandle index4;
+ HeapHandle index3;
+ HeapHandle index2;
+ HeapHandle index1;
+ IntrusiveHeap<TestElement> heap;
+
+ EXPECT_FALSE(index1.IsValid());
+ EXPECT_FALSE(index2.IsValid());
+ EXPECT_FALSE(index3.IsValid());
+ EXPECT_FALSE(index4.IsValid());
+ EXPECT_FALSE(index5.IsValid());
+
+ heap.insert({15, &index5});
+ heap.insert({14, &index4});
+ heap.insert({13, &index3});
+ heap.insert({12, &index2});
+ heap.insert({11, &index1});
+
+ EXPECT_TRUE(index1.IsValid());
+ EXPECT_TRUE(index2.IsValid());
+ EXPECT_TRUE(index3.IsValid());
+ EXPECT_TRUE(index4.IsValid());
+ EXPECT_TRUE(index5.IsValid());
+
+ EXPECT_FALSE(heap.empty());
+}
+
+TEST_F(IntrusiveHeapTest, Pop) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index1;
+ HeapHandle index2;
+
+ heap.insert({11, &index1});
+ heap.insert({12, &index2});
+ EXPECT_EQ(2u, heap.size());
+ EXPECT_TRUE(index1.IsValid());
+ EXPECT_TRUE(index2.IsValid());
+
+ heap.Pop();
+ EXPECT_EQ(1u, heap.size());
+ EXPECT_FALSE(index1.IsValid());
+ EXPECT_TRUE(index2.IsValid());
+
+ heap.Pop();
+ EXPECT_EQ(0u, heap.size());
+ EXPECT_FALSE(index1.IsValid());
+ EXPECT_FALSE(index2.IsValid());
+}
+
+TEST_F(IntrusiveHeapTest, PopMany) {
+ IntrusiveHeap<TestElement> heap;
+
+ for (int i = 0; i < 500; i++)
+ heap.insert({i, nullptr});
+
+ EXPECT_FALSE(heap.empty());
+ EXPECT_EQ(500u, heap.size());
+ for (int i = 0; i < 500; i++) {
+ EXPECT_EQ(i, heap.Min().key);
+ heap.Pop();
+ }
+ EXPECT_TRUE(heap.empty());
+}
+
+TEST_F(IntrusiveHeapTest, Erase) {
+ IntrusiveHeap<TestElement> heap;
+
+ HeapHandle index12;
+
+ heap.insert({15, nullptr});
+ heap.insert({14, nullptr});
+ heap.insert({13, nullptr});
+ heap.insert({12, &index12});
+ heap.insert({11, nullptr});
+
+ EXPECT_EQ(5u, heap.size());
+ EXPECT_TRUE(index12.IsValid());
+ heap.erase(index12);
+ EXPECT_EQ(4u, heap.size());
+ EXPECT_FALSE(index12.IsValid());
+
+ EXPECT_EQ(11, heap.Min().key);
+ heap.Pop();
+ EXPECT_EQ(13, heap.Min().key);
+ heap.Pop();
+ EXPECT_EQ(14, heap.Min().key);
+ heap.Pop();
+ EXPECT_EQ(15, heap.Min().key);
+ heap.Pop();
+ EXPECT_TRUE(heap.empty());
+}
+
+TEST_F(IntrusiveHeapTest, ReplaceMin) {
+ IntrusiveHeap<TestElement> heap;
+
+ for (int i = 0; i < 500; i++)
+ heap.insert({500 - i, nullptr});
+
+ EXPECT_EQ(1, heap.Min().key);
+
+ for (int i = 0; i < 500; i++)
+ heap.ReplaceMin({1000 + i, nullptr});
+
+ EXPECT_EQ(1000, heap.Min().key);
+}
+
+TEST_F(IntrusiveHeapTest, ReplaceMinWithNonLeafNode) {
+ IntrusiveHeap<TestElement> heap;
+
+ for (int i = 0; i < 50; i++) {
+ heap.insert({i, nullptr});
+ heap.insert({200 + i, nullptr});
+ }
+
+ EXPECT_EQ(0, heap.Min().key);
+
+ for (int i = 0; i < 50; i++)
+ heap.ReplaceMin({100 + i, nullptr});
+
+ for (int i = 0; i < 50; i++) {
+ EXPECT_EQ((100 + i), heap.Min().key);
+ heap.Pop();
+ }
+ for (int i = 0; i < 50; i++) {
+ EXPECT_EQ((200 + i), heap.Min().key);
+ heap.Pop();
+ }
+ EXPECT_TRUE(heap.empty());
+}
+
+TEST_F(IntrusiveHeapTest, ReplaceMinCheckAllFinalPositions) {
+ HeapHandle index[100];
+
+ for (int j = -1; j <= 201; j += 2) {
+ IntrusiveHeap<TestElement> heap;
+ for (size_t i = 0; i < 100; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ReplaceMin({j, &index[40]});
+
+ int prev = -2;
+ while (!heap.empty()) {
+ DCHECK_GT(heap.Min().key, prev);
+ DCHECK(heap.Min().key == j || (heap.Min().key % 2) == 0);
+ DCHECK_NE(heap.Min().key, 0);
+ prev = heap.Min().key;
+ heap.Pop();
+ }
+ }
+}
+
+TEST_F(IntrusiveHeapTest, ChangeKeyUp) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index[10];
+
+ for (size_t i = 0; i < 10; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ChangeKey(index[5], {17, &index[5]});
+
+ std::vector<int> results;
+ while (!heap.empty()) {
+ results.push_back(heap.Min().key);
+ heap.Pop();
+ }
+
+ EXPECT_THAT(results, testing::ElementsAre(0, 2, 4, 6, 8, 12, 14, 16, 17, 18));
+}
+
+TEST_F(IntrusiveHeapTest, ChangeKeyUpButDoesntMove) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index[10];
+
+ for (size_t i = 0; i < 10; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ChangeKey(index[5], {11, &index[5]});
+
+ std::vector<int> results;
+ while (!heap.empty()) {
+ results.push_back(heap.Min().key);
+ heap.Pop();
+ }
+
+ EXPECT_THAT(results, testing::ElementsAre(0, 2, 4, 6, 8, 11, 12, 14, 16, 18));
+}
+
+TEST_F(IntrusiveHeapTest, ChangeKeyDown) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index[10];
+
+ for (size_t i = 0; i < 10; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ChangeKey(index[5], {1, &index[5]});
+
+ std::vector<int> results;
+ while (!heap.empty()) {
+ results.push_back(heap.Min().key);
+ heap.Pop();
+ }
+
+ EXPECT_THAT(results, testing::ElementsAre(0, 1, 2, 4, 6, 8, 12, 14, 16, 18));
+}
+
+TEST_F(IntrusiveHeapTest, ChangeKeyDownButDoesntMove) {
+ IntrusiveHeap<TestElement> heap;
+ HeapHandle index[10];
+
+ for (size_t i = 0; i < 10; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ChangeKey(index[5], {9, &index[5]});
+
+ std::vector<int> results;
+ while (!heap.empty()) {
+ results.push_back(heap.Min().key);
+ heap.Pop();
+ }
+
+ EXPECT_THAT(results, testing::ElementsAre(0, 2, 4, 6, 8, 9, 12, 14, 16, 18));
+}
+
+TEST_F(IntrusiveHeapTest, ChangeKeyCheckAllFinalPositions) {
+ HeapHandle index[100];
+
+ for (int j = -1; j <= 201; j += 2) {
+ IntrusiveHeap<TestElement> heap;
+ for (size_t i = 0; i < 100; i++) {
+ heap.insert({static_cast<int>(i) * 2, &index[i]});
+ }
+
+ heap.ChangeKey(index[40], {j, &index[40]});
+
+ int prev = -2;
+ while (!heap.empty()) {
+ DCHECK_GT(heap.Min().key, prev);
+ DCHECK(heap.Min().key == j || (heap.Min().key % 2) == 0);
+ DCHECK_NE(heap.Min().key, 80);
+ prev = heap.Min().key;
+ heap.Pop();
+ }
+ }
+}
+
+TEST_F(IntrusiveHeapTest, CompareNodes) {
+ TestElement five{5, nullptr}, six{6, nullptr};
+
+ // This is the stdlibc++ assertion that fails in http://crbug.com/661080
+ EXPECT_FALSE(IntrusiveHeapTest::CompareNodes(six, six));
+
+ EXPECT_FALSE(IntrusiveHeapTest::CompareNodes(five, six));
+ EXPECT_TRUE(IntrusiveHeapTest::CompareNodes(six, five));
+}
+
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/lazy_now.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/lazy_now.cc
new file mode 100644
index 00000000000..4ac96989417
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/lazy_now.cc
@@ -0,0 +1,18 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/lazy_now.h"
+
+#include "base/time/tick_clock.h"
+
+namespace blink {
+namespace scheduler {
+base::TimeTicks LazyNow::Now() {
+ if (!now_)
+ now_ = tick_clock_->NowTicks();
+ return now_.value();
+}
+
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/lazy_now.h b/chromium/third_party/blink/renderer/platform/scheduler/base/lazy_now.h
new file mode 100644
index 00000000000..c841f20c78a
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/lazy_now.h
@@ -0,0 +1,40 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_LAZY_NOW_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_LAZY_NOW_H_
+
+#include "base/optional.h"
+#include "base/time/time.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+
+namespace base {
+class TickClock;
+}
+
+namespace blink {
+namespace scheduler {
+
+// Now() is somewhat expensive so it makes sense not to call Now() unless we
+// really need to.
+class PLATFORM_EXPORT LazyNow {
+ public:
+ explicit LazyNow(base::TimeTicks now) : tick_clock_(nullptr), now_(now) {
+ }
+
+ explicit LazyNow(const base::TickClock* tick_clock)
+ : tick_clock_(tick_clock) {}
+
+ // The result is cached; it will not be updated on any subsequent calls.
+ base::TimeTicks Now();
+
+ private:
+ const base::TickClock* tick_clock_; // NOT OWNED
+ base::Optional<base::TimeTicks> now_;
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_LAZY_NOW_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/moveable_auto_lock.h b/chromium/third_party/blink/renderer/platform/scheduler/base/moveable_auto_lock.h
new file mode 100644
index 00000000000..60412fbcf25
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/moveable_auto_lock.h
@@ -0,0 +1,41 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_MOVEABLE_AUTO_LOCK_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_MOVEABLE_AUTO_LOCK_H_
+
+#include "base/synchronization/lock.h"
+
+namespace blink {
+namespace scheduler {
+
+class MoveableAutoLock {
+ public:
+ explicit MoveableAutoLock(base::Lock& lock) : lock_(lock), moved_(false) {
+ lock_.Acquire();
+ }
+
+ MoveableAutoLock(MoveableAutoLock&& other)
+ : lock_(other.lock_), moved_(other.moved_) {
+ lock_.AssertAcquired();
+ other.moved_ = true;
+ }
+
+ ~MoveableAutoLock() {
+ if (moved_)
+ return;
+ lock_.AssertAcquired();
+ lock_.Release();
+ }
+
+ private:
+ base::Lock& lock_;
+ bool moved_;
+ DISALLOW_COPY_AND_ASSIGN(MoveableAutoLock);
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_MOVEABLE_AUTO_LOCK_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/real_time_domain.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/real_time_domain.cc
new file mode 100644
index 00000000000..ccdce6fabf1
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/real_time_domain.cc
@@ -0,0 +1,70 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/real_time_domain.h"
+
+#include "base/bind.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.h"
+
+namespace blink {
+namespace scheduler {
+
// |task_queue_manager_| stays null until OnRegisterWithTaskQueueManager().
RealTimeDomain::RealTimeDomain() : task_queue_manager_(nullptr) {}

RealTimeDomain::~RealTimeDomain() = default;
+
+void RealTimeDomain::OnRegisterWithTaskQueueManager(
+ TaskQueueManagerImpl* task_queue_manager) {
+ task_queue_manager_ = task_queue_manager;
+ DCHECK(task_queue_manager_);
+}
+
// Both time accessors forward to the manager, which owns the real tick clock.
LazyNow RealTimeDomain::CreateLazyNow() const {
  return task_queue_manager_->CreateLazyNow();
}

base::TimeTicks RealTimeDomain::Now() const {
  return task_queue_manager_->NowTicks();
}
+
void RealTimeDomain::RequestWakeUpAt(base::TimeTicks now,
                                     base::TimeTicks run_time) {
  // NOTE this is only called if the scheduled runtime is sooner than any
  // previously scheduled runtime, or there is no (outstanding) previously
  // scheduled runtime.
  task_queue_manager_->MaybeScheduleDelayedWork(FROM_HERE, this, now, run_time);
}

// Cancels a wake-up previously requested via RequestWakeUpAt().
void RealTimeDomain::CancelWakeUpAt(base::TimeTicks run_time) {
  task_queue_manager_->CancelDelayedWork(this, run_time);
}
+
// Returns the delay until the next scheduled task: nullopt if no task is
// scheduled, zero if one is already due, otherwise the remaining delay.
base::Optional<base::TimeDelta> RealTimeDomain::DelayTillNextTask(
    LazyNow* lazy_now) {
  base::TimeTicks next_run_time;
  if (!NextScheduledRunTime(&next_run_time))
    return base::nullopt;

  base::TimeTicks now = lazy_now->Now();
  if (now >= next_run_time)
    return base::TimeDelta();  // Makes DoWork post an immediate continuation.

  base::TimeDelta delay = next_run_time - now;
  TRACE_EVENT1("renderer.scheduler", "RealTimeDomain::DelayTillNextTask",
               "delay_ms", delay.InMillisecondsF());

  // The next task is sometime in the future. DoWork will make sure it gets
  // run at the right time.
  return delay;
}
+
// The real-time domain has no extra state worth dumping into traces.
void RealTimeDomain::AsValueIntoInternal(
    base::trace_event::TracedValue* state) const {}

const char* RealTimeDomain::GetName() const {
  return "RealTimeDomain";
}
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/real_time_domain.h b/chromium/third_party/blink/renderer/platform/scheduler/base/real_time_domain.h
new file mode 100644
index 00000000000..57e079c2d71
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/real_time_domain.h
@@ -0,0 +1,45 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_REAL_TIME_DOMAIN_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_REAL_TIME_DOMAIN_H_
+
+#include <set>
+
+#include "base/macros.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/scheduler/base/time_domain.h"
+
+namespace blink {
+namespace scheduler {
+
// A TimeDomain backed by the TaskQueueManagerImpl's real tick clock. Its
// Now()/CreateLazyNow() simply forward to the manager, and wake-up requests
// schedule real delayed work on it.
class PLATFORM_EXPORT RealTimeDomain : public TimeDomain {
 public:
  RealTimeDomain();
  ~RealTimeDomain() override;

  // TimeDomain implementation:
  LazyNow CreateLazyNow() const override;
  base::TimeTicks Now() const override;
  base::Optional<base::TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override;
  const char* GetName() const override;

 protected:
  void OnRegisterWithTaskQueueManager(
      TaskQueueManagerImpl* task_queue_manager) override;
  void RequestWakeUpAt(base::TimeTicks now, base::TimeTicks run_time) override;
  void CancelWakeUpAt(base::TimeTicks run_time) override;
  void AsValueIntoInternal(
      base::trace_event::TracedValue* state) const override;

 private:
  // Null until OnRegisterWithTaskQueueManager() is called.
  TaskQueueManagerImpl* task_queue_manager_;  // NOT OWNED

  DISALLOW_COPY_AND_ASSIGN(RealTimeDomain);
};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_REAL_TIME_DOMAIN_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/sequenced_task_source.h b/chromium/third_party/blink/renderer/platform/scheduler/base/sequenced_task_source.h
new file mode 100644
index 00000000000..323e0868988
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/sequenced_task_source.h
@@ -0,0 +1,40 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_SEQUENCED_TASK_SOURCE_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_SEQUENCED_TASK_SOURCE_H_
+
+#include "base/optional.h"
+#include "base/pending_task.h"
+
+namespace blink {
+namespace scheduler {
+class LazyNow;
+
+namespace internal {
+
+// This is temporary interface for ThreadController to be able to run tasks
+// from TaskQueueManager.
+class SequencedTaskSource {
+ public:
+ // TODO(alexclarke): Move this enum elsewhere.
+ enum class WorkType { kImmediate, kDelayed };
+
+ // Take a next task to run from a sequence.
+ // TODO(altimin): Do not pass |work_type| here.
+ virtual base::Optional<base::PendingTask> TakeTask() = 0;
+
+ // Notify a sequence that a taken task has been completed.
+ virtual void DidRunTask() = 0;
+
+ // Returns the delay till the next task, or base::TimeDelta::Max() if there
+ // isn't one.
+ virtual base::TimeDelta DelayTillNextTask(LazyNow* lazy_now) = 0;
+};
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_SEQUENCED_TASK_SOURCE_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue.cc
new file mode 100644
index 00000000000..d1dd378e3a8
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue.cc
@@ -0,0 +1,279 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue.h"
+
+#include "base/bind_helpers.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.h"
+
+namespace blink {
+namespace scheduler {
+
// Caches the manager weak pointer and the shutdown helper up front so the
// destructor can still hand |impl_| off for graceful shutdown even after the
// manager has gone away. A null |impl| produces an inert queue.
TaskQueue::TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
                     const TaskQueue::Spec& spec)
    : impl_(std::move(impl)),
      thread_id_(base::PlatformThread::CurrentId()),
      task_queue_manager_(impl_ ? impl_->GetTaskQueueManagerWeakPtr()
                                : nullptr),
      graceful_queue_shutdown_helper_(
          impl_ ? impl_->GetGracefulQueueShutdownHelper() : nullptr) {}
+
TaskQueue::~TaskQueue() {
  // scoped_refptr guarantees us that this object isn't used.
  if (!impl_)
    return;  // Already shut down or moved away.
  if (impl_->IsUnregistered())
    return;
  // Hand the impl to the shutdown helper so remaining work is wound down
  // gracefully rather than destroyed outright.
  graceful_queue_shutdown_helper_->GracefullyShutdownTaskQueue(
      TakeTaskQueueImpl());
}
+
// Converts a PostedTask into a base::PendingTask subclass, carrying the
// scheduler-specific |task_type| along.
TaskQueue::Task::Task(TaskQueue::PostedTask task,
                      base::TimeTicks desired_run_time)
    : PendingTask(task.posted_from,
                  std::move(task.callback),
                  desired_run_time,
                  task.nestable),
      task_type_(task.task_type) {}

// Plain aggregate-style constructor; |delay|, |nestable| and |task_type|
// default in the header (zero delay, nestable, type 0).
TaskQueue::PostedTask::PostedTask(base::OnceClosure callback,
                                  base::Location posted_from,
                                  base::TimeDelta delay,
                                  base::Nestable nestable,
                                  int task_type)
    : callback(std::move(callback)),
      posted_from(posted_from),
      delay(delay),
      nestable(nestable),
      task_type(task_type) {}
+
// Unregisters the queue. Holds |impl_lock_| because |impl_| is written here
// and may be read concurrently from other threads (see header comment).
void TaskQueue::ShutdownTaskQueue() {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  base::AutoLock lock(impl_lock_);
  if (!impl_)
    return;  // Already shut down.
  if (!task_queue_manager_) {
    // Manager is gone; nothing to unregister from, just drop the impl.
    impl_.reset();
    return;
  }
  // Clear externally supplied hooks before handing the impl back.
  impl_->SetBlameContext(nullptr);
  impl_->SetOnTaskStartedHandler(
      internal::TaskQueueImpl::OnTaskStartedHandler());
  impl_->SetOnTaskCompletedHandler(
      internal::TaskQueueImpl::OnTaskCompletedHandler());
  task_queue_manager_->UnregisterTaskQueueImpl(TakeTaskQueueImpl());
}

// The "current sequence" is the thread this TaskQueue was created on.
bool TaskQueue::RunsTasksInCurrentSequence() const {
  return IsOnMainThread();
}
+
+bool TaskQueue::PostDelayedTask(const base::Location& from_here,
+ base::OnceClosure task,
+ base::TimeDelta delay) {
+ internal::TaskQueueImpl::PostTaskResult result;
+ {
+ auto lock = AcquireImplReadLockIfNeeded();
+ if (!impl_)
+ return false;
+ result = impl_->PostDelayedTask(PostedTask(
+ std::move(task), from_here, delay, base::Nestable::kNestable));
+ }
+ return result.success;
+}
+
+bool TaskQueue::PostNonNestableDelayedTask(const base::Location& from_here,
+ base::OnceClosure task,
+ base::TimeDelta delay) {
+ internal::TaskQueueImpl::PostTaskResult result;
+ {
+ auto lock = AcquireImplReadLockIfNeeded();
+ if (!impl_)
+ return false;
+ result = impl_->PostDelayedTask(PostedTask(
+ std::move(task), from_here, delay, base::Nestable::kNonNestable));
+ }
+ return result.success;
+}
+
+bool TaskQueue::PostTaskWithMetadata(PostedTask task) {
+ internal::TaskQueueImpl::PostTaskResult result;
+ {
+ auto lock = AcquireImplReadLockIfNeeded();
+ if (!impl_)
+ return false;
+ result = impl_->PostDelayedTask(std::move(task));
+ }
+ return result.success;
+}
+
// Returns a voter for enabling/disabling this queue, or null after shutdown.
std::unique_ptr<TaskQueue::QueueEnabledVoter>
TaskQueue::CreateQueueEnabledVoter() {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return nullptr;
  return impl_->CreateQueueEnabledVoter(this);
}

// A shut-down queue reports itself as disabled.
bool TaskQueue::IsQueueEnabled() const {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return false;
  return impl_->IsQueueEnabled();
}

// A shut-down queue reports itself as empty.
bool TaskQueue::IsEmpty() const {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return true;
  return impl_->IsEmpty();
}

size_t TaskQueue::GetNumberOfPendingTasks() const {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return 0;
  return impl_->GetNumberOfPendingTasks();
}

bool TaskQueue::HasTaskToRunImmediately() const {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return false;
  return impl_->HasTaskToRunImmediately();
}
+
base::Optional<base::TimeTicks> TaskQueue::GetNextScheduledWakeUp() {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return base::nullopt;
  return impl_->GetNextScheduledWakeUp();
}

// No-op after shutdown.
void TaskQueue::SetQueuePriority(TaskQueue::QueuePriority priority) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return;
  impl_->SetQueuePriority(priority);
}

// After shutdown this returns kLowPriority as an arbitrary fallback value.
TaskQueue::QueuePriority TaskQueue::GetQueuePriority() const {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return TaskQueue::QueuePriority::kLowPriority;
  return impl_->GetQueuePriority();
}
+
// All of the following setters silently no-op once the queue is shut down.

void TaskQueue::AddTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return;
  impl_->AddTaskObserver(task_observer);
}

void TaskQueue::RemoveTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return;
  impl_->RemoveTaskObserver(task_observer);
}

void TaskQueue::SetTimeDomain(TimeDomain* time_domain) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return;
  impl_->SetTimeDomain(time_domain);
}

TimeDomain* TaskQueue::GetTimeDomain() const {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return nullptr;
  return impl_->GetTimeDomain();
}

void TaskQueue::SetBlameContext(
    base::trace_event::BlameContext* blame_context) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return;
  impl_->SetBlameContext(blame_context);
}
+
+void TaskQueue::InsertFence(InsertFencePosition position) {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->InsertFence(position);
+}
+
+void TaskQueue::InsertFenceAt(base::TimeTicks time) {
+ impl_->InsertFenceAt(time);
+}
+
+void TaskQueue::RemoveFence() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ if (!impl_)
+ return;
+ impl_->RemoveFence();
+}
+
bool TaskQueue::HasActiveFence() {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return false;
  return impl_->HasActiveFence();
}

bool TaskQueue::BlockedByFence() const {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return false;
  return impl_->BlockedByFence();
}
+
// Can be called from any thread; takes the impl read lock when off-main.
// Returns "" after shutdown.
const char* TaskQueue::GetName() const {
  auto lock = AcquireImplReadLockIfNeeded();
  if (!impl_)
    return "";
  return impl_->GetName();
}
+
// Installs (or, with null, clears) the wake-up observer by binding it into
// the impl's on-next-wake-up-changed callback.
void TaskQueue::SetObserver(Observer* observer) {
  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
  if (!impl_)
    return;
  if (observer) {
    // Observer is guaranteed to outlive TaskQueue and TaskQueueImpl lifecycle
    // is controlled by |this|, so base::Unretained is safe here.
    impl_->SetOnNextWakeUpChangedCallback(base::BindRepeating(
        &TaskQueue::Observer::OnQueueNextWakeUpChanged,
        base::Unretained(observer), base::Unretained(this)));
  } else {
    // A null (default-constructed) callback removes the observer.
    impl_->SetOnNextWakeUpChangedCallback(
        base::RepeatingCallback<void(base::TimeTicks)>());
  }
}
+
bool TaskQueue::IsOnMainThread() const {
  return thread_id_ == base::PlatformThread::CurrentId();
}

// Reading |impl_| from the main thread is lock-free; any other thread must
// hold |impl_lock_|, returned here as a moveable scoped lock.
base::Optional<MoveableAutoLock> TaskQueue::AcquireImplReadLockIfNeeded()
    const {
  if (IsOnMainThread())
    return base::nullopt;
  return MoveableAutoLock(impl_lock_);
}

// Transfers ownership of |impl_| to the caller; |impl_| must be non-null.
std::unique_ptr<internal::TaskQueueImpl> TaskQueue::TakeTaskQueueImpl() {
  DCHECK(impl_);
  return std::move(impl_);
}
+
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue.h b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue.h
new file mode 100644
index 00000000000..031204f5581
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue.h
@@ -0,0 +1,307 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_H_
+
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/optional.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/platform_thread.h"
+#include "base/time/time.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.h"
+#include "third_party/blink/renderer/platform/scheduler/base/moveable_auto_lock.h"
+
+namespace base {
+namespace trace_event {
+class BlameContext;
+}
+} // namespace base
+
+namespace blink {
+namespace scheduler {
+namespace task_queue_throttler_unittest {
+class TaskQueueThrottlerTest;
+}
+namespace internal {
+class TaskQueueImpl;
+}
+
+class TimeDomain;
+class TaskQueueManagerImpl;
+
// A prioritized queue of tasks owned by a TaskQueueManagerImpl, exposed to
// clients as a base::SingleThreadTaskRunner. Most methods must be called on
// the thread the queue was created on; posting is allowed from any thread.
class PLATFORM_EXPORT TaskQueue : public base::SingleThreadTaskRunner {
 public:
  class PLATFORM_EXPORT Observer {
   public:
    virtual ~Observer() = default;

    // Notify observer that the time at which this queue wants to run
    // the next task has changed. |next_wakeup| can be in the past
    // (e.g. base::TimeTicks() can be used to notify about immediate work).
    // Can be called on any thread. From inside this notification, all
    // methods of |queue| except SetObserver, SetTimeDomain and GetTimeDomain
    // may be called.
    //
    // TODO(altimin): Make it base::Optional<base::TimeTicks> to tell
    // observer about cancellations.
    virtual void OnQueueNextWakeUpChanged(TaskQueue* queue,
                                          base::TimeTicks next_wake_up) = 0;
  };

  // A wrapper around base::OnceClosure with additional metadata to be passed
  // to PostTask and plumbed until PendingTask is created.
  struct PLATFORM_EXPORT PostedTask {
    PostedTask(base::OnceClosure callback,
               base::Location posted_from,
               base::TimeDelta delay = base::TimeDelta(),
               base::Nestable nestable = base::Nestable::kNestable,
               int task_type = 0);

    base::OnceClosure callback;
    base::Location posted_from;
    base::TimeDelta delay;
    base::Nestable nestable;
    int task_type;
  };

  // Unregisters the task queue after which no tasks posted to it will run and
  // the TaskQueueManagerImpl's reference to it will be released soon.
  virtual void ShutdownTaskQueue();

  enum QueuePriority {
    // Queues with control priority will run before any other queue, and will
    // explicitly starve other queues. Typically this should only be used for
    // private queues which perform control operations.
    kControlPriority,

    // The selector will prioritize highest over high, normal and low; and
    // high over normal and low; and normal over low. However it will ensure
    // neither of the lower priority queues can be completely starved by higher
    // priority tasks. All three of these queues will always take priority over
    // and can starve the best effort queue.
    kHighestPriority,

    kHighPriority,

    // Queues with normal priority are the default.
    kNormalPriority,
    kLowPriority,

    // Queues with best effort priority will only be run if all other queues are
    // empty. They can be starved by the other queues.
    kBestEffortPriority,
    // Must be the last entry.
    kQueuePriorityCount,
    kFirstQueuePriority = kControlPriority,
  };

  // Can be called on any thread.
  static const char* PriorityToString(QueuePriority priority);

  // Options for constructing a TaskQueue.
  struct Spec {
    explicit Spec(const char* name)
        : name(name),
          should_monitor_quiescence(false),
          time_domain(nullptr),
          should_notify_observers(true) {}

    Spec SetShouldMonitorQuiescence(bool should_monitor) {
      should_monitor_quiescence = should_monitor;
      return *this;
    }

    Spec SetShouldNotifyObservers(bool run_observers) {
      should_notify_observers = run_observers;
      return *this;
    }

    Spec SetTimeDomain(TimeDomain* domain) {
      time_domain = domain;
      return *this;
    }

    const char* name;
    bool should_monitor_quiescence;
    TimeDomain* time_domain;
    bool should_notify_observers;
  };

  // Interface to pass per-task metadata to RendererScheduler.
  class PLATFORM_EXPORT Task : public base::PendingTask {
   public:
    Task(PostedTask posted_task, base::TimeTicks desired_run_time);

    int task_type() const { return task_type_; }

   private:
    int task_type_;
  };

  // An interface that lets the owner vote on whether or not the associated
  // TaskQueue should be enabled.
  class QueueEnabledVoter {
   public:
    QueueEnabledVoter() = default;
    virtual ~QueueEnabledVoter() = default;

    // Votes to enable or disable the associated TaskQueue. The TaskQueue will
    // only be enabled if all the voters agree it should be enabled, or if there
    // are no voters.
    // NOTE this must be called on the thread the associated TaskQueue was
    // created on.
    virtual void SetQueueEnabled(bool enabled) = 0;

   private:
    DISALLOW_COPY_AND_ASSIGN(QueueEnabledVoter);
  };

  // Returns an interface that allows the caller to vote on whether or not this
  // TaskQueue is enabled. The TaskQueue will be enabled if there are no voters
  // or if all agree it should be enabled.
  // NOTE this must be called on the thread this TaskQueue was created by.
  std::unique_ptr<QueueEnabledVoter> CreateQueueEnabledVoter();

  // NOTE this must be called on the thread this TaskQueue was created by.
  bool IsQueueEnabled() const;

  // Returns true if the queue is completely empty.
  bool IsEmpty() const;

  // Returns the number of pending tasks in the queue.
  size_t GetNumberOfPendingTasks() const;

  // Returns true if the queue has work that's ready to execute now.
  // NOTE: this must be called on the thread this TaskQueue was created by.
  bool HasTaskToRunImmediately() const;

  // Returns requested run time of next scheduled wake-up for a delayed task
  // which is not ready to run. If there are no such tasks or the queue is
  // disabled (by a QueueEnabledVoter) it returns base::nullopt.
  // NOTE: this must be called on the thread this TaskQueue was created by.
  base::Optional<base::TimeTicks> GetNextScheduledWakeUp();

  // Can be called on any thread.
  virtual const char* GetName() const;

  // Set the priority of the queue to |priority|. NOTE this must be called on
  // the thread this TaskQueue was created by.
  void SetQueuePriority(QueuePriority priority);

  // Returns the current queue priority.
  QueuePriority GetQueuePriority() const;

  // These functions can only be called on the same thread that the task queue
  // manager executes its tasks on.
  void AddTaskObserver(base::MessageLoop::TaskObserver* task_observer);
  void RemoveTaskObserver(base::MessageLoop::TaskObserver* task_observer);

  // Set the blame context which is entered and left while executing tasks from
  // this task queue. |blame_context| must be null or outlive this task queue.
  // Must be called on the thread this TaskQueue was created by.
  void SetBlameContext(base::trace_event::BlameContext* blame_context);

  // Removes the task queue from the previous TimeDomain and adds it to
  // |domain|. This is a moderately expensive operation.
  void SetTimeDomain(TimeDomain* domain);

  // Returns the queue's current TimeDomain. Can be called from any thread.
  TimeDomain* GetTimeDomain() const;

  enum class InsertFencePosition {
    kNow,  // Tasks posted on the queue up till this point may run.
           // All further tasks are blocked.
    kBeginningOfTime,  // No tasks posted on this queue may run.
  };

  // Inserts a barrier into the task queue which prevents tasks with an enqueue
  // order greater than the fence from running until either the fence has been
  // removed or a subsequent fence has unblocked some tasks within the queue.
  // Note: delayed tasks get their enqueue order set once their delay has
  // expired, and non-delayed tasks get their enqueue order set when posted.
  //
  // Fences come in three flavours:
  // - Regular (InsertFence(NOW)) - all tasks posted after this moment
  //   are blocked.
  // - Fully blocking (InsertFence(kBeginningOfTime)) - all tasks including
  //   already posted are blocked.
  // - Delayed (InsertFenceAt(timestamp)) - blocks all tasks posted after given
  //   point in time (must be in the future).
  //
  // Only one fence can be scheduled at a time. Inserting a new fence
  // will automatically remove the previous one, regardless of fence type.
  void InsertFence(InsertFencePosition position);
  void InsertFenceAt(base::TimeTicks time);

  // Removes any previously added fence and unblocks execution of any tasks
  // blocked by it.
  void RemoveFence();

  bool HasActiveFence();

  // Returns true if the queue has a fence which is blocking execution of tasks.
  bool BlockedByFence() const;

  void SetObserver(Observer* observer);

  // base::SingleThreadTaskRunner implementation
  bool RunsTasksInCurrentSequence() const override;
  bool PostDelayedTask(const base::Location& from_here,
                       base::OnceClosure task,
                       base::TimeDelta delay) override;
  bool PostNonNestableDelayedTask(const base::Location& from_here,
                                  base::OnceClosure task,
                                  base::TimeDelta delay) override;

  // Posts a task carrying full PostedTask metadata. Can be called on any
  // thread; returns false if the queue has been shut down.
  bool PostTaskWithMetadata(PostedTask task);

 protected:
  TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
            const TaskQueue::Spec& spec);
  ~TaskQueue() override;

  internal::TaskQueueImpl* GetTaskQueueImpl() const { return impl_.get(); }

 private:
  friend class internal::TaskQueueImpl;
  friend class TaskQueueManagerImpl;

  friend class task_queue_throttler_unittest::TaskQueueThrottlerTest;

  // True iff the current thread is the one this queue was created on.
  bool IsOnMainThread() const;

  base::Optional<MoveableAutoLock> AcquireImplReadLockIfNeeded() const;

  // Take |impl_| and untie it from the enclosing task queue.
  std::unique_ptr<internal::TaskQueueImpl> TakeTaskQueueImpl();

  // |impl_| can be written to on the main thread but can be read from
  // any thread.
  // |impl_lock_| must be acquired when writing to |impl_| or when accessing
  // it from non-main thread. Reading from the main thread does not require
  // a lock.
  mutable base::Lock impl_lock_;
  std::unique_ptr<internal::TaskQueueImpl> impl_;

  // Thread the queue was created on; used by IsOnMainThread().
  const base::PlatformThreadId thread_id_;

  const base::WeakPtr<TaskQueueManagerImpl> task_queue_manager_;

  const scoped_refptr<internal::GracefulQueueShutdownHelper>
      graceful_queue_shutdown_helper_;

  THREAD_CHECKER(main_thread_checker_);

  DISALLOW_COPY_AND_ASSIGN(TaskQueue);
};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_impl.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_impl.cc
new file mode 100644
index 00000000000..d53960530ce
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_impl.cc
@@ -0,0 +1,1039 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+
+#include <memory>
+#include <utility>
+
+#include "base/time/time.h"
+#include "base/trace_event/blame_context.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/time_domain.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue.h"
+#include "third_party/blink/renderer/platform/scheduler/util/tracing_helper.h"
+
+namespace blink {
+namespace scheduler {
+
+// static
// static
// Maps a QueuePriority to a stable string used in tracing output.
const char* TaskQueue::PriorityToString(TaskQueue::QueuePriority priority) {
  switch (priority) {
    case kControlPriority:
      return "control";
    case kHighestPriority:
      return "highest";
    case kHighPriority:
      return "high";
    case kNormalPriority:
      return "normal";
    case kLowPriority:
      return "low";
    case kBestEffortPriority:
      return "best_effort";
    default:
      // kQueuePriorityCount (or a corrupted value) is not a real priority.
      NOTREACHED();
      return nullptr;
  }
}
+
+namespace internal {
+
TaskQueueImpl::TaskQueueImpl(TaskQueueManagerImpl* task_queue_manager,
                             TimeDomain* time_domain,
                             const TaskQueue::Spec& spec)
    : name_(spec.name),
      thread_id_(base::PlatformThread::CurrentId()),
      any_thread_(task_queue_manager, time_domain),
      main_thread_only_(task_queue_manager, this, time_domain),
      should_monitor_quiescence_(spec.should_monitor_quiescence),
      should_notify_observers_(spec.should_notify_observers) {
  DCHECK(time_domain);
  // The queue is registered with its (required) time domain from birth.
  time_domain->RegisterQueue(this);
}
+
TaskQueueImpl::~TaskQueueImpl() {
#if DCHECK_IS_ON()
  base::AutoLock lock(any_thread_lock_);
  // NOTE this check shouldn't fire because |TaskQueueManagerImpl::queues_|
  // contains a strong reference to this TaskQueueImpl and the
  // TaskQueueManagerImpl destructor calls UnregisterTaskQueue on all task
  // queues.
  DCHECK(!any_thread().task_queue_manager)
      << "UnregisterTaskQueue must be called first!";
#endif
}
+
// NOTE(review): the default constructor initializes only |task|; confirm the
// header gives |success| an in-class default, otherwise it is read
// uninitialized after default construction.
TaskQueueImpl::PostTaskResult::PostTaskResult()
    : task(base::OnceClosure(), base::Location()) {}

TaskQueueImpl::PostTaskResult::PostTaskResult(bool success,
                                              TaskQueue::PostedTask task)
    : success(success), task(std::move(task)) {}

// A successful post consumed the task, so an empty placeholder is stored.
TaskQueueImpl::PostTaskResult TaskQueueImpl::PostTaskResult::Success() {
  return PostTaskResult(
      true, TaskQueue::PostedTask(base::OnceClosure(), base::Location()));
}

// On failure the unposted task is handed back to the caller.
TaskQueueImpl::PostTaskResult TaskQueueImpl::PostTaskResult::Fail(
    TaskQueue::PostedTask task) {
  return PostTaskResult(false, std::move(task));
}
+
// Constructs a task whose enqueue order is not yet known (delayed tasks get
// it assigned once their delay expires).
TaskQueueImpl::Task::Task(TaskQueue::PostedTask task,
                          base::TimeTicks desired_run_time,
                          EnqueueOrder sequence_number)
    : TaskQueue::Task(std::move(task), desired_run_time),
#ifndef NDEBUG
      enqueue_order_set_(false),
#endif
      enqueue_order_(0) {
  sequence_num = sequence_number;
}

// Constructs a task with a known enqueue order (immediate tasks, whose
// enqueue order equals their sequence number at post time).
TaskQueueImpl::Task::Task(TaskQueue::PostedTask task,
                          base::TimeTicks desired_run_time,
                          EnqueueOrder sequence_number,
                          EnqueueOrder enqueue_order)
    : TaskQueue::Task(std::move(task), desired_run_time),
#ifndef NDEBUG
      enqueue_order_set_(true),
#endif
      enqueue_order_(enqueue_order) {
  sequence_num = sequence_number;
}
+
// State readable from any thread (guarded by |any_thread_lock_|).
TaskQueueImpl::AnyThread::AnyThread(TaskQueueManagerImpl* task_queue_manager,
                                    TimeDomain* time_domain)
    : task_queue_manager(task_queue_manager), time_domain(time_domain) {}

TaskQueueImpl::AnyThread::~AnyThread() = default;
+
// State only touched from the main thread; owns the two work queues.
TaskQueueImpl::MainThreadOnly::MainThreadOnly(
    TaskQueueManagerImpl* task_queue_manager,
    TaskQueueImpl* task_queue,
    TimeDomain* time_domain)
    : task_queue_manager(task_queue_manager),
      time_domain(time_domain),
      delayed_work_queue(
          new WorkQueue(task_queue, "delayed", WorkQueue::QueueType::kDelayed)),
      immediate_work_queue(new WorkQueue(task_queue,
                                         "immediate",
                                         WorkQueue::QueueType::kImmediate)),
      set_index(0),
      is_enabled_refcount(0),
      voter_refcount(0),
      blame_context(nullptr),
      current_fence(0),  // 0 means "no fence installed".
      is_enabled_for_test(true) {}

TaskQueueImpl::MainThreadOnly::~MainThreadOnly() = default;
+
// Detaches this queue from its manager and time domain, then destroys all
// pending tasks. Ordering is delicate here — see the comments below.
void TaskQueueImpl::UnregisterTaskQueue() {
  TaskDeque immediate_incoming_queue;

  {
    base::AutoLock lock(any_thread_lock_);
    base::AutoLock immediate_incoming_queue_lock(
        immediate_incoming_queue_lock_);

    if (main_thread_only().time_domain)
      main_thread_only().time_domain->UnregisterQueue(this);

    // Already unregistered; nothing more to do.
    if (!any_thread().task_queue_manager)
      return;

    main_thread_only().on_task_completed_handler = OnTaskCompletedHandler();
    any_thread().time_domain = nullptr;
    main_thread_only().time_domain = nullptr;

    any_thread().task_queue_manager = nullptr;
    main_thread_only().task_queue_manager = nullptr;
    any_thread().on_next_wake_up_changed_callback =
        OnNextWakeUpChangedCallback();
    main_thread_only().on_next_wake_up_changed_callback =
        OnNextWakeUpChangedCallback();
    immediate_incoming_queue.Swap(immediate_incoming_queue_);
  }

  // It is possible for a task to hold a scoped_refptr to this, which
  // will lead to TaskQueueImpl destructor being called when deleting a task.
  // To avoid use-after-free, we need to clear all fields of a task queue
  // before starting to delete the tasks.
  // All work queues and priority queues containing tasks should be moved to
  // local variables on stack (std::move for unique_ptrs and swap for queues)
  // before clearing them and deleting tasks.

  // Flush the queues outside of the lock because TSAN complains about a lock
  // order inversion for tasks that are posted from within a lock, with a
  // destructor that acquires the same lock.

  std::priority_queue<Task> delayed_incoming_queue;
  delayed_incoming_queue.swap(main_thread_only().delayed_incoming_queue);

  std::unique_ptr<WorkQueue> immediate_work_queue =
      std::move(main_thread_only().immediate_work_queue);
  std::unique_ptr<WorkQueue> delayed_work_queue =
      std::move(main_thread_only().delayed_work_queue);
  // The locals above (including |immediate_incoming_queue|) go out of scope
  // here, destroying the tasks after all queue state has been cleared.
}
+
// Safe from any thread: |name_| is const after construction.
const char* TaskQueueImpl::GetName() const {
  return name_;
}

bool TaskQueueImpl::RunsTasksInCurrentSequence() const {
  return base::PlatformThread::CurrentId() == thread_id_;
}
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostDelayedTask(
+ TaskQueue::PostedTask task) {
+ if (task.delay.is_zero())
+ return PostImmediateTaskImpl(std::move(task));
+
+ return PostDelayedTaskImpl(std::move(task));
+}
+
TaskQueueImpl::PostTaskResult TaskQueueImpl::PostImmediateTaskImpl(
    TaskQueue::PostedTask task) {
  // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
  // for details.
  CHECK(task.callback);
  base::AutoLock lock(any_thread_lock_);
  if (!any_thread().task_queue_manager)
    return PostTaskResult::Fail(std::move(task));

  EnqueueOrder sequence_number =
      any_thread().task_queue_manager->GetNextSequenceNumber();

  // For immediate tasks the enqueue order is known at post time, so the
  // sequence number is passed twice (as sequence_num and enqueue_order).
  PushOntoImmediateIncomingQueueLocked(Task(std::move(task),
                                            any_thread().time_domain->Now(),
                                            sequence_number, sequence_number));
  return PostTaskResult::Success();
}
+
+TaskQueueImpl::PostTaskResult TaskQueueImpl::PostDelayedTaskImpl(
+ TaskQueue::PostedTask task) {
+ // Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
+ // for details.
+ CHECK(task.callback);
+ DCHECK_GT(task.delay, base::TimeDelta());
+ if (base::PlatformThread::CurrentId() == thread_id_) {
+ // Lock-free fast path for delayed tasks posted from the main thread.
+ if (!main_thread_only().task_queue_manager)
+ return PostTaskResult::Fail(std::move(task));
+
+ EnqueueOrder sequence_number =
+ main_thread_only().task_queue_manager->GetNextSequenceNumber();
+
+ base::TimeTicks time_domain_now = main_thread_only().time_domain->Now();
+ base::TimeTicks time_domain_delayed_run_time = time_domain_now + task.delay;
+ PushOntoDelayedIncomingQueueFromMainThread(
+ Task(std::move(task), time_domain_delayed_run_time, sequence_number),
+ time_domain_now);
+ } else {
+ // NOTE posting a delayed task from a different thread is not expected to
+ // be common. This pathway is less optimal than perhaps it could be
+ // because it causes two main thread tasks to be run. Should this
+ // assumption prove to be false in future, we may need to revisit this.
+ base::AutoLock lock(any_thread_lock_);
+ if (!any_thread().task_queue_manager)
+ return PostTaskResult::Fail(std::move(task));
+
+ EnqueueOrder sequence_number =
+ any_thread().task_queue_manager->GetNextSequenceNumber();
+
+ base::TimeTicks time_domain_now = any_thread().time_domain->Now();
+ base::TimeTicks time_domain_delayed_run_time = time_domain_now + task.delay;
+ PushOntoDelayedIncomingQueueLocked(
+ Task(std::move(task), time_domain_delayed_run_time, sequence_number));
+ }
+ return PostTaskResult::Success();
+}
+
+// Main-thread-only: enqueues |pending_task| onto |delayed_incoming_queue| and
+// refreshes the scheduled TimeDomain wake-up. |now| is the TimeDomain's
+// current time, passed in to avoid re-querying it.
+void TaskQueueImpl::PushOntoDelayedIncomingQueueFromMainThread(
+    Task pending_task,
+    base::TimeTicks now) {
+  main_thread_only().task_queue_manager->DidQueueTask(pending_task);
+  main_thread_only().delayed_incoming_queue.push(std::move(pending_task));
+
+  LazyNow lazy_now = main_thread_only().time_domain->CreateLazyNow();
+  UpdateDelayedWakeUp(&lazy_now);
+
+  TraceQueueSize();
+}
+
+// Cross-thread delayed-post path: wraps |pending_task| in an immediate
+// "thread hop" task which, when run on the main thread, calls
+// ScheduleDelayedWorkTask to do the real delayed enqueue. Caller holds
+// |any_thread_lock_| (the "Locked" suffix convention).
+void TaskQueueImpl::PushOntoDelayedIncomingQueueLocked(Task pending_task) {
+  any_thread().task_queue_manager->DidQueueTask(pending_task);
+
+  int thread_hop_task_sequence_number =
+      any_thread().task_queue_manager->GetNextSequenceNumber();
+  // TODO(altimin): Add a copy method to Task to capture metadata here.
+  // NOTE(review): |pending_task| is moved into BindOnce while a sibling
+  // argument reads pending_task.task_type(); these argument evaluations are
+  // indeterminately sequenced, so task_type() may observe a moved-from Task —
+  // worth confirming task_type() is safe to read after move.
+  PushOntoImmediateIncomingQueueLocked(
+      Task(TaskQueue::PostedTask(
+               base::BindOnce(&TaskQueueImpl::ScheduleDelayedWorkTask,
+                              base::Unretained(this), std::move(pending_task)),
+               FROM_HERE, base::TimeDelta(), base::Nestable::kNonNestable,
+               pending_task.task_type()),
+           base::TimeTicks(), thread_hop_task_sequence_number,
+           thread_hop_task_sequence_number));
+}
+
+// Runs on the main thread as the target of the thread-hop task above; places
+// |pending_task| into the delayed machinery now that we are on the right
+// thread.
+void TaskQueueImpl::ScheduleDelayedWorkTask(Task pending_task) {
+  DCHECK(main_thread_checker_.CalledOnValidThread());
+  base::TimeTicks delayed_run_time = pending_task.delayed_run_time;
+  base::TimeTicks time_domain_now = main_thread_only().time_domain->Now();
+  if (delayed_run_time <= time_domain_now) {
+    // If |delayed_run_time| is in the past then push it onto the work queue
+    // immediately. To ensure the right task ordering we need to temporarily
+    // push it onto the |delayed_incoming_queue|.
+    delayed_run_time = time_domain_now;
+    pending_task.delayed_run_time = time_domain_now;
+    main_thread_only().delayed_incoming_queue.push(std::move(pending_task));
+    LazyNow lazy_now(time_domain_now);
+    WakeUpForDelayedWork(&lazy_now);
+  } else {
+    // If |delayed_run_time| is in the future we can queue it as normal.
+    PushOntoDelayedIncomingQueueFromMainThread(std::move(pending_task),
+                                               time_domain_now);
+  }
+  TraceQueueSize();
+}
+
+// Appends |task| to |immediate_incoming_queue| (under its own lock) and, if
+// the queue was previously empty, notifies the manager so a DoWork gets
+// posted. Caller holds |any_thread_lock_| (the "Locked" suffix convention).
+void TaskQueueImpl::PushOntoImmediateIncomingQueueLocked(Task task) {
+  // If the |immediate_incoming_queue| is empty we need a DoWork posted to make
+  // it run.
+  bool was_immediate_incoming_queue_empty;
+
+  // Capture these before |task| is moved into the queue.
+  EnqueueOrder sequence_number = task.sequence_num;
+  base::TimeTicks desired_run_time = task.delayed_run_time;
+
+  {
+    base::AutoLock lock(immediate_incoming_queue_lock_);
+    was_immediate_incoming_queue_empty = immediate_incoming_queue().empty();
+    immediate_incoming_queue().push_back(std::move(task));
+    any_thread().task_queue_manager->DidQueueTask(
+        immediate_incoming_queue().back());
+  }
+
+  if (was_immediate_incoming_queue_empty) {
+    // However there's no point posting a DoWork for a blocked queue. NB we can
+    // only tell if it's disabled from the main thread.
+    bool queue_is_blocked =
+        RunsTasksInCurrentSequence() &&
+        (!IsQueueEnabled() || main_thread_only().current_fence);
+    any_thread().task_queue_manager->OnQueueHasIncomingImmediateWork(
+        this, sequence_number, queue_is_blocked);
+    if (!any_thread().on_next_wake_up_changed_callback.is_null())
+      any_thread().on_next_wake_up_changed_callback.Run(desired_run_time);
+  }
+
+  TraceQueueSize();
+}
+
+// Main-thread-only: refills |immediate_work_queue| from the incoming queue,
+// but only once the work queue has fully drained (keeps lock traffic low).
+void TaskQueueImpl::ReloadImmediateWorkQueueIfEmpty() {
+  if (!main_thread_only().immediate_work_queue->Empty())
+    return;
+
+  main_thread_only().immediate_work_queue->ReloadEmptyImmediateQueue();
+}
+
+// Swaps out and returns the entire |immediate_incoming_queue| under its lock.
+// As a side effect, converts a pending delayed fence into a concrete
+// |current_fence| if one of the taken tasks crosses the fence time.
+TaskQueueImpl::TaskDeque TaskQueueImpl::TakeImmediateIncomingQueue() {
+  base::AutoLock immediate_incoming_queue_lock(immediate_incoming_queue_lock_);
+  TaskQueueImpl::TaskDeque queue;
+  queue.Swap(immediate_incoming_queue());
+
+  // Activate delayed fence if necessary. This is ideologically similar to
+  // ActivateDelayedFenceIfNeeded, but due to immediate tasks being posted
+  // from any thread we can't generate an enqueue order for the fence there,
+  // so we have to check all immediate tasks and use their enqueue order for
+  // a fence.
+  if (main_thread_only().delayed_fence) {
+    for (const Task& task : queue) {
+      if (task.delayed_run_time >= main_thread_only().delayed_fence.value()) {
+        // First task at/after the fence time: its enqueue order becomes the
+        // fence, and the delayed fence is consumed.
+        main_thread_only().delayed_fence = base::nullopt;
+        DCHECK_EQ(main_thread_only().current_fence,
+                  static_cast<EnqueueOrder>(EnqueueOrderValues::kNone));
+        main_thread_only().current_fence = task.enqueue_order();
+        // Do not trigger WorkQueueSets notification when taking incoming
+        // immediate queue.
+        main_thread_only().immediate_work_queue->InsertFenceSilently(
+            main_thread_only().current_fence);
+        main_thread_only().delayed_work_queue->InsertFenceSilently(
+            main_thread_only().current_fence);
+        break;
+      }
+    }
+  }
+
+  return queue;
+}
+
+// True iff all four queues (both work queues, delayed incoming, immediate
+// incoming) are empty. Checks the lock-protected incoming queue last so the
+// lock is only taken when the main-thread-only queues are already empty.
+bool TaskQueueImpl::IsEmpty() const {
+  if (!main_thread_only().delayed_work_queue->Empty() ||
+      !main_thread_only().delayed_incoming_queue.empty() ||
+      !main_thread_only().immediate_work_queue->Empty()) {
+    return false;
+  }
+
+  base::AutoLock lock(immediate_incoming_queue_lock_);
+  return immediate_incoming_queue().empty();
+}
+
+// Sums the sizes of all four queues. Main-thread-only except for the
+// lock-protected read of |immediate_incoming_queue|.
+size_t TaskQueueImpl::GetNumberOfPendingTasks() const {
+  size_t task_count = 0;
+  task_count += main_thread_only().delayed_work_queue->Size();
+  task_count += main_thread_only().delayed_incoming_queue.size();
+  task_count += main_thread_only().immediate_work_queue->Size();
+
+  base::AutoLock lock(immediate_incoming_queue_lock_);
+  task_count += immediate_incoming_queue().size();
+  return task_count;
+}
+
+// True iff some task could run right now: anything in a work queue, a delayed
+// task whose run time has arrived, or any immediate incoming task.
+bool TaskQueueImpl::HasTaskToRunImmediately() const {
+  // Any work queue tasks count as immediate work.
+  if (!main_thread_only().delayed_work_queue->Empty() ||
+      !main_thread_only().immediate_work_queue->Empty()) {
+    return true;
+  }
+
+  // Tasks on |delayed_incoming_queue| that could run now, count as
+  // immediate work.
+  if (!main_thread_only().delayed_incoming_queue.empty() &&
+      main_thread_only().delayed_incoming_queue.top().delayed_run_time <=
+          main_thread_only().time_domain->CreateLazyNow().Now()) {
+    return true;
+  }
+
+  // Finally tasks on |immediate_incoming_queue| count as immediate work.
+  base::AutoLock lock(immediate_incoming_queue_lock_);
+  return !immediate_incoming_queue().empty();
+}
+
+// Returns the wake-up for the earliest delayed task, or nullopt if there are
+// no delayed tasks or the queue is disabled.
+base::Optional<TaskQueueImpl::DelayedWakeUp>
+TaskQueueImpl::GetNextScheduledWakeUpImpl() {
+  // Note we don't schedule a wake-up for disabled queues.
+  if (main_thread_only().delayed_incoming_queue.empty() || !IsQueueEnabled())
+    return base::nullopt;
+
+  return main_thread_only().delayed_incoming_queue.top().delayed_wake_up();
+}
+
+// Public variant of the above exposing just the wake-up time.
+base::Optional<base::TimeTicks> TaskQueueImpl::GetNextScheduledWakeUp() {
+  base::Optional<DelayedWakeUp> wake_up = GetNextScheduledWakeUpImpl();
+  if (!wake_up)
+    return base::nullopt;
+  return wake_up->time;
+}
+
+// Moves every due (and not cancelled) delayed task into the delayed work
+// queue, assigning each a fresh enqueue order, then reschedules the next
+// TimeDomain wake-up.
+void TaskQueueImpl::WakeUpForDelayedWork(LazyNow* lazy_now) {
+  // Enqueue all delayed tasks that should be running now, skipping any that
+  // have been canceled.
+  while (!main_thread_only().delayed_incoming_queue.empty()) {
+    // const_cast is needed because priority_queue::top() is const; the task
+    // is about to be popped/moved anyway.
+    Task& task =
+        const_cast<Task&>(main_thread_only().delayed_incoming_queue.top());
+    if (!task.task || task.task.IsCancelled()) {
+      main_thread_only().delayed_incoming_queue.pop();
+      continue;
+    }
+    if (task.delayed_run_time > lazy_now->Now())
+      break;
+    ActivateDelayedFenceIfNeeded(task.delayed_run_time);
+    task.set_enqueue_order(
+        main_thread_only().task_queue_manager->GetNextSequenceNumber());
+    main_thread_only().delayed_work_queue->Push(std::move(task));
+    main_thread_only().delayed_incoming_queue.pop();
+
+    // Normally WakeUpForDelayedWork is called inside DoWork, but it also
+    // can be called elsewhere (e.g. tests and fast-path for posting
+    // delayed tasks). Ensure that there is a DoWork posting. No-op inside
+    // existing DoWork due to DoWork deduplication.
+    if (IsQueueEnabled() || !main_thread_only().current_fence) {
+      main_thread_only().task_queue_manager->MaybeScheduleImmediateWork(
+          FROM_HERE);
+    }
+  }
+
+  UpdateDelayedWakeUp(lazy_now);
+}
+
+// Emits the total pending-task count as a trace counter; cheap no-op when the
+// "renderer.scheduler" tracing category is off or we're off the main thread.
+void TaskQueueImpl::TraceQueueSize() const {
+  bool is_tracing;
+  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
+      TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"), &is_tracing);
+  if (!is_tracing)
+    return;
+
+  // It's only safe to access the work queues from the main thread.
+  // TODO(alexclarke): We should find another way of tracing this
+  if (base::PlatformThread::CurrentId() != thread_id_)
+    return;
+
+  base::AutoLock lock(immediate_incoming_queue_lock_);
+  TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"), GetName(),
+                 immediate_incoming_queue().size() +
+                     main_thread_only().immediate_work_queue->Size() +
+                     main_thread_only().delayed_work_queue->Size() +
+                     main_thread_only().delayed_incoming_queue.size());
+}
+
+// Forwards a priority change to the manager's selector; no-op if unregistered
+// or the priority is unchanged.
+void TaskQueueImpl::SetQueuePriority(TaskQueue::QueuePriority priority) {
+  if (!main_thread_only().task_queue_manager || priority == GetQueuePriority())
+    return;
+  main_thread_only()
+      .task_queue_manager->main_thread_only()
+      .selector.SetQueuePriority(this, priority);
+}
+
+// The priority is encoded as the work-queue-set index; both work queues must
+// agree on it.
+TaskQueue::QueuePriority TaskQueueImpl::GetQueuePriority() const {
+  size_t set_index = immediate_work_queue()->work_queue_set_index();
+  DCHECK_EQ(set_index, delayed_work_queue()->work_queue_set_index());
+  return static_cast<TaskQueue::QueuePriority>(set_index);
+}
+
+// Serializes the queue's state into a TracedValue dictionary for tracing.
+// Takes both locks so all four queues can be inspected consistently; emits a
+// minimal {name, unregistered} dictionary once the queue is unregistered.
+void TaskQueueImpl::AsValueInto(base::TimeTicks now,
+                                base::trace_event::TracedValue* state) const {
+  base::AutoLock lock(any_thread_lock_);
+  base::AutoLock immediate_incoming_queue_lock(immediate_incoming_queue_lock_);
+  state->BeginDictionary();
+  state->SetString("name", GetName());
+  if (!main_thread_only().task_queue_manager) {
+    state->SetBoolean("unregistered", true);
+    state->EndDictionary();
+    return;
+  }
+  DCHECK(main_thread_only().time_domain);
+  DCHECK(main_thread_only().delayed_work_queue);
+  DCHECK(main_thread_only().immediate_work_queue);
+
+  state->SetString("task_queue_id", PointerToString(this));
+  state->SetBoolean("enabled", IsQueueEnabled());
+  state->SetString("time_domain_name",
+                   main_thread_only().time_domain->GetName());
+  state->SetInteger("immediate_incoming_queue_size",
+                    immediate_incoming_queue().size());
+  state->SetInteger("delayed_incoming_queue_size",
+                    main_thread_only().delayed_incoming_queue.size());
+  state->SetInteger("immediate_work_queue_size",
+                    main_thread_only().immediate_work_queue->Size());
+  state->SetInteger("delayed_work_queue_size",
+                    main_thread_only().delayed_work_queue->Size());
+  if (!main_thread_only().delayed_incoming_queue.empty()) {
+    base::TimeDelta delay_to_next_task =
+        (main_thread_only().delayed_incoming_queue.top().delayed_run_time -
+         main_thread_only().time_domain->CreateLazyNow().Now());
+    state->SetDouble("delay_to_next_task_ms",
+                     delay_to_next_task.InMillisecondsF());
+  }
+  if (main_thread_only().current_fence)
+    state->SetInteger("current_fence", main_thread_only().current_fence);
+  if (main_thread_only().delayed_fence) {
+    state->SetDouble(
+        "delayed_fence_seconds_from_now",
+        (main_thread_only().delayed_fence.value() - now).InSecondsF());
+  }
+  // Per-task dumps are expensive, so they're gated on verbose snapshots.
+  if (AreVerboseSnapshotsEnabled()) {
+    state->BeginArray("immediate_incoming_queue");
+    QueueAsValueInto(immediate_incoming_queue(), now, state);
+    state->EndArray();
+    state->BeginArray("delayed_work_queue");
+    main_thread_only().delayed_work_queue->AsValueInto(now, state);
+    state->EndArray();
+    state->BeginArray("immediate_work_queue");
+    main_thread_only().immediate_work_queue->AsValueInto(now, state);
+    state->EndArray();
+    state->BeginArray("delayed_incoming_queue");
+    QueueAsValueInto(main_thread_only().delayed_incoming_queue, now, state);
+    state->EndArray();
+  }
+  state->SetString("priority", TaskQueue::PriorityToString(GetQueuePriority()));
+  state->EndDictionary();
+}
+
+// Registers a per-queue task observer (main-thread-only observer list).
+void TaskQueueImpl::AddTaskObserver(
+    base::MessageLoop::TaskObserver* task_observer) {
+  main_thread_only().task_observers.AddObserver(task_observer);
+}
+
+// Unregisters a previously added task observer.
+void TaskQueueImpl::RemoveTaskObserver(
+    base::MessageLoop::TaskObserver* task_observer) {
+  main_thread_only().task_observers.RemoveObserver(task_observer);
+}
+
+// Called before a task from this queue runs: enters the blame context (if
+// any) and notifies observers. Only valid when |should_notify_observers_|.
+void TaskQueueImpl::NotifyWillProcessTask(
+    const base::PendingTask& pending_task) {
+  DCHECK(should_notify_observers_);
+  if (main_thread_only().blame_context)
+    main_thread_only().blame_context->Enter();
+  for (auto& observer : main_thread_only().task_observers)
+    observer.WillProcessTask(pending_task);
+}
+
+// Called after a task runs: notifies observers, then leaves the blame
+// context — mirror ordering of NotifyWillProcessTask.
+void TaskQueueImpl::NotifyDidProcessTask(
+    const base::PendingTask& pending_task) {
+  DCHECK(should_notify_observers_);
+  for (auto& observer : main_thread_only().task_observers)
+    observer.DidProcessTask(pending_task);
+  if (main_thread_only().blame_context)
+    main_thread_only().blame_context->Leave();
+}
+
+// Migrates this queue to a different TimeDomain: updates the cross-thread
+// copy under the lock, re-registers with the new domain, and resets the
+// scheduled wake-up so notifications are re-issued against the new domain.
+void TaskQueueImpl::SetTimeDomain(TimeDomain* time_domain) {
+  {
+    base::AutoLock lock(any_thread_lock_);
+    DCHECK(time_domain);
+    // NOTE this is similar to checking |any_thread().task_queue_manager| but
+    // the TaskQueueSelectorTests constructs TaskQueueImpl directly with a null
+    // task_queue_manager. Instead we check |any_thread().time_domain| which is
+    // another way of asserting that UnregisterTaskQueue has not been called.
+    DCHECK(any_thread().time_domain);
+    if (!any_thread().time_domain)
+      return;
+    DCHECK(main_thread_checker_.CalledOnValidThread());
+    if (time_domain == main_thread_only().time_domain)
+      return;
+
+    any_thread().time_domain = time_domain;
+  }
+
+  // Swap registration outside the lock: old domain forgets us, new one
+  // learns about us.
+  main_thread_only().time_domain->UnregisterQueue(this);
+  main_thread_only().time_domain = time_domain;
+  time_domain->RegisterQueue(this);
+
+  LazyNow lazy_now = time_domain->CreateLazyNow();
+  // Clear scheduled wake up to ensure that new notifications are issued
+  // correctly.
+  // TODO(altimin): Remove this when we won't have to support changing time
+  // domains.
+  main_thread_only().scheduled_wake_up = base::nullopt;
+  UpdateDelayedWakeUp(&lazy_now);
+}
+
+// Returns the queue's TimeDomain, lock-free from the main thread, otherwise
+// via |any_thread_lock_|.
+TimeDomain* TaskQueueImpl::GetTimeDomain() const {
+  if (base::PlatformThread::CurrentId() == thread_id_)
+    return main_thread_only().time_domain;
+
+  base::AutoLock lock(any_thread_lock_);
+  return any_thread().time_domain;
+}
+
+// Sets the blame context entered/left around each task (see
+// NotifyWillProcessTask / NotifyDidProcessTask).
+void TaskQueueImpl::SetBlameContext(
+    base::trace_event::BlameContext* blame_context) {
+  main_thread_only().blame_context = blame_context;
+}
+
+// Inserts a fence: tasks with an enqueue order at/after the fence are blocked
+// from running. kNow fences use a fresh sequence number; otherwise the
+// special kBlockingFence value blocks everything. Replaces any prior fence
+// and may unblock tasks that sat between the old and new fence.
+void TaskQueueImpl::InsertFence(TaskQueue::InsertFencePosition position) {
+  if (!main_thread_only().task_queue_manager)
+    return;
+
+  // Only one fence may be present at a time.
+  main_thread_only().delayed_fence = base::nullopt;
+
+  EnqueueOrder previous_fence = main_thread_only().current_fence;
+  EnqueueOrder current_fence =
+      position == TaskQueue::InsertFencePosition::kNow
+          ? main_thread_only().task_queue_manager->GetNextSequenceNumber()
+          : static_cast<EnqueueOrder>(EnqueueOrderValues::kBlockingFence);
+
+  // Tasks posted after this point will have a strictly higher enqueue order
+  // and will be blocked from running.
+  main_thread_only().current_fence = current_fence;
+  bool task_unblocked =
+      main_thread_only().immediate_work_queue->InsertFence(current_fence);
+  task_unblocked |=
+      main_thread_only().delayed_work_queue->InsertFence(current_fence);
+
+  // An incoming immediate task between the two fences also counts as
+  // unblocked work.
+  if (!task_unblocked && previous_fence && previous_fence < current_fence) {
+    base::AutoLock lock(immediate_incoming_queue_lock_);
+    if (!immediate_incoming_queue().empty() &&
+        immediate_incoming_queue().front().enqueue_order() > previous_fence &&
+        immediate_incoming_queue().front().enqueue_order() < current_fence) {
+      task_unblocked = true;
+    }
+  }
+
+  if (IsQueueEnabled() && task_unblocked) {
+    main_thread_only().task_queue_manager->MaybeScheduleImmediateWork(
+        FROM_HERE);
+  }
+}
+
+// Arms a delayed fence that activates when tasks with run time >= |time| are
+// encountered (see ActivateDelayedFenceIfNeeded / TakeImmediateIncomingQueue).
+void TaskQueueImpl::InsertFenceAt(base::TimeTicks time) {
+  // Task queue can have only one fence, delayed or not.
+  RemoveFence();
+  main_thread_only().delayed_fence = time;
+}
+
+// Clears both the active and delayed fences; schedules work if that unblocked
+// any previously fenced task.
+void TaskQueueImpl::RemoveFence() {
+  if (!main_thread_only().task_queue_manager)
+    return;
+
+  EnqueueOrder previous_fence = main_thread_only().current_fence;
+  main_thread_only().current_fence = 0;
+  main_thread_only().delayed_fence = base::nullopt;
+
+  bool task_unblocked = main_thread_only().immediate_work_queue->RemoveFence();
+  task_unblocked |= main_thread_only().delayed_work_queue->RemoveFence();
+
+  if (!task_unblocked && previous_fence) {
+    base::AutoLock lock(immediate_incoming_queue_lock_);
+    if (!immediate_incoming_queue().empty() &&
+        immediate_incoming_queue().front().enqueue_order() > previous_fence) {
+      task_unblocked = true;
+    }
+  }
+
+  if (IsQueueEnabled() && task_unblocked) {
+    main_thread_only().task_queue_manager->MaybeScheduleImmediateWork(
+        FROM_HERE);
+  }
+}
+
+// True iff a fence is active and every pending task (work queues plus
+// incoming immediate queue) is behind it.
+bool TaskQueueImpl::BlockedByFence() const {
+  if (!main_thread_only().current_fence)
+    return false;
+
+  if (!main_thread_only().immediate_work_queue->BlockedByFence() ||
+      !main_thread_only().delayed_work_queue->BlockedByFence()) {
+    return false;
+  }
+
+  base::AutoLock lock(immediate_incoming_queue_lock_);
+  if (immediate_incoming_queue().empty())
+    return true;
+
+  return immediate_incoming_queue().front().enqueue_order() >
+         main_thread_only().current_fence;
+}
+
+// True iff a fence is in force: either a delayed fence whose time has passed,
+// or a concrete |current_fence|.
+bool TaskQueueImpl::HasActiveFence() {
+  if (main_thread_only().delayed_fence &&
+      main_thread_only().time_domain->Now() >
+          main_thread_only().delayed_fence.value()) {
+    return true;
+  }
+  return !!main_thread_only().current_fence;
+}
+
+// True iff a task with |enqueue_order| would be allowed to run: queue enabled
+// and either no fence or the task precedes the fence.
+bool TaskQueueImpl::CouldTaskRun(EnqueueOrder enqueue_order) const {
+  if (!IsQueueEnabled())
+    return false;
+
+  if (!main_thread_only().current_fence)
+    return true;
+
+  return enqueue_order < main_thread_only().current_fence;
+}
+
+// Test-only accessor for the current fence value.
+EnqueueOrder TaskQueueImpl::GetFenceForTest() const {
+  return main_thread_only().current_fence;
+}
+
+// static
+// Serializes every task in a deque into |state| for tracing.
+void TaskQueueImpl::QueueAsValueInto(const TaskDeque& queue,
+                                     base::TimeTicks now,
+                                     base::trace_event::TracedValue* state) {
+  for (const Task& task : queue) {
+    TaskAsValueInto(task, now, state);
+  }
+}
+
+// static
+// Serializes a priority_queue of tasks. priority_queue has no iterators, so
+// the queue is drained and rebuilt (tracing-only cost path).
+void TaskQueueImpl::QueueAsValueInto(const std::priority_queue<Task>& queue,
+                                     base::TimeTicks now,
+                                     base::trace_event::TracedValue* state) {
+  // Remove const to search |queue| in the destructive manner. Restore the
+  // content from |visited| later.
+  std::priority_queue<Task>* mutable_queue =
+      const_cast<std::priority_queue<Task>*>(&queue);
+  std::priority_queue<Task> visited;
+  while (!mutable_queue->empty()) {
+    TaskAsValueInto(mutable_queue->top(), now, state);
+    visited.push(std::move(const_cast<Task&>(mutable_queue->top())));
+    mutable_queue->pop();
+  }
+  *mutable_queue = std::move(visited);
+}
+
+// static
+// Serializes one task's metadata (origin, ordering, timing, cancellation)
+// into |state|.
+void TaskQueueImpl::TaskAsValueInto(const Task& task,
+                                    base::TimeTicks now,
+                                    base::trace_event::TracedValue* state) {
+  state->BeginDictionary();
+  state->SetString("posted_from", task.posted_from.ToString());
+#ifndef NDEBUG
+  if (task.enqueue_order_set())
+    state->SetInteger("enqueue_order", task.enqueue_order());
+#else
+  state->SetInteger("enqueue_order", task.enqueue_order());
+#endif
+  state->SetInteger("sequence_num", task.sequence_num);
+  state->SetBoolean("nestable", task.nestable == base::Nestable::kNestable);
+  state->SetBoolean("is_high_res", task.is_high_res);
+  state->SetBoolean("is_cancelled", task.task.IsCancelled());
+  state->SetDouble(
+      "delayed_run_time",
+      (task.delayed_run_time - base::TimeTicks()).InMillisecondsF());
+  state->SetDouble("delayed_run_time_milliseconds_from_now",
+                   (task.delayed_run_time - now).InMillisecondsF());
+  state->EndDictionary();
+}
+
+// A voter starts out voting "enabled".
+TaskQueueImpl::QueueEnabledVoterImpl::QueueEnabledVoterImpl(
+    scoped_refptr<TaskQueue> task_queue)
+    : task_queue_(task_queue), enabled_(true) {}
+
+// Withdraws this voter's vote, unless the underlying queue impl is gone.
+TaskQueueImpl::QueueEnabledVoterImpl::~QueueEnabledVoterImpl() {
+  if (task_queue_->GetTaskQueueImpl())
+    task_queue_->GetTaskQueueImpl()->RemoveQueueEnabledVoter(this);
+}
+
+// Flips this voter's vote; the queue only runs when all voters agree it is
+// enabled (see IsQueueEnabled).
+void TaskQueueImpl::QueueEnabledVoterImpl::SetQueueEnabled(bool enabled) {
+  if (enabled_ == enabled)
+    return;
+
+  task_queue_->GetTaskQueueImpl()->OnQueueEnabledVoteChanged(enabled);
+  enabled_ = enabled;
+}
+
+// Drops a voter's refcounts and re-evaluates the queue's enabled state.
+void TaskQueueImpl::RemoveQueueEnabledVoter(
+    const QueueEnabledVoterImpl* voter) {
+  // Bail out if we're being called from TaskQueueImpl::UnregisterTaskQueue.
+  if (!main_thread_only().time_domain)
+    return;
+
+  bool was_enabled = IsQueueEnabled();
+  if (voter->enabled_) {
+    main_thread_only().is_enabled_refcount--;
+    DCHECK_GE(main_thread_only().is_enabled_refcount, 0);
+  }
+
+  main_thread_only().voter_refcount--;
+  DCHECK_GE(main_thread_only().voter_refcount, 0);
+
+  bool is_enabled = IsQueueEnabled();
+  if (was_enabled != is_enabled)
+    EnableOrDisableWithSelector(is_enabled);
+}
+
+// Enabled iff every voter votes enabled (refcounts match) and the queue
+// hasn't been force-disabled for tests.
+bool TaskQueueImpl::IsQueueEnabled() const {
+  // By default is_enabled_refcount and voter_refcount both equal zero.
+  return (main_thread_only().is_enabled_refcount ==
+          main_thread_only().voter_refcount) &&
+         main_thread_only().is_enabled_for_test;
+}
+
+// Adjusts the enabled refcount for one voter's vote change and applies the
+// new state via the selector if it flipped.
+void TaskQueueImpl::OnQueueEnabledVoteChanged(bool enabled) {
+  bool was_enabled = IsQueueEnabled();
+  if (enabled) {
+    main_thread_only().is_enabled_refcount++;
+    DCHECK_LE(main_thread_only().is_enabled_refcount,
+              main_thread_only().voter_refcount);
+  } else {
+    main_thread_only().is_enabled_refcount--;
+    DCHECK_GE(main_thread_only().is_enabled_refcount, 0);
+  }
+
+  bool is_enabled = IsQueueEnabled();
+  if (was_enabled != is_enabled)
+    EnableOrDisableWithSelector(is_enabled);
+}
+
+// Applies an enabled-state change to the manager's selector, refreshing the
+// delayed wake-up first and notifying the wake-up observer when enabling a
+// queue that has immediate work.
+void TaskQueueImpl::EnableOrDisableWithSelector(bool enable) {
+  if (!main_thread_only().task_queue_manager)
+    return;
+
+  LazyNow lazy_now = main_thread_only().time_domain->CreateLazyNow();
+  UpdateDelayedWakeUp(&lazy_now);
+
+  if (enable) {
+    if (HasPendingImmediateWork() &&
+        !main_thread_only().on_next_wake_up_changed_callback.is_null()) {
+      // Delayed work notification will be issued via time domain.
+      main_thread_only().on_next_wake_up_changed_callback.Run(
+          base::TimeTicks());
+    }
+
+    // Note the selector calls TaskQueueManager::OnTaskQueueEnabled which posts
+    // a DoWork if needed.
+    main_thread_only()
+        .task_queue_manager->main_thread_only()
+        .selector.EnableQueue(this);
+  } else {
+    main_thread_only()
+        .task_queue_manager->main_thread_only()
+        .selector.DisableQueue(this);
+  }
+}
+
+// Creates a voter for |task_queue| (which must wrap this impl) and counts it
+// as an enabled vote.
+std::unique_ptr<TaskQueue::QueueEnabledVoter>
+TaskQueueImpl::CreateQueueEnabledVoter(scoped_refptr<TaskQueue> task_queue) {
+  DCHECK_EQ(task_queue->GetTaskQueueImpl(), this);
+  main_thread_only().voter_refcount++;
+  main_thread_only().is_enabled_refcount++;
+  return std::make_unique<QueueEnabledVoterImpl>(task_queue);
+}
+
+// Rebuilds |delayed_incoming_queue| without cancelled tasks, then refreshes
+// the wake-up (the earliest task may have been removed).
+void TaskQueueImpl::SweepCanceledDelayedTasks(base::TimeTicks now) {
+  if (main_thread_only().delayed_incoming_queue.empty())
+    return;
+
+  // Remove canceled tasks.
+  std::priority_queue<Task> remaining_tasks;
+  while (!main_thread_only().delayed_incoming_queue.empty()) {
+    if (!main_thread_only().delayed_incoming_queue.top().task.IsCancelled()) {
+      remaining_tasks.push(std::move(
+          const_cast<Task&>(main_thread_only().delayed_incoming_queue.top())));
+    }
+    main_thread_only().delayed_incoming_queue.pop();
+  }
+
+  main_thread_only().delayed_incoming_queue = std::move(remaining_tasks);
+
+  LazyNow lazy_now(now);
+  UpdateDelayedWakeUp(&lazy_now);
+}
+
+// Test-only: injects a task directly into the incoming immediate queue,
+// bypassing the usual notification machinery.
+void TaskQueueImpl::PushImmediateIncomingTaskForTest(
+    TaskQueueImpl::Task&& task) {
+  base::AutoLock lock(immediate_incoming_queue_lock_);
+  immediate_incoming_queue().push_back(std::move(task));
+}
+
+// Re-queues a non-nestable task that was deferred out of a nested loop.
+void TaskQueueImpl::RequeueDeferredNonNestableTask(
+    TaskQueueImpl::Task&& task,
+    SequencedTaskSource::WorkType work_type) {
+  DCHECK(task.nestable == base::Nestable::kNonNestable);
+  // The re-queued tasks have to be pushed onto the front because we'd otherwise
+  // violate the strict monotonically increasing enqueue order within the
+  // WorkQueue. We can't assign them a new enqueue order here because that will
+  // not behave correctly with fences and things will break (e.g Idle TQ).
+  if (work_type == SequencedTaskSource::WorkType::kDelayed) {
+    main_thread_only().delayed_work_queue->PushNonNestableTaskToFront(
+        std::move(task));
+  } else {
+    main_thread_only().immediate_work_queue->PushNonNestableTaskToFront(
+        std::move(task));
+  }
+}
+
+// Installs the wake-up-changed observer callback in both the cross-thread and
+// main-thread-only copies. At most one observer may be set at a time.
+void TaskQueueImpl::SetOnNextWakeUpChangedCallback(
+    TaskQueueImpl::OnNextWakeUpChangedCallback callback) {
+#if DCHECK_IS_ON()
+  if (callback) {
+    DCHECK(main_thread_only().on_next_wake_up_changed_callback.is_null())
+        << "Can't assign two different observers to "
+           "blink::scheduler::TaskQueue";
+  }
+#endif
+  base::AutoLock lock(any_thread_lock_);
+  any_thread().on_next_wake_up_changed_callback = callback;
+  main_thread_only().on_next_wake_up_changed_callback = callback;
+}
+
+// Recomputes the desired wake-up from the delayed incoming queue and applies
+// it.
+void TaskQueueImpl::UpdateDelayedWakeUp(LazyNow* lazy_now) {
+  return UpdateDelayedWakeUpImpl(lazy_now, GetNextScheduledWakeUpImpl());
+}
+
+// Applies |wake_up| if it differs from the currently scheduled one: notifies
+// the observer (unless immediate work already guarantees a wake-up) and
+// reschedules with the TimeDomain.
+void TaskQueueImpl::UpdateDelayedWakeUpImpl(
+    LazyNow* lazy_now,
+    base::Optional<TaskQueueImpl::DelayedWakeUp> wake_up) {
+  if (main_thread_only().scheduled_wake_up == wake_up)
+    return;
+  main_thread_only().scheduled_wake_up = wake_up;
+
+  if (wake_up &&
+      !main_thread_only().on_next_wake_up_changed_callback.is_null() &&
+      !HasPendingImmediateWork()) {
+    main_thread_only().on_next_wake_up_changed_callback.Run(wake_up->time);
+  }
+
+  main_thread_only().time_domain->ScheduleWakeUpForQueue(this, wake_up,
+                                                         lazy_now);
+}
+
+// Test-only: forces a specific wake-up through the normal update path.
+void TaskQueueImpl::SetDelayedWakeUpForTesting(
+    base::Optional<TaskQueueImpl::DelayedWakeUp> wake_up) {
+  LazyNow lazy_now = main_thread_only().time_domain->CreateLazyNow();
+  UpdateDelayedWakeUpImpl(&lazy_now, wake_up);
+}
+
+// True iff any task could run without waiting: non-empty work queues or a
+// non-empty incoming immediate queue. Unlike HasTaskToRunImmediately, does
+// not consider due delayed tasks.
+bool TaskQueueImpl::HasPendingImmediateWork() {
+  // Any work queue tasks count as immediate work.
+  if (!main_thread_only().delayed_work_queue->Empty() ||
+      !main_thread_only().immediate_work_queue->Empty()) {
+    return true;
+  }
+
+  // Finally tasks on |immediate_incoming_queue| count as immediate work.
+  base::AutoLock lock(immediate_incoming_queue_lock_);
+  return !immediate_incoming_queue().empty();
+}
+
+// Installs the handler invoked when a task from this queue starts running.
+void TaskQueueImpl::SetOnTaskStartedHandler(
+    TaskQueueImpl::OnTaskStartedHandler handler) {
+  main_thread_only().on_task_started_handler = std::move(handler);
+}
+
+// Invokes the task-started handler, if one is installed.
+void TaskQueueImpl::OnTaskStarted(const TaskQueue::Task& task,
+                                  base::TimeTicks start) {
+  if (!main_thread_only().on_task_started_handler.is_null())
+    main_thread_only().on_task_started_handler.Run(task, start);
+}
+
+// Installs the handler invoked when a task from this queue finishes.
+void TaskQueueImpl::SetOnTaskCompletedHandler(
+    TaskQueueImpl::OnTaskCompletedHandler handler) {
+  main_thread_only().on_task_completed_handler = std::move(handler);
+}
+
+// Invokes the task-completed handler with wall-clock and optional
+// thread-time measurements, if one is installed.
+void TaskQueueImpl::OnTaskCompleted(
+    const TaskQueue::Task& task,
+    base::TimeTicks start,
+    base::TimeTicks end,
+    base::Optional<base::TimeDelta> thread_time) {
+  if (!main_thread_only().on_task_completed_handler.is_null()) {
+    main_thread_only().on_task_completed_handler.Run(task, start, end,
+                                                     thread_time);
+  }
+}
+
+// True iff any timing handler is installed, i.e. per-task timing must be
+// collected for this queue.
+bool TaskQueueImpl::RequiresTaskTiming() const {
+  return !main_thread_only().on_task_started_handler.is_null() ||
+         !main_thread_only().on_task_completed_handler.is_null();
+}
+
+// Thread-safe: true once UnregisterTaskQueue has cleared the manager.
+bool TaskQueueImpl::IsUnregistered() const {
+  base::AutoLock lock(any_thread_lock_);
+  return !any_thread().task_queue_manager;
+}
+
+// Main-thread-only accessor; requires the queue to still be registered.
+base::WeakPtr<TaskQueueManagerImpl>
+TaskQueueImpl::GetTaskQueueManagerWeakPtr() {
+  return main_thread_only().task_queue_manager->GetWeakPtr();
+}
+
+// Forwards to the manager's graceful-shutdown helper.
+scoped_refptr<GracefulQueueShutdownHelper>
+TaskQueueImpl::GetGracefulQueueShutdownHelper() {
+  return main_thread_only()
+      .task_queue_manager->GetGracefulQueueShutdownHelper();
+}
+
+// Test-only override that force-disables/enables the queue regardless of
+// voters, then applies the resulting state.
+void TaskQueueImpl::SetQueueEnabledForTest(bool enabled) {
+  main_thread_only().is_enabled_for_test = enabled;
+  EnableOrDisableWithSelector(IsQueueEnabled());
+}
+
+// If a delayed fence is armed and its time has arrived (|now| at/after it),
+// converts it into a concrete fence at the current sequence number.
+void TaskQueueImpl::ActivateDelayedFenceIfNeeded(base::TimeTicks now) {
+  if (!main_thread_only().delayed_fence)
+    return;
+  if (main_thread_only().delayed_fence.value() > now)
+    return;
+  InsertFence(TaskQueue::InsertFencePosition::kNow);
+  main_thread_only().delayed_fence = base::nullopt;
+}
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h
new file mode 100644
index 00000000000..0e1a62c1a9d
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h
@@ -0,0 +1,451 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_IMPL_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_IMPL_H_
+
+#include <stddef.h>
+
+#include <memory>
+#include <set>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/pending_task.h"
+#include "base/threading/thread_checker.h"
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "third_party/blink/renderer/platform/scheduler/base/enqueue_order.h"
+#include "third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.h"
+#include "third_party/blink/renderer/platform/scheduler/base/intrusive_heap.h"
+#include "third_party/blink/renderer/platform/scheduler/base/sequenced_task_source.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue.h"
+#include "third_party/blink/renderer/platform/wtf/deque.h"
+
+namespace blink {
+namespace scheduler {
+class LazyNow;
+class TimeDomain;
+class TaskQueueManagerImpl;
+
+namespace internal {
+class WorkQueue;
+class WorkQueueSets;
+
+// TaskQueueImpl has four main queues:
+//
+// Immediate (non-delayed) tasks:
+// immediate_incoming_queue - PostTask enqueues tasks here
+// immediate_work_queue
+//
+// Delayed tasks
+// delayed_incoming_queue - PostDelayedTask enqueues tasks here
+// delayed_work_queue
+//
+// The immediate_incoming_queue can be accessed from any thread, the other
+// queues are main-thread only. To reduce the overhead of locking,
+// immediate_work_queue is swapped with immediate_incoming_queue when
+// immediate_work_queue becomes empty.
+//
+// Delayed tasks are initially posted to delayed_incoming_queue and a wake-up
+// is scheduled with the TimeDomain. When the delay has elapsed, the TimeDomain
+// calls UpdateDelayedWorkQueue and ready delayed tasks are moved into the
+// delayed_work_queue. Note the EnqueueOrder (used for ordering) for a delayed
+// task is not set until it's moved into the delayed_work_queue.
+//
+// TaskQueueImpl uses the WorkQueueSets and the TaskQueueSelector to implement
+// prioritization. Task selection is done by the TaskQueueSelector and when a
+// queue is selected, it round-robins between the immediate_work_queue and
+// delayed_work_queue. The reason for this is we want to make sure delayed
+// tasks (normally the most common type) don't starve out immediate work.
+class PLATFORM_EXPORT TaskQueueImpl {
+ public:
+  TaskQueueImpl(TaskQueueManagerImpl* task_queue_manager,
+                TimeDomain* time_domain,
+                const TaskQueue::Spec& spec);
+
+  ~TaskQueueImpl();
+
+  // Represents a time at which a task wants to run. Tasks scheduled for the
+  // same point in time will be ordered by their sequence numbers.
+  struct DelayedWakeUp {
+    base::TimeTicks time;
+    int sequence_num;
+
+    bool operator!=(const DelayedWakeUp& other) const {
+      return time != other.time || other.sequence_num != sequence_num;
+    }
+
+    bool operator==(const DelayedWakeUp& other) const {
+      return !(*this != other);
+    }
+
+    bool operator<=(const DelayedWakeUp& other) const {
+      if (time == other.time) {
+        // Debug gcc builds can compare an element against itself.
+        DCHECK(sequence_num != other.sequence_num || this == &other);
+        return (sequence_num - other.sequence_num) < 0;
+      }
+      return time < other.time;
+    }
+  };
+
+  // A task posted to a TaskQueueImpl. Extends TaskQueue::Task with the
+  // enqueue-order bookkeeping documented on |enqueue_order_| below.
+  class PLATFORM_EXPORT Task : public TaskQueue::Task {
+   public:
+    Task(TaskQueue::PostedTask task,
+         base::TimeTicks desired_run_time,
+         EnqueueOrder sequence_number);
+
+    Task(TaskQueue::PostedTask task,
+         base::TimeTicks desired_run_time,
+         EnqueueOrder sequence_number,
+         EnqueueOrder enqueue_order);
+
+    DelayedWakeUp delayed_wake_up() const {
+      return DelayedWakeUp{delayed_run_time, sequence_num};
+    }
+
+    EnqueueOrder enqueue_order() const {
+#ifndef NDEBUG
+      DCHECK(enqueue_order_set_);
+#endif
+      return enqueue_order_;
+    }
+
+    void set_enqueue_order(EnqueueOrder enqueue_order) {
+#ifndef NDEBUG
+      DCHECK(!enqueue_order_set_);
+      enqueue_order_set_ = true;
+#endif
+      enqueue_order_ = enqueue_order;
+    }
+
+#ifndef NDEBUG
+    bool enqueue_order_set() const { return enqueue_order_set_; }
+#endif
+
+   private:
+#ifndef NDEBUG
+    bool enqueue_order_set_;
+#endif
+    // Similar to sequence number, but ultimately the |enqueue_order_| is what
+    // the scheduler uses for task ordering. For immediate tasks |enqueue_order|
+    // is set when posted, but for delayed tasks it's not defined until they are
+    // enqueued on the |delayed_work_queue_|. This is because otherwise delayed
+    // tasks could run before an immediate task posted after the delayed task.
+    EnqueueOrder enqueue_order_;
+  };
+
+  // A result returned by PostDelayedTask. When the scheduler fails to post a
+  // task due to being shut down, the task is handed back so it can be
+  // destroyed outside the lock.
+  struct PostTaskResult {
+    PostTaskResult();
+    PostTaskResult(bool success, TaskQueue::PostedTask task);
+
+    static PostTaskResult Success();
+    static PostTaskResult Fail(TaskQueue::PostedTask task);
+
+    bool success = false;
+    TaskQueue::PostedTask task;
+  };
+
+  using OnNextWakeUpChangedCallback =
+      base::RepeatingCallback<void(base::TimeTicks)>;
+  using OnTaskStartedHandler =
+      base::RepeatingCallback<void(const TaskQueue::Task&, base::TimeTicks)>;
+  using OnTaskCompletedHandler =
+      base::RepeatingCallback<void(const TaskQueue::Task&,
+                                   base::TimeTicks,
+                                   base::TimeTicks,
+                                   base::Optional<base::TimeDelta>)>;
+
+  // TaskQueue implementation.
+  const char* GetName() const;
+  bool RunsTasksInCurrentSequence() const;
+  PostTaskResult PostDelayedTask(TaskQueue::PostedTask task);
+  // Require a reference to enclosing task queue for lifetime control.
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> CreateQueueEnabledVoter(
+      scoped_refptr<TaskQueue> owning_task_queue);
+  bool IsQueueEnabled() const;
+  bool IsEmpty() const;
+  size_t GetNumberOfPendingTasks() const;
+  bool HasTaskToRunImmediately() const;
+  base::Optional<base::TimeTicks> GetNextScheduledWakeUp();
+  base::Optional<DelayedWakeUp> GetNextScheduledWakeUpImpl();
+  void SetQueuePriority(TaskQueue::QueuePriority priority);
+  TaskQueue::QueuePriority GetQueuePriority() const;
+  void AddTaskObserver(base::MessageLoop::TaskObserver* task_observer);
+  void RemoveTaskObserver(base::MessageLoop::TaskObserver* task_observer);
+  void SetTimeDomain(TimeDomain* time_domain);
+  TimeDomain* GetTimeDomain() const;
+  void SetBlameContext(base::trace_event::BlameContext* blame_context);
+  void InsertFence(TaskQueue::InsertFencePosition position);
+  void InsertFenceAt(base::TimeTicks time);
+  void RemoveFence();
+  bool HasActiveFence();
+  bool BlockedByFence() const;
+  // Implementation of TaskQueue::SetObserver.
+  void SetOnNextWakeUpChangedCallback(OnNextWakeUpChangedCallback callback);
+
+  void UnregisterTaskQueue();
+
+  // Returns true if a (potentially hypothetical) task with the specified
+  // |enqueue_order| could run on the queue. Must be called from the main
+  // thread.
+  bool CouldTaskRun(EnqueueOrder enqueue_order) const;
+
+  // Must only be called from the thread this task queue was created on.
+  void ReloadImmediateWorkQueueIfEmpty();
+
+  void AsValueInto(base::TimeTicks now,
+                   base::trace_event::TracedValue* state) const;
+
+  bool GetQuiescenceMonitored() const { return should_monitor_quiescence_; }
+  bool GetShouldNotifyObservers() const { return should_notify_observers_; }
+
+  void NotifyWillProcessTask(const base::PendingTask& pending_task);
+  void NotifyDidProcessTask(const base::PendingTask& pending_task);
+
+  // Check for available tasks in immediate work queues.
+  // Used to check if we need to generate notifications about delayed work.
+  bool HasPendingImmediateWork();
+
+  WorkQueue* delayed_work_queue() {
+    return main_thread_only().delayed_work_queue.get();
+  }
+
+  const WorkQueue* delayed_work_queue() const {
+    return main_thread_only().delayed_work_queue.get();
+  }
+
+  WorkQueue* immediate_work_queue() {
+    return main_thread_only().immediate_work_queue.get();
+  }
+
+  const WorkQueue* immediate_work_queue() const {
+    return main_thread_only().immediate_work_queue.get();
+  }
+
+  // Enqueues any delayed tasks which should be run now on the
+  // |delayed_work_queue|.
+  // Must be called from the main thread.
+  void WakeUpForDelayedWork(LazyNow* lazy_now);
+
+  HeapHandle heap_handle() const { return main_thread_only().heap_handle; }
+
+  void set_heap_handle(HeapHandle heap_handle) {
+    main_thread_only().heap_handle = heap_handle;
+  }
+
+  // Pushes |task| onto the front of the specified work queue. Caution must be
+  // taken with this API because you could easily starve out other work.
+  void RequeueDeferredNonNestableTask(TaskQueueImpl::Task&& task,
+                                      SequencedTaskSource::WorkType work_type);
+
+  void PushImmediateIncomingTaskForTest(TaskQueueImpl::Task&& task);
+  EnqueueOrder GetFenceForTest() const;
+
+  class QueueEnabledVoterImpl : public TaskQueue::QueueEnabledVoter {
+   public:
+    explicit QueueEnabledVoterImpl(scoped_refptr<TaskQueue> task_queue);
+    ~QueueEnabledVoterImpl() override;
+
+    // QueueEnabledVoter implementation.
+    void SetQueueEnabled(bool enabled) override;
+
+    TaskQueueImpl* GetTaskQueueForTest() const {
+      return task_queue_->GetTaskQueueImpl();
+    }
+
+   private:
+    friend class TaskQueueImpl;
+
+    scoped_refptr<TaskQueue> task_queue_;
+    bool enabled_;
+  };
+
+  // Iterates over |delayed_incoming_queue| removing canceled tasks.
+  void SweepCanceledDelayedTasks(base::TimeTicks now);
+
+  // Allows wrapping TaskQueue to set a handler to subscribe for notifications
+  // about started and completed tasks.
+  void SetOnTaskStartedHandler(OnTaskStartedHandler handler);
+  void OnTaskStarted(const TaskQueue::Task& task, base::TimeTicks start);
+  void SetOnTaskCompletedHandler(OnTaskCompletedHandler handler);
+  void OnTaskCompleted(const TaskQueue::Task& task,
+                       base::TimeTicks start,
+                       base::TimeTicks end,
+                       base::Optional<base::TimeDelta> thread_time);
+  bool RequiresTaskTiming() const;
+
+  base::WeakPtr<TaskQueueManagerImpl> GetTaskQueueManagerWeakPtr();
+
+  scoped_refptr<GracefulQueueShutdownHelper> GetGracefulQueueShutdownHelper();
+
+  // Returns true if this queue is unregistered or task queue manager is deleted
+  // and this queue can be safely deleted on any thread.
+  bool IsUnregistered() const;
+
+  // Disables queue for testing purposes, when a QueueEnabledVoter can't be
+  // constructed due to not having TaskQueue.
+  void SetQueueEnabledForTest(bool enabled);
+
+ protected:
+  void SetDelayedWakeUpForTesting(base::Optional<DelayedWakeUp> wake_up);
+
+ private:
+  friend class WorkQueue;
+  friend class WorkQueueTest;
+
+  struct AnyThread {
+    AnyThread(TaskQueueManagerImpl* task_queue_manager,
+              TimeDomain* time_domain);
+    ~AnyThread();
+
+    // TaskQueueManagerImpl, TimeDomain and Observer are maintained in two
+    // copies: inside AnyThread and inside MainThreadOnly. They can be changed
+    // only from main thread, so it should be locked before accessing from other
+    // threads.
+    TaskQueueManagerImpl* task_queue_manager;
+    TimeDomain* time_domain;
+    // Callback corresponding to TaskQueue::Observer::OnQueueNextChanged.
+    OnNextWakeUpChangedCallback on_next_wake_up_changed_callback;
+  };
+
+  struct MainThreadOnly {
+    MainThreadOnly(TaskQueueManagerImpl* task_queue_manager,
+                   TaskQueueImpl* task_queue,
+                   TimeDomain* time_domain);
+    ~MainThreadOnly();
+
+    // Another copy of TaskQueueManagerImpl, TimeDomain and Observer
+    // for lock-free access from the main thread.
+    // See description inside struct AnyThread for details.
+    TaskQueueManagerImpl* task_queue_manager;
+    TimeDomain* time_domain;
+    // Callback corresponding to TaskQueue::Observer::OnQueueNextChanged.
+    OnNextWakeUpChangedCallback on_next_wake_up_changed_callback;
+
+    std::unique_ptr<WorkQueue> delayed_work_queue;
+    std::unique_ptr<WorkQueue> immediate_work_queue;
+    std::priority_queue<Task> delayed_incoming_queue;
+    base::ObserverList<base::MessageLoop::TaskObserver> task_observers;
+    size_t set_index;
+    HeapHandle heap_handle;
+    int is_enabled_refcount;
+    int voter_refcount;
+    base::trace_event::BlameContext* blame_context;  // Not owned.
+    EnqueueOrder current_fence;
+    base::Optional<base::TimeTicks> delayed_fence;
+    OnTaskStartedHandler on_task_started_handler;
+    OnTaskCompletedHandler on_task_completed_handler;
+    // Last reported wake up, used only in UpdateWakeUp to avoid
+    // excessive calls.
+    base::Optional<DelayedWakeUp> scheduled_wake_up;
+    // If false, queue will be disabled. Used only for tests.
+    bool is_enabled_for_test;
+  };
+
+  PostTaskResult PostImmediateTaskImpl(TaskQueue::PostedTask task);
+  PostTaskResult PostDelayedTaskImpl(TaskQueue::PostedTask task);
+
+  // Push the task onto the |delayed_incoming_queue|. Lock-free main thread
+  // only fast path.
+  void PushOntoDelayedIncomingQueueFromMainThread(Task pending_task,
+                                                  base::TimeTicks now);
+
+  // Push the task onto the |delayed_incoming_queue|. Slow path from other
+  // threads.
+  void PushOntoDelayedIncomingQueueLocked(Task pending_task);
+
+  void ScheduleDelayedWorkTask(Task pending_task);
+
+  void MoveReadyImmediateTasksToImmediateWorkQueueLocked();
+
+  // Push the task onto the |immediate_incoming_queue| and for auto pumped
+  // queues it calls MaybePostDoWorkOnMainRunner if the Incoming queue was
+  // empty.
+  void PushOntoImmediateIncomingQueueLocked(Task task);
+
+  // We reserve an inline capacity of 8 tasks to try and reduce the load on
+  // PartitionAlloc.
+  using TaskDeque = WTF::Deque<Task, 8>;
+
+  // Extracts all the tasks from the immediate incoming queue and clears it.
+  // Can be called from any thread.
+  TaskDeque TakeImmediateIncomingQueue();
+
+  void TraceQueueSize() const;
+  static void QueueAsValueInto(const TaskDeque& queue,
+                               base::TimeTicks now,
+                               base::trace_event::TracedValue* state);
+  static void QueueAsValueInto(const std::priority_queue<Task>& queue,
+                               base::TimeTicks now,
+                               base::trace_event::TracedValue* state);
+  static void TaskAsValueInto(const Task& task,
+                              base::TimeTicks now,
+                              base::trace_event::TracedValue* state);
+
+  void RemoveQueueEnabledVoter(const QueueEnabledVoterImpl* voter);
+  void OnQueueEnabledVoteChanged(bool enabled);
+  void EnableOrDisableWithSelector(bool enable);
+
+  // Schedules delayed work on time domain and calls the observer.
+  void UpdateDelayedWakeUp(LazyNow* lazy_now);
+  void UpdateDelayedWakeUpImpl(LazyNow* lazy_now,
+                               base::Optional<DelayedWakeUp> wake_up);
+
+  // Activate a delayed fence if a time has come.
+  void ActivateDelayedFenceIfNeeded(base::TimeTicks now);
+
+  const char* name_;
+
+  const base::PlatformThreadId thread_id_;
+
+  mutable base::Lock any_thread_lock_;
+  AnyThread any_thread_;
+  struct AnyThread& any_thread() {
+    any_thread_lock_.AssertAcquired();
+    return any_thread_;
+  }
+  const struct AnyThread& any_thread() const {
+    any_thread_lock_.AssertAcquired();
+    return any_thread_;
+  }
+
+  base::ThreadChecker main_thread_checker_;
+  MainThreadOnly main_thread_only_;
+  MainThreadOnly& main_thread_only() {
+    DCHECK(main_thread_checker_.CalledOnValidThread());
+    return main_thread_only_;
+  }
+  const MainThreadOnly& main_thread_only() const {
+    DCHECK(main_thread_checker_.CalledOnValidThread());
+    return main_thread_only_;
+  }
+
+  mutable base::Lock immediate_incoming_queue_lock_;
+  TaskDeque immediate_incoming_queue_;
+  TaskDeque& immediate_incoming_queue() {
+    immediate_incoming_queue_lock_.AssertAcquired();
+    return immediate_incoming_queue_;
+  }
+  const TaskDeque& immediate_incoming_queue() const {
+    immediate_incoming_queue_lock_.AssertAcquired();
+    return immediate_incoming_queue_;
+  }
+
+  const bool should_monitor_quiescence_;
+  const bool should_notify_observers_;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskQueueImpl);
+};
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_IMPL_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager.h b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager.h
new file mode 100644
index 00000000000..77fecacb9b4
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager.h
@@ -0,0 +1,103 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_
+
+#include "base/message_loop/message_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/scheduler/base/real_time_domain.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_time_observer.h"
+#include "third_party/blink/renderer/platform/scheduler/base/time_domain.h"
+
+namespace blink {
+namespace scheduler {
+
+class PLATFORM_EXPORT TaskQueueManager {
+ public:
+  // Keep TaskQueueManagerImpl in sync with this interface.
+  // The general rule is not to expose methods only used in scheduler/base.
+  // Try to keep interface as lean as possible.
+
+  // Observer class. Always called back on the main thread.
+  class PLATFORM_EXPORT Observer {
+   public:
+    virtual ~Observer() {}
+    virtual void OnBeginNestedRunLoop() = 0;
+    virtual void OnExitNestedRunLoop() = 0;
+  };
+
+  virtual ~TaskQueueManager() = default;
+
+  // Forwards to TaskQueueManagerImpl::TakeOverCurrentThread.
+  // TODO(kraynov): Any way to make it truly agnostic of TaskQueueManagerImpl?
+  static std::unique_ptr<TaskQueueManager> TakeOverCurrentThread();
+
+  // Should be called once, on main thread only.
+  // If |null| is passed, no callbacks will occur.
+  // Note: |observer| is expected to outlive the SchedulerHelper.
+  // TODO(kraynov): Review these lifetime assumptions.
+  virtual void SetObserver(Observer* observer) = 0;
+
+  // These functions can only be called on the same thread that the task queue
+  // manager executes its tasks on.
+  virtual void AddTaskObserver(
+      base::MessageLoop::TaskObserver* task_observer) = 0;
+  virtual void RemoveTaskObserver(
+      base::MessageLoop::TaskObserver* task_observer) = 0;
+  virtual void AddTaskTimeObserver(TaskTimeObserver* task_time_observer) = 0;
+  virtual void RemoveTaskTimeObserver(TaskTimeObserver* task_time_observer) = 0;
+
+  // Time domains must be registered for the task queues to get updated.
+  virtual void RegisterTimeDomain(TimeDomain* time_domain) = 0;
+  virtual void UnregisterTimeDomain(TimeDomain* time_domain) = 0;
+  virtual RealTimeDomain* GetRealTimeDomain() const = 0;
+
+  virtual const base::TickClock* GetClock() const = 0;
+  virtual base::TimeTicks NowTicks() const = 0;
+
+  // Sets the SingleThreadTaskRunner that will be returned by
+  // ThreadTaskRunnerHandle::Get and MessageLoop::current().task_runner() on the
+  // thread associated with this TaskQueueManager.
+  virtual void SetDefaultTaskRunner(
+      scoped_refptr<base::SingleThreadTaskRunner> task_runner) = 0;
+
+  // Removes all canceled delayed tasks.
+  virtual void SweepCanceledDelayedTasks() = 0;
+
+  // Returns true if any task from a monitored task queue was run since the
+  // last call to GetAndClearSystemIsQuiescentBit.
+  virtual bool GetAndClearSystemIsQuiescentBit() = 0;
+
+  // Set the number of tasks executed in a single invocation of the task queue
+  // manager. Increasing the batch size can reduce the overhead of yielding
+  // back to the main message loop -- at the cost of potentially delaying other
+  // tasks posted to the main loop. The batch size is 1 by default.
+  virtual void SetWorkBatchSize(int work_batch_size) = 0;
+
+  virtual void EnableCrashKeys(const char* file_name_crash_key,
+                               const char* function_name_crash_key) = 0;
+
+  // Creates a task queue with the given type, |spec| and args. Must be called
+  // on the thread this class was created on.
+  // TODO(altimin): TaskQueueManager should not create TaskQueues.
+  template <typename TaskQueueType, typename... Args>
+  scoped_refptr<TaskQueueType> CreateTaskQueue(const TaskQueue::Spec& spec,
+                                               Args&&... args) {
+    scoped_refptr<TaskQueueType> task_queue(new TaskQueueType(
+        CreateTaskQueueImpl(spec), spec, std::forward<Args>(args)...));
+    return task_queue;
+  }
+
+ protected:
+  virtual std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
+      const TaskQueue::Spec& spec) = 0;
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_delegate_for_test.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_delegate_for_test.cc
new file mode 100644
index 00000000000..e24c8827fdb
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_delegate_for_test.cc
@@ -0,0 +1,65 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_manager_delegate_for_test.h"
+
+#include <utility>
+
+#include "base/bind.h"
+#include "base/bind_helpers.h"
+
+namespace blink {
+namespace scheduler {
+
+// static
+// Factory: wraps |task_runner| and |time_source| in a ref-counted test
+// delegate. Neither pointer is owned beyond the refcounted lifetime.
+scoped_refptr<TaskQueueManagerDelegateForTest>
+TaskQueueManagerDelegateForTest::Create(
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+    const base::TickClock* time_source) {
+  return base::WrapRefCounted(
+      new TaskQueueManagerDelegateForTest(task_runner, time_source));
+}
+
+// Stores the wrapped task runner and clock; no other initialization.
+TaskQueueManagerDelegateForTest::TaskQueueManagerDelegateForTest(
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+    const base::TickClock* time_source)
+    : task_runner_(task_runner), time_source_(time_source) {}
+
+TaskQueueManagerDelegateForTest::~TaskQueueManagerDelegateForTest() {}
+
+// Forwards directly to the wrapped SingleThreadTaskRunner.
+bool TaskQueueManagerDelegateForTest::PostDelayedTask(
+    const base::Location& from_here,
+    base::OnceClosure task,
+    base::TimeDelta delay) {
+  return task_runner_->PostDelayedTask(from_here, std::move(task), delay);
+}
+
+// Forwards directly to the wrapped SingleThreadTaskRunner.
+bool TaskQueueManagerDelegateForTest::PostNonNestableDelayedTask(
+    const base::Location& from_here,
+    base::OnceClosure task,
+    base::TimeDelta delay) {
+  return task_runner_->PostNonNestableDelayedTask(from_here, std::move(task),
+                                                  delay);
+}
+
+// Delegates the sequence check to the wrapped task runner.
+bool TaskQueueManagerDelegateForTest::RunsTasksInCurrentSequence() const {
+  return task_runner_->RunsTasksInCurrentSequence();
+}
+
+// Tests never run a nested run loop through this delegate, so always false.
+bool TaskQueueManagerDelegateForTest::IsNested() const {
+  return false;
+}
+
+// Intentional no-op: nesting never occurs (see IsNested), so observers are
+// never notified.
+void TaskQueueManagerDelegateForTest::AddNestingObserver(
+    base::RunLoop::NestingObserver* observer) {}
+
+// Intentional no-op, matching AddNestingObserver.
+void TaskQueueManagerDelegateForTest::RemoveNestingObserver(
+    base::RunLoop::NestingObserver* observer) {}
+
+// Reads the current time from the injected clock (allows mock time in tests).
+base::TimeTicks TaskQueueManagerDelegateForTest::NowTicks() const {
+  return time_source_->NowTicks();
+}
+
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_delegate_for_test.h b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_delegate_for_test.h
new file mode 100644
index 00000000000..d448f792c3e
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_delegate_for_test.h
@@ -0,0 +1,59 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_DELEGATE_FOR_TEST_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_DELEGATE_FOR_TEST_H_
+
+#include <memory>
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/time/tick_clock.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_manager_delegate.h"
+
+namespace blink {
+namespace scheduler {
+
+// Test implementation of TaskQueueManagerDelegate that forwards task posting
+// to a caller-supplied SingleThreadTaskRunner, time queries to a
+// caller-supplied TickClock, and reports that it is never nested.
+class TaskQueueManagerDelegateForTest : public TaskQueueManagerDelegate {
+ public:
+  static scoped_refptr<TaskQueueManagerDelegateForTest> Create(
+      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+      const base::TickClock* time_source);
+
+  // SingleThreadTaskRunner:
+  bool PostDelayedTask(const base::Location& from_here,
+                       base::OnceClosure task,
+                       base::TimeDelta delay) override;
+  bool PostNonNestableDelayedTask(const base::Location& from_here,
+                                  base::OnceClosure task,
+                                  base::TimeDelta delay) override;
+  bool RunsTasksInCurrentSequence() const override;
+
+  // TaskQueueManagerDelegate:
+  bool IsNested() const override;
+  void AddNestingObserver(base::RunLoop::NestingObserver* observer) override;
+  void RemoveNestingObserver(base::RunLoop::NestingObserver* observer) override;
+
+  // TickClock:
+  base::TimeTicks NowTicks() const override;
+
+ protected:
+  ~TaskQueueManagerDelegateForTest() override;
+  TaskQueueManagerDelegateForTest(
+      scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+      const base::TickClock* time_source);
+
+ private:
+  // Target for all posted tasks.
+  scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+  // Time source for NowTicks(). Not owned; must outlive this delegate.
+  const base::TickClock* time_source_;
+
+  DISALLOW_COPY_AND_ASSIGN(TaskQueueManagerDelegateForTest);
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_DELEGATE_FOR_TEST_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.cc
new file mode 100644
index 00000000000..5c7b2968477
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.cc
@@ -0,0 +1,669 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.h"
+
+#include <memory>
+#include <queue>
+#include <set>
+
+#include "base/bind.h"
+#include "base/bit_cast.h"
+#include "base/compiler_specific.h"
+#include "base/debug/crash_logging.h"
+#include "base/rand_util.h"
+#include "base/time/default_tick_clock.h"
+#include "base/time/tick_clock.h"
+#include "base/trace_event/trace_event.h"
+#include "third_party/blink/renderer/platform/scheduler/base/real_time_domain.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_selector.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_time_observer.h"
+#include "third_party/blink/renderer/platform/scheduler/base/thread_controller.h"
+#include "third_party/blink/renderer/platform/scheduler/base/thread_controller_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h"
+
+namespace blink {
+namespace scheduler {
+
+namespace {
+
+const double kLongTaskTraceEventThreshold = 0.05;
+const double kSamplingRateForRecordingCPUTime = 0.01;
+
+// Converts a TimeTicks value to seconds since the TimeTicks zero origin —
+// the unit consumed by TaskTimeObserver callbacks below.
+double MonotonicTimeInSeconds(base::TimeTicks time_ticks) {
+  return (time_ticks - base::TimeTicks()).InSecondsF();
+}
+
+// Magic value to protect against memory corruption and bail out
+// early when detected.
+constexpr int kMemoryCorruptionSentinelValue = 0xdeadbeef;
+
+// Sweeps canceled delayed tasks from |queue|. |time_domain_now| memoizes
+// each TimeDomain's Now() so it is sampled at most once per sweep pass.
+void SweepCanceledDelayedTasksInQueue(
+    internal::TaskQueueImpl* queue,
+    std::map<TimeDomain*, base::TimeTicks>* time_domain_now) {
+  TimeDomain* time_domain = queue->GetTimeDomain();
+  if (time_domain_now->find(time_domain) == time_domain_now->end())
+    time_domain_now->insert(std::make_pair(time_domain, time_domain->Now()));
+  queue->SweepCanceledDelayedTasks(time_domain_now->at(time_domain));
+}
+
+} // namespace
+
+// static
+// Interface-level factory; simply forwards to the impl's factory so callers
+// only depending on TaskQueueManager need not see TaskQueueManagerImpl.
+std::unique_ptr<TaskQueueManager> TaskQueueManager::TakeOverCurrentThread() {
+  return TaskQueueManagerImpl::TakeOverCurrentThread();
+}
+
+// Wires this manager up as the sequenced task source and nesting observer of
+// |controller|, which drives task execution on the current thread.
+TaskQueueManagerImpl::TaskQueueManagerImpl(
+    std::unique_ptr<internal::ThreadController> controller)
+    : graceful_shutdown_helper_(new internal::GracefulQueueShutdownHelper()),
+      controller_(std::move(controller)),
+      memory_corruption_sentinel_(kMemoryCorruptionSentinelValue),
+      weak_factory_(this) {
+  // TODO(altimin): Create a sequence checker here.
+  DCHECK(controller_->RunsTasksInCurrentSequence());
+  TRACE_EVENT_OBJECT_CREATED_WITH_ID(
+      TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"), "TaskQueueManager",
+      this);
+  main_thread_only().selector.SetTaskQueueSelectorObserver(this);
+
+  // The real time domain is always present and acts as the default domain
+  // for queues created without an explicit one (see CreateTaskQueueImpl).
+  RegisterTimeDomain(main_thread_only().real_time_domain.get());
+
+  controller_->SetSequencedTaskSource(this);
+  controller_->AddNestingObserver(this);
+}
+
+// Unregisters every remaining queue (active and gracefully-shutting-down —
+// the latter's raw pointers are also present in |active_queues|) and detaches
+// this manager from the controller and selector.
+TaskQueueManagerImpl::~TaskQueueManagerImpl() {
+  TRACE_EVENT_OBJECT_DELETED_WITH_ID(
+      TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"), "TaskQueueManager",
+      this);
+
+  // TODO(altimin): restore default task runner automatically when
+  // ThreadController is destroyed.
+  controller_->RestoreDefaultTaskRunner();
+
+  for (internal::TaskQueueImpl* queue : main_thread_only().active_queues) {
+    main_thread_only().selector.RemoveQueue(queue);
+    queue->UnregisterTaskQueue();
+  }
+
+  main_thread_only().active_queues.clear();
+  main_thread_only().queues_to_gracefully_shutdown.clear();
+
+  // Tells the helper to drop any queues handed to it after this point; the
+  // manager that would have deleted them is going away.
+  graceful_shutdown_helper_->OnTaskQueueManagerDeleted();
+
+  main_thread_only().selector.SetTaskQueueSelectorObserver(nullptr);
+  controller_->RemoveNestingObserver(this);
+}
+
+// Seeds the RNG used for CPU-time sampling (see ShouldRecordCPUTimeForTask)
+// and creates the always-present real time domain.
+TaskQueueManagerImpl::MainThreadOnly::MainThreadOnly()
+    : random_generator(base::RandUint64()),
+      uniform_distribution(0.0, 1.0),
+      real_time_domain(new RealTimeDomain()) {}
+
+// Creates a manager driven by the current thread's MessageLoop and the
+// default (real) tick clock. See the header: once per thread only.
+std::unique_ptr<TaskQueueManagerImpl>
+TaskQueueManagerImpl::TakeOverCurrentThread() {
+  return std::unique_ptr<TaskQueueManagerImpl>(
+      new TaskQueueManagerImpl(internal::ThreadControllerImpl::Create(
+          base::MessageLoop::current(), base::DefaultTickClock::GetInstance())));
+}
+
+// Adds |time_domain| to the set consulted by WakeUpReadyDelayedQueues and
+// DelayTillNextTask, and notifies it of its owning manager.
+void TaskQueueManagerImpl::RegisterTimeDomain(TimeDomain* time_domain) {
+  main_thread_only().time_domains.insert(time_domain);
+  time_domain->OnRegisterWithTaskQueueManager(this);
+}
+
+// Removes |time_domain| from the registered set. Note there is no matching
+// "unregistered" notification sent to the domain here.
+void TaskQueueManagerImpl::UnregisterTimeDomain(TimeDomain* time_domain) {
+  main_thread_only().time_domains.erase(time_domain);
+}
+
+// Returns the built-in wall-clock time domain owned by this manager.
+RealTimeDomain* TaskQueueManagerImpl::GetRealTimeDomain() const {
+  return main_thread_only().real_time_domain.get();
+}
+
+// Creates a new queue bound to |spec.time_domain| (defaulting to the real
+// time domain), registers it with the selector, and records it as active.
+// Ownership of the returned queue lies with the caller; |active_queues|
+// keeps only a raw pointer.
+std::unique_ptr<internal::TaskQueueImpl>
+TaskQueueManagerImpl::CreateTaskQueueImpl(const TaskQueue::Spec& spec) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  TimeDomain* time_domain = spec.time_domain
+                                ? spec.time_domain
+                                : main_thread_only().real_time_domain.get();
+  // The requested domain must have been registered beforehand.
+  DCHECK(main_thread_only().time_domains.find(time_domain) !=
+         main_thread_only().time_domains.end());
+  std::unique_ptr<internal::TaskQueueImpl> task_queue =
+      std::make_unique<internal::TaskQueueImpl>(this, time_domain, spec);
+  main_thread_only().active_queues.insert(task_queue.get());
+  main_thread_only().selector.AddQueue(task_queue.get());
+  return task_queue;
+}
+
+// Sets the (single) observer notified on nested run loop entry/exit.
+void TaskQueueManagerImpl::SetObserver(Observer* observer) {
+  main_thread_only().observer = observer;
+}
+
+// Takes ownership of |task_queue|, detaches it from the selector and the
+// cross-thread incoming-work map, and parks it in |queues_to_delete| until
+// it is safe to free (see CleanUpQueues).
+void TaskQueueManagerImpl::UnregisterTaskQueueImpl(
+    std::unique_ptr<internal::TaskQueueImpl> task_queue) {
+  TRACE_EVENT1("renderer.scheduler",
+               "TaskQueueManagerImpl::UnregisterTaskQueue", "queue_name",
+               task_queue->GetName());
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+
+  main_thread_only().selector.RemoveQueue(task_queue.get());
+
+  {
+    base::AutoLock lock(any_thread_lock_);
+    any_thread().has_incoming_immediate_work.erase(task_queue.get());
+  }
+
+  task_queue->UnregisterTaskQueue();
+
+  // Add |task_queue| to |main_thread_only().queues_to_delete| so we can
+  // prevent it from being freed while any of our structures hold a raw
+  // pointer to it.
+  main_thread_only().active_queues.erase(task_queue.get());
+  main_thread_only().queues_to_delete[task_queue.get()] = std::move(task_queue);
+}
+
+// Refills the immediate work queues of the given queues from their incoming
+// queues, but only for queues whose work queue is currently empty.
+void TaskQueueManagerImpl::ReloadEmptyWorkQueues(
+    const IncomingImmediateWorkMap& queues_to_reload) const {
+  // There are two cases where a queue needs reloading. First, it might be
+  // completely empty and we've just posted a task (this method handles that
+  // case). Secondly, the work queue may become empty when calling
+  // WorkQueue::TakeTaskFromWorkQueue (handled there).
+  for (const auto& pair : queues_to_reload) {
+    pair.first->ReloadImmediateWorkQueueIfEmpty();
+  }
+}
+
+// Moves delayed tasks that have become due onto their queues' work queues.
+// Each time domain is consulted with a LazyNow based on its own clock; the
+// caller-supplied |lazy_now| is reused only for the real time domain to
+// avoid re-querying the wall clock.
+void TaskQueueManagerImpl::WakeUpReadyDelayedQueues(LazyNow* lazy_now) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+               "TaskQueueManagerImpl::WakeUpReadyDelayedQueues");
+
+  for (TimeDomain* time_domain : main_thread_only().time_domains) {
+    if (time_domain == main_thread_only().real_time_domain.get()) {
+      time_domain->WakeUpReadyDelayedQueues(lazy_now);
+    } else {
+      LazyNow time_domain_lazy_now = time_domain->CreateLazyNow();
+      time_domain->WakeUpReadyDelayedQueues(&time_domain_lazy_now);
+    }
+  }
+}
+
+// base::RunLoop::NestingObserver: tracks nesting depth; the observer is only
+// told about the outermost transition (depth 0 -> 1).
+void TaskQueueManagerImpl::OnBeginNestedRunLoop() {
+  // We just entered a nested run loop, make sure there's a DoWork posted or
+  // the system will grind to a halt.
+  main_thread_only().nesting_depth++;
+  if (main_thread_only().observer && main_thread_only().nesting_depth == 1)
+    main_thread_only().observer->OnBeginNestedRunLoop();
+}
+
+// Counterpart of OnBeginNestedRunLoop. On returning to depth 0, re-queues
+// the non-nestable tasks that were deferred while nested (see TakeTask).
+void TaskQueueManagerImpl::OnExitNestedRunLoop() {
+  main_thread_only().nesting_depth--;
+  DCHECK_GE(main_thread_only().nesting_depth, 0);
+  if (main_thread_only().nesting_depth == 0) {
+    // While we were nested some non-nestable tasks may have become eligible to
+    // run. We push them back onto the front of their original work queues.
+    while (!main_thread_only().non_nestable_task_queue.empty()) {
+      NonNestableTask& non_nestable_task =
+          *main_thread_only().non_nestable_task_queue.begin();
+      non_nestable_task.task_queue->RequeueDeferredNonNestableTask(
+          std::move(non_nestable_task.task), non_nestable_task.work_type);
+      main_thread_only().non_nestable_task_queue.pop_front();
+    }
+    if (main_thread_only().observer)
+      main_thread_only().observer->OnExitNestedRunLoop();
+  }
+}
+
+// Called (potentially from any thread — note the lock) when |queue| receives
+// an immediate task. Records the enqueue order so TakeTask/DelayTillNextTask
+// can reload the queue later, and pings the controller unless the queue is
+// known to be blocked (disabled or behind a fence).
+void TaskQueueManagerImpl::OnQueueHasIncomingImmediateWork(
+    internal::TaskQueueImpl* queue,
+    internal::EnqueueOrder enqueue_order,
+    bool queue_is_blocked) {
+  {
+    base::AutoLock lock(any_thread_lock_);
+    any_thread().has_incoming_immediate_work.insert(
+        std::make_pair(queue, enqueue_order));
+  }
+
+  if (!queue_is_blocked)
+    controller_->ScheduleWork();
+}
+
+// Requests an immediate DoWork; de-duplication happens in the controller.
+void TaskQueueManagerImpl::MaybeScheduleImmediateWork(
+    const base::Location& from_here) {
+  controller_->ScheduleWork();
+}
+
+// Requests a delayed DoWork at |run_time|. Main thread only; see header.
+void TaskQueueManagerImpl::MaybeScheduleDelayedWork(
+    const base::Location& from_here,
+    TimeDomain* requesting_time_domain,
+    base::TimeTicks now,
+    base::TimeTicks run_time) {
+  controller_->ScheduleDelayedWork(now, run_time);
+}
+
+// Cancels a delayed DoWork previously requested for |run_time|.
+void TaskQueueManagerImpl::CancelDelayedWork(TimeDomain* requesting_time_domain,
+                                             base::TimeTicks run_time) {
+  controller_->CancelDelayedWork(run_time);
+}
+
+// SequencedTaskSource implementation: selects and returns the next task to
+// run, or nullopt if there is none. Every returned task must be matched by a
+// DidRunTask() call (the pair maintains |task_execution_stack|).
+base::Optional<base::PendingTask> TaskQueueManagerImpl::TakeTask() {
+  CHECK(Validate());
+
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  TRACE_EVENT0("renderer.scheduler", "TaskQueueManagerImpl::TakeTask");
+
+  IncomingImmediateWorkMap queues_to_reload;
+
+  {
+    base::AutoLock lock(any_thread_lock_);
+    std::swap(queues_to_reload, any_thread().has_incoming_immediate_work);
+  }
+
+  // It's important we call ReloadEmptyWorkQueues outside of the lock to
+  // avoid a lock order inversion.
+  ReloadEmptyWorkQueues(queues_to_reload);
+  LazyNow lazy_now(main_thread_only().real_time_domain->CreateLazyNow());
+  WakeUpReadyDelayedQueues(&lazy_now);
+
+  while (true) {
+    internal::WorkQueue* work_queue = nullptr;
+    bool should_run =
+        main_thread_only().selector.SelectWorkQueueToService(&work_queue);
+    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
+        TRACE_DISABLED_BY_DEFAULT("renderer.scheduler.debug"),
+        "TaskQueueManager", this,
+        AsValueWithSelectorResult(should_run, work_queue));
+
+    if (!should_run)
+      return base::nullopt;
+
+    // If the head task was canceled, remove it and run the selector again.
+    if (work_queue->RemoveAllCanceledTasksFromFront())
+      continue;
+
+    if (work_queue->GetFrontTask()->nestable == base::Nestable::kNonNestable &&
+        main_thread_only().nesting_depth > 0) {
+      // Defer non-nestable work. NOTE these tasks can be arbitrarily delayed so
+      // the additional delay should not be a problem.
+      // Note because we don't delete queues while nested, it's perfectly OK to
+      // store the raw pointer for |queue| here.
+      NonNestableTask deferred_task{work_queue->TakeTaskFromWorkQueue(),
+                                    work_queue->task_queue(),
+                                    work_queue->queue_type()};
+      // We push these tasks onto the front to make sure that when requeued they
+      // are pushed in the right order.
+      main_thread_only().non_nestable_task_queue.push_front(
+          std::move(deferred_task));
+      continue;
+    }
+
+    // Due to nested message loops we need to maintain a stack of currently
+    // executing tasks so in TaskQueueManagerImpl::DidRunTask we can run the
+    // right observers.
+    main_thread_only().task_execution_stack.emplace_back(
+        work_queue->TakeTaskFromWorkQueue(), work_queue->task_queue());
+    ExecutingTask& executing_task =
+        *main_thread_only().task_execution_stack.rbegin();
+    NotifyWillProcessTask(&executing_task, &lazy_now);
+    return std::move(executing_task.pending_task);
+  }
+}
+
+// SequencedTaskSource implementation: must be called after each task handed
+// out by TakeTask() finishes. Pops the execution stack, notifies observers,
+// and — only at the outermost level — deletes queues that became disposable.
+void TaskQueueManagerImpl::DidRunTask() {
+  LazyNow lazy_now(main_thread_only().real_time_domain->CreateLazyNow());
+  ExecutingTask& executing_task =
+      *main_thread_only().task_execution_stack.rbegin();
+  NotifyDidProcessTask(executing_task, &lazy_now);
+  main_thread_only().task_execution_stack.pop_back();
+
+  // Queue deletion is deferred while nested; see TakeTask's raw-pointer note.
+  if (main_thread_only().nesting_depth == 0)
+    CleanUpQueues();
+}
+
+// Returns how long until the next task could run: zero if immediate work is
+// available, the shortest delay across all time domains otherwise, or
+// TimeDelta::Max() if there is no pending work at all.
+base::TimeDelta TaskQueueManagerImpl::DelayTillNextTask(LazyNow* lazy_now) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+
+  // If the selector has non-empty queues we trivially know there is immediate
+  // work to be done.
+  if (!main_thread_only().selector.AllEnabledWorkQueuesAreEmpty())
+    return base::TimeDelta();
+
+  // It's possible the selector's state is dirty because ReloadEmptyWorkQueues
+  // hasn't been called yet. This check catches the case of fresh incoming
+  // work.
+  {
+    base::AutoLock lock(any_thread_lock_);
+    for (const auto& pair : any_thread().has_incoming_immediate_work) {
+      if (pair.first->CouldTaskRun(pair.second))
+        return base::TimeDelta();
+    }
+  }
+
+  // Otherwise we need to find the shortest delay, if any. NB we don't need to
+  // call WakeUpReadyDelayedQueues because it's assumed DelayTillNextTask will
+  // return base::TimeDelta() if the delayed task is due to run now.
+  base::TimeDelta delay_till_next_task = base::TimeDelta::Max();
+  for (TimeDomain* time_domain : main_thread_only().time_domains) {
+    base::Optional<base::TimeDelta> delay =
+        time_domain->DelayTillNextTask(lazy_now);
+    if (!delay)
+      continue;
+
+    if (*delay < delay_till_next_task)
+      delay_till_next_task = *delay;
+  }
+  return delay_till_next_task;
+}
+
+// Forwards the just-queued task to the controller (e.g. for task annotation);
+// called by TaskQueueImpl when a task is posted.
+void TaskQueueManagerImpl::DidQueueTask(
+    const internal::TaskQueueImpl::Task& pending_task) {
+  controller_->DidQueueTask(pending_task);
+}
+
+// Pre-task bookkeeping: sets crash keys from the task's posting site, fires
+// task/task-time observers (queue opt-out via GetShouldNotifyObservers), and
+// samples the start wall/thread times used later by NotifyDidProcessTask.
+void TaskQueueManagerImpl::NotifyWillProcessTask(ExecutingTask* executing_task,
+                                                 LazyNow* time_before_task) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+               "TaskQueueManagerImpl::NotifyWillProcessTaskObservers");
+  if (executing_task->task_queue->GetQuiescenceMonitored())
+    main_thread_only().task_was_run_on_quiescence_monitored_queue = true;
+
+  // Crash keys record where the currently running task was posted from, to
+  // aid crash triage. No-ops until EnableCrashKeys has been called.
+  base::debug::SetCrashKeyString(
+      main_thread_only().file_name_crash_key,
+      executing_task->pending_task.posted_from.file_name());
+  base::debug::SetCrashKeyString(
+      main_thread_only().function_name_crash_key,
+      executing_task->pending_task.posted_from.function_name());
+
+  if (executing_task->task_queue->GetShouldNotifyObservers()) {
+    {
+      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+                   "TaskQueueManager.WillProcessTaskObservers");
+      for (auto& observer : main_thread_only().task_observers)
+        observer.WillProcessTask(executing_task->pending_task);
+    }
+
+    {
+      TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+                   "TaskQueueManager.QueueNotifyWillProcessTask");
+      executing_task->task_queue->NotifyWillProcessTask(
+          executing_task->pending_task);
+    }
+
+    // Task timing is only recorded at the outermost nesting level, and only
+    // when someone is listening (or the queue explicitly requires it).
+    bool notify_time_observers =
+        main_thread_only().nesting_depth == 0 &&
+        (main_thread_only().task_time_observers.might_have_observers() ||
+         executing_task->task_queue->RequiresTaskTiming());
+    if (notify_time_observers) {
+      executing_task->task_start_time = time_before_task->Now();
+      double task_start_time_sec =
+          MonotonicTimeInSeconds(executing_task->task_start_time);
+
+      {
+        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+                     "TaskQueueManager.WillProcessTaskTimeObservers");
+        for (auto& observer : main_thread_only().task_time_observers)
+          observer.WillProcessTask(task_start_time_sec);
+      }
+
+      {
+        TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+                     "TaskQueueManager.QueueOnTaskStarted");
+        executing_task->task_queue->OnTaskStarted(
+            executing_task->pending_task, executing_task->task_start_time);
+      }
+    }
+  }
+
+  // Thread (CPU) time is sampled probabilistically; see
+  // ShouldRecordCPUTimeForTask and kSamplingRateForRecordingCPUTime.
+  executing_task->should_record_thread_time = ShouldRecordCPUTimeForTask();
+  if (executing_task->should_record_thread_time)
+    executing_task->task_start_thread_time = base::ThreadTicks::Now();
+}
+
+// Post-task counterpart of NotifyWillProcessTask: fires Did* observer
+// callbacks, reports per-task timing to the queue, and emits a "LongTask"
+// trace event for tasks longer than kLongTaskTraceEventThreshold seconds.
+// A zero |task_start_time| means timing was not recorded for this task.
+void TaskQueueManagerImpl::NotifyDidProcessTask(
+    const ExecutingTask& executing_task,
+    LazyNow* time_after_task) {
+  TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+               "TaskQueueManagerImpl::NotifyDidProcessTaskObservers");
+
+  base::ThreadTicks task_end_thread_time;
+  if (executing_task.should_record_thread_time)
+    task_end_thread_time = base::ThreadTicks::Now();
+
+  if (!executing_task.task_queue->GetShouldNotifyObservers())
+    return;
+
+  double task_start_time_sec =
+      MonotonicTimeInSeconds(executing_task.task_start_time);
+  double task_end_time_sec = 0;
+
+  if (task_start_time_sec) {
+    task_end_time_sec = MonotonicTimeInSeconds(time_after_task->Now());
+
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+                 "TaskQueueManager.DidProcessTaskTimeObservers");
+    for (auto& observer : main_thread_only().task_time_observers)
+      observer.DidProcessTask(task_start_time_sec, task_end_time_sec);
+  }
+
+  {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+                 "TaskQueueManager.DidProcessTaskObservers");
+    for (auto& observer : main_thread_only().task_observers)
+      observer.DidProcessTask(executing_task.pending_task);
+  }
+
+  {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+                 "TaskQueueManager.QueueNotifyDidProcessTask");
+    executing_task.task_queue->NotifyDidProcessTask(
+        executing_task.pending_task);
+  }
+
+  {
+    TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+                 "TaskQueueManager.QueueOnTaskCompleted");
+    if (task_start_time_sec && task_end_time_sec) {
+      // The thread-time delta is only meaningful when
+      // |should_record_thread_time| was set; otherwise both ticks are zero.
+      executing_task.task_queue->OnTaskCompleted(
+          executing_task.pending_task, executing_task.task_start_time,
+          time_after_task->Now(),
+          task_end_thread_time - executing_task.task_start_thread_time);
+    }
+  }
+
+  if (task_start_time_sec && task_end_time_sec &&
+      task_end_time_sec - task_start_time_sec > kLongTaskTraceEventThreshold) {
+    TRACE_EVENT_INSTANT1("blink", "LongTask", TRACE_EVENT_SCOPE_THREAD,
+                         "duration", task_end_time_sec - task_start_time_sec);
+  }
+}
+
+// Sets how many tasks the controller may run per DoWork; must be >= 1.
+void TaskQueueManagerImpl::SetWorkBatchSize(int work_batch_size) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  DCHECK_GE(work_batch_size, 1);
+  controller_->SetWorkBatchSize(work_batch_size);
+}
+
+// Registers an observer notified before/after every task (main thread only).
+void TaskQueueManagerImpl::AddTaskObserver(
+    base::MessageLoop::TaskObserver* task_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  main_thread_only().task_observers.AddObserver(task_observer);
+}
+
+void TaskQueueManagerImpl::RemoveTaskObserver(
+    base::MessageLoop::TaskObserver* task_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  main_thread_only().task_observers.RemoveObserver(task_observer);
+}
+
+// Registers an observer of per-task wall-clock timing (seconds; see
+// MonotonicTimeInSeconds). Main thread only.
+void TaskQueueManagerImpl::AddTaskTimeObserver(
+    TaskTimeObserver* task_time_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  main_thread_only().task_time_observers.AddObserver(task_time_observer);
+}
+
+void TaskQueueManagerImpl::RemoveTaskTimeObserver(
+    TaskTimeObserver* task_time_observer) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  main_thread_only().task_time_observers.RemoveObserver(task_time_observer);
+}
+
+// Returns true if no task ran on a quiescence-monitored queue since the last
+// call, and resets the bit (set in NotifyWillProcessTask).
+bool TaskQueueManagerImpl::GetAndClearSystemIsQuiescentBit() {
+  bool task_was_run =
+      main_thread_only().task_was_run_on_quiescence_monitored_queue;
+  main_thread_only().task_was_run_on_quiescence_monitored_queue = false;
+  return !task_was_run;
+}
+
+// Hands out monotonically increasing enqueue orders used to sequence tasks.
+internal::EnqueueOrder TaskQueueManagerImpl::GetNextSequenceNumber() {
+  return enqueue_order_generator_.GenerateNext();
+}
+
+// Returns a LazyNow over the controller's clock (time fetched on demand).
+LazyNow TaskQueueManagerImpl::CreateLazyNow() const {
+  return LazyNow(controller_->GetClock());
+}
+
+// Builds a tracing snapshot of the manager's state (queue sets, selector,
+// time domains, pending incoming work) plus the outcome of the most recent
+// selector decision. Consumed by the TakeTask trace-event snapshot.
+std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+TaskQueueManagerImpl::AsValueWithSelectorResult(
+    bool should_run,
+    internal::WorkQueue* selected_work_queue) const {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  std::unique_ptr<base::trace_event::TracedValue> state(
+      new base::trace_event::TracedValue());
+  base::TimeTicks now =
+      main_thread_only().real_time_domain->CreateLazyNow().Now();
+  state->BeginArray("active_queues");
+  for (auto& queue : main_thread_only().active_queues)
+    queue->AsValueInto(now, state.get());
+  state->EndArray();
+  state->BeginArray("queues_to_gracefully_shutdown");
+  for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
+    pair.first->AsValueInto(now, state.get());
+  state->EndArray();
+  state->BeginArray("queues_to_delete");
+  for (const auto& pair : main_thread_only().queues_to_delete)
+    pair.first->AsValueInto(now, state.get());
+  state->EndArray();
+  state->BeginDictionary("selector");
+  main_thread_only().selector.AsValueInto(state.get());
+  state->EndDictionary();
+  // |selected_work_queue| is only valid when the selector chose one.
+  if (should_run) {
+    state->SetString("selected_queue",
+                     selected_work_queue->task_queue()->GetName());
+    state->SetString("work_queue_name", selected_work_queue->name());
+  }
+
+  state->BeginArray("time_domains");
+  for (auto* time_domain : main_thread_only().time_domains)
+    time_domain->AsValueInto(state.get());
+  state->EndArray();
+  {
+    base::AutoLock lock(any_thread_lock_);
+    state->BeginArray("has_incoming_immediate_work");
+    for (const auto& pair : any_thread().has_incoming_immediate_work) {
+      state->AppendString(pair.first->GetName());
+    }
+    state->EndArray();
+  }
+  return std::move(state);
+}
+
+// TaskQueueSelector::Observer: a previously disabled queue was re-enabled.
+void TaskQueueManagerImpl::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
+  DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+  DCHECK(queue->IsQueueEnabled());
+  // Only schedule DoWork if there's something to do.
+  if (queue->HasTaskToRunImmediately() && !queue->BlockedByFence())
+    MaybeScheduleImmediateWork(FROM_HERE);
+}
+
+// Drops canceled delayed tasks from all live queues (active and gracefully
+// shutting down), sampling each time domain's clock at most once.
+void TaskQueueManagerImpl::SweepCanceledDelayedTasks() {
+  std::map<TimeDomain*, base::TimeTicks> time_domain_now;
+  for (const auto& queue : main_thread_only().active_queues)
+    SweepCanceledDelayedTasksInQueue(queue, &time_domain_now);
+  for (const auto& pair : main_thread_only().queues_to_gracefully_shutdown)
+    SweepCanceledDelayedTasksInQueue(pair.first, &time_domain_now);
+}
+
+// Pulls queues that were handed to the (cross-thread) graceful shutdown
+// helper into this manager's main-thread-owned shutdown map.
+void TaskQueueManagerImpl::TakeQueuesToGracefullyShutdownFromHelper() {
+  std::vector<std::unique_ptr<internal::TaskQueueImpl>> queues =
+      graceful_shutdown_helper_->TakeQueues();
+  for (std::unique_ptr<internal::TaskQueueImpl>& queue : queues) {
+    main_thread_only().queues_to_gracefully_shutdown[queue.get()] =
+        std::move(queue);
+  }
+}
+
+// Called from DidRunTask at nesting depth 0: unregisters gracefully-shutting-
+// down queues that have drained, and frees queues parked in
+// |queues_to_delete| now that no task is mid-execution.
+void TaskQueueManagerImpl::CleanUpQueues() {
+  TakeQueuesToGracefullyShutdownFromHelper();
+
+  for (auto it = main_thread_only().queues_to_gracefully_shutdown.begin();
+       it != main_thread_only().queues_to_gracefully_shutdown.end();) {
+    if (it->first->IsEmpty()) {
+      // NOTE(review): UnregisterTaskQueueImpl already erases the queue from
+      // |active_queues|; the erase below looks redundant — confirm.
+      UnregisterTaskQueueImpl(std::move(it->second));
+      main_thread_only().active_queues.erase(it->first);
+      main_thread_only().queues_to_gracefully_shutdown.erase(it++);
+    } else {
+      ++it;
+    }
+  }
+  main_thread_only().queues_to_delete.clear();
+}
+
+// Returns the ref-counted helper task queues use to hand themselves over for
+// graceful shutdown, possibly from other threads.
+scoped_refptr<internal::GracefulQueueShutdownHelper>
+TaskQueueManagerImpl::GetGracefulQueueShutdownHelper() const {
+  return graceful_shutdown_helper_;
+}
+
+base::WeakPtr<TaskQueueManagerImpl> TaskQueueManagerImpl::GetWeakPtr() {
+  return weak_factory_.GetWeakPtr();
+}
+
+// Installs |task_runner| as the thread's default runner via the controller;
+// reverted in the destructor by RestoreDefaultTaskRunner.
+void TaskQueueManagerImpl::SetDefaultTaskRunner(
+    scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+  controller_->SetDefaultTaskRunner(task_runner);
+}
+
+// Clock accessors delegate to the controller so tests can inject mock time.
+const base::TickClock* TaskQueueManagerImpl::GetClock() const {
+  return controller_->GetClock();
+}
+
+base::TimeTicks TaskQueueManagerImpl::NowTicks() const {
+  return controller_->GetClock()->NowTicks();
+}
+
+// Decides (randomly, at kSamplingRateForRecordingCPUTime) whether to record
+// per-task thread/CPU time; always false where ThreadTicks is unsupported.
+bool TaskQueueManagerImpl::ShouldRecordCPUTimeForTask() {
+  return base::ThreadTicks::IsSupported() &&
+         main_thread_only().uniform_distribution(
+             main_thread_only().random_generator) <
+             kSamplingRateForRecordingCPUTime;
+}
+
+// Optimization is disabled so the sentinel comparison cannot be folded away;
+// a mismatch indicates memory corruption (checked via CHECK in TakeTask).
+MSVC_DISABLE_OPTIMIZE()
+bool TaskQueueManagerImpl::Validate() {
+  return memory_corruption_sentinel_ == kMemoryCorruptionSentinelValue;
+}
+MSVC_ENABLE_OPTIMIZE()
+
+// Allocates the crash keys populated per-task in NotifyWillProcessTask.
+// May be called at most once (DCHECKed).
+void TaskQueueManagerImpl::EnableCrashKeys(
+    const char* file_name_crash_key_name,
+    const char* function_name_crash_key_name) {
+  DCHECK(!main_thread_only().file_name_crash_key);
+  DCHECK(!main_thread_only().function_name_crash_key);
+  main_thread_only().file_name_crash_key = base::debug::AllocateCrashKeyString(
+      file_name_crash_key_name, base::debug::CrashKeySize::Size64);
+  main_thread_only().function_name_crash_key =
+      base::debug::AllocateCrashKeyString(function_name_crash_key_name,
+                                          base::debug::CrashKeySize::Size64);
+}
+
+// Returns the queue of the innermost currently running task, or nullptr when
+// no task is executing. Main thread only.
+internal::TaskQueueImpl* TaskQueueManagerImpl::currently_executing_task_queue()
+    const {
+  if (main_thread_only().task_execution_stack.empty())
+    return nullptr;
+  return main_thread_only().task_execution_stack.rbegin()->task_queue;
+}
+
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.h b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.h
new file mode 100644
index 00000000000..dde60bee6b7
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.h
@@ -0,0 +1,334 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_IMPL_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_IMPL_H_
+
+#include <map>
+#include <random>
+
+#include "base/atomic_sequence_num.h"
+#include "base/cancelable_callback.h"
+#include "base/debug/task_annotator.h"
+#include "base/macros.h"
+#include "base/memory/scoped_refptr.h"
+#include "base/memory/weak_ptr.h"
+#include "base/message_loop/message_loop.h"
+#include "base/pending_task.h"
+#include "base/run_loop.h"
+#include "base/synchronization/lock.h"
+#include "base/threading/thread_checker.h"
+#include "third_party/blink/renderer/platform/scheduler/base/enqueue_order.h"
+#include "third_party/blink/renderer/platform/scheduler/base/graceful_queue_shutdown_helper.h"
+#include "third_party/blink/renderer/platform/scheduler/base/moveable_auto_lock.h"
+#include "third_party/blink/renderer/platform/scheduler/base/sequenced_task_source.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_manager.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_selector.h"
+
+namespace base {
+namespace debug {
+struct CrashKeyString;
+} // namespace debug
+
+namespace trace_event {
+class ConvertableToTraceFormat;
+} // namespace trace_event
+} // namespace base
+
+namespace blink {
+namespace scheduler {
+
+namespace internal {
+class TaskQueueImpl;
+class ThreadController;
+} // namespace internal
+
+class LazyNow;
+class RealTimeDomain;
+class TaskQueue;
+class TaskTimeObserver;
+class TimeDomain;
+
+// The task queue manager provides N task queues and a selector interface for
+// choosing which task queue to service next. Each task queue consists of two
+// sub queues:
+//
+// 1. Incoming task queue. Tasks that are posted get immediately appended here.
+// When a task is appended into an empty incoming queue, the task manager
+// work function (DoWork()) is scheduled to run on the main task runner.
+//
+// 2. Work queue. If a work queue is empty when DoWork() is entered, tasks from
+// the incoming task queue (if any) are moved here. The work queues are
+// registered with the selector as input to the scheduling decision.
+//
+class PLATFORM_EXPORT TaskQueueManagerImpl
+ : public TaskQueueManager,
+ public internal::SequencedTaskSource,
+ public internal::TaskQueueSelector::Observer,
+ public base::RunLoop::NestingObserver {
+ public:
+ // Keep public methods in sync with TaskQueueManager interface.
+ // The general rule is to keep methods only used in scheduler/base just here
+ // and not to define them in the interface.
+
+ using Observer = TaskQueueManager::Observer;
+
+ ~TaskQueueManagerImpl() override;
+
+ // Assume direct control over current thread and create a TaskQueueManager.
+ // This function should be called only once per thread.
+ // This function assumes that a MessageLoop is initialized for current
+ // thread.
+ static std::unique_ptr<TaskQueueManagerImpl> TakeOverCurrentThread();
+
+ // TaskQueueManager implementation:
+ void SetObserver(Observer* observer) override;
+ void AddTaskObserver(base::MessageLoop::TaskObserver* task_observer) override;
+ void RemoveTaskObserver(
+ base::MessageLoop::TaskObserver* task_observer) override;
+ void AddTaskTimeObserver(TaskTimeObserver* task_time_observer) override;
+ void RemoveTaskTimeObserver(TaskTimeObserver* task_time_observer) override;
+ void RegisterTimeDomain(TimeDomain* time_domain) override;
+ void UnregisterTimeDomain(TimeDomain* time_domain) override;
+ RealTimeDomain* GetRealTimeDomain() const override;
+ const base::TickClock* GetClock() const override;
+ base::TimeTicks NowTicks() const override;
+ void SetDefaultTaskRunner(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) override;
+ void SweepCanceledDelayedTasks() override;
+ bool GetAndClearSystemIsQuiescentBit() override;
+ void SetWorkBatchSize(int work_batch_size) override;
+ void EnableCrashKeys(const char* file_name_crash_key,
+ const char* function_name_crash_key) override;
+
+ // Implementation of SequencedTaskSource:
+ base::Optional<base::PendingTask> TakeTask() override;
+ void DidRunTask() override;
+ base::TimeDelta DelayTillNextTask(LazyNow* lazy_now) override;
+
+ // Requests that a task to process work is posted on the main task runner.
+ // These tasks are de-duplicated in two buckets: main-thread and all other
+ // threads. This distinction is done to reduce the overhead from locks, we
+ // assume the main-thread path will be hot.
+ void MaybeScheduleImmediateWork(const base::Location& from_here);
+
+ // Requests that a delayed task to process work is posted on the main task
+ // runner. These delayed tasks are de-duplicated. Must be called on the thread
+ // this class was created on.
+ void MaybeScheduleDelayedWork(const base::Location& from_here,
+ TimeDomain* requesting_time_domain,
+ base::TimeTicks now,
+ base::TimeTicks run_time);
+
+ // Cancels a delayed task to process work at |run_time|, previously requested
+ // with MaybeScheduleDelayedWork.
+ void CancelDelayedWork(TimeDomain* requesting_time_domain,
+ base::TimeTicks run_time);
+
+ LazyNow CreateLazyNow() const;
+
+ // Returns the currently executing TaskQueue if any. Must be called on the
+ // thread this class was created on.
+ internal::TaskQueueImpl* currently_executing_task_queue() const;
+
+ // Unregisters a TaskQueue previously created by |NewTaskQueue()|.
+ // No tasks will run on this queue after this call.
+ void UnregisterTaskQueueImpl(
+ std::unique_ptr<internal::TaskQueueImpl> task_queue);
+
+ scoped_refptr<internal::GracefulQueueShutdownHelper>
+ GetGracefulQueueShutdownHelper() const;
+
+ base::WeakPtr<TaskQueueManagerImpl> GetWeakPtr();
+
+ protected:
+ // Create a task queue manager where |controller| controls the thread
+ // on which the tasks are eventually run.
+ explicit TaskQueueManagerImpl(
+ std::unique_ptr<internal::ThreadController> controller);
+
+ friend class internal::TaskQueueImpl;
+ friend class TaskQueueManagerForTest;
+
+ private:
+ enum class ProcessTaskResult {
+ kDeferred,
+ kExecuted,
+ kTaskQueueManagerDeleted,
+ };
+
+ using IncomingImmediateWorkMap =
+ std::unordered_map<internal::TaskQueueImpl*, internal::EnqueueOrder>;
+
+ struct AnyThread {
+ AnyThread() = default;
+
+ // Task queues with newly available work on the incoming queue.
+ IncomingImmediateWorkMap has_incoming_immediate_work;
+ };
+
+ // TODO(scheduler-dev): Review if we really need non-nestable tasks at all.
+ struct NonNestableTask {
+ internal::TaskQueueImpl::Task task;
+ internal::TaskQueueImpl* task_queue;
+ WorkType work_type;
+ };
+ using NonNestableTaskDeque = WTF::Deque<NonNestableTask, 8>;
+
+ // We have to track rentrancy because we support nested runloops but the
+ // selector interface is unaware of those. This struct keeps track off all
+ // task related state needed to make pairs of TakeTask() / DidRunTask() work.
+ struct ExecutingTask {
+ ExecutingTask()
+ : pending_task(
+ TaskQueue::PostedTask(base::OnceClosure(), base::Location()),
+ base::TimeTicks(),
+ 0) {}
+
+ ExecutingTask(internal::TaskQueueImpl::Task&& pending_task,
+ internal::TaskQueueImpl* task_queue)
+ : pending_task(std::move(pending_task)), task_queue(task_queue) {}
+
+ internal::TaskQueueImpl::Task pending_task;
+ internal::TaskQueueImpl* task_queue = nullptr;
+ base::TimeTicks task_start_time;
+ base::ThreadTicks task_start_thread_time;
+ bool should_record_thread_time = false;
+ };
+
+ struct MainThreadOnly {
+ MainThreadOnly();
+
+ int nesting_depth = 0;
+ NonNestableTaskDeque non_nestable_task_queue;
+ // TODO(altimin): Switch to instruction pointer crash key when it's
+ // available.
+ base::debug::CrashKeyString* file_name_crash_key = nullptr;
+ base::debug::CrashKeyString* function_name_crash_key = nullptr;
+
+ std::mt19937_64 random_generator;
+ std::uniform_real_distribution<double> uniform_distribution;
+
+ internal::TaskQueueSelector selector;
+ base::ObserverList<base::MessageLoop::TaskObserver> task_observers;
+ base::ObserverList<TaskTimeObserver> task_time_observers;
+ std::set<TimeDomain*> time_domains;
+ std::unique_ptr<RealTimeDomain> real_time_domain;
+
+ // List of task queues managed by this TaskQueueManager.
+ // - active_queues contains queues that are still running tasks.
+ // Most often they are owned by relevant TaskQueues, but
+ // queues_to_gracefully_shutdown_ are included here too.
+ // - queues_to_gracefully_shutdown contains queues which should be deleted
+ // when they become empty.
+ // - queues_to_delete contains soon-to-be-deleted queues, because some
+ // internal scheduling code does not expect queues to be pulled
+ // from underneath.
+
+ std::set<internal::TaskQueueImpl*> active_queues;
+ std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
+ queues_to_gracefully_shutdown;
+ std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
+ queues_to_delete;
+
+ bool task_was_run_on_quiescence_monitored_queue = false;
+
+ // Due to nested runloops more than one task can be executing concurrently.
+ std::list<ExecutingTask> task_execution_stack;
+
+ Observer* observer = nullptr; // NOT OWNED
+ };
+
+ // TaskQueueSelector::Observer:
+ void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) override;
+
+ // base::RunLoop::NestingObserver:
+ void OnBeginNestedRunLoop() override;
+ void OnExitNestedRunLoop() override;
+
+ // Called by the task queue to register a new pending task.
+ void DidQueueTask(const internal::TaskQueueImpl::Task& pending_task);
+
+ // Delayed Tasks with run_times <= Now() are enqueued onto the work queue and
+ // reloads any empty work queues.
+ void WakeUpReadyDelayedQueues(LazyNow* lazy_now);
+
+ void NotifyWillProcessTask(ExecutingTask* task, LazyNow* time_before_task);
+ void NotifyDidProcessTask(const ExecutingTask& task,
+ LazyNow* time_after_task);
+
+ internal::EnqueueOrder GetNextSequenceNumber();
+
+ std::unique_ptr<base::trace_event::ConvertableToTraceFormat>
+ AsValueWithSelectorResult(bool should_run,
+ internal::WorkQueue* selected_work_queue) const;
+
+ // Adds |queue| to |any_thread().has_incoming_immediate_work_| and if
+ // |queue_is_blocked| is false it makes sure a DoWork is posted.
+ // Can be called from any thread.
+ void OnQueueHasIncomingImmediateWork(internal::TaskQueueImpl* queue,
+ internal::EnqueueOrder enqueue_order,
+ bool queue_is_blocked);
+
+ // Calls |ReloadImmediateWorkQueueIfEmpty| on all queues in
+ // |queues_to_reload|.
+ void ReloadEmptyWorkQueues(
+ const IncomingImmediateWorkMap& queues_to_reload) const;
+
+ std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
+ const TaskQueue::Spec& spec) override;
+
+ void TakeQueuesToGracefullyShutdownFromHelper();
+
+ // Deletes queues marked for deletion and empty queues marked for shutdown.
+ void CleanUpQueues();
+
+ bool ShouldRecordCPUTimeForTask();
+
+ const scoped_refptr<internal::GracefulQueueShutdownHelper>
+ graceful_shutdown_helper_;
+
+ internal::EnqueueOrderGenerator enqueue_order_generator_;
+
+ std::unique_ptr<internal::ThreadController> controller_;
+
+ mutable base::Lock any_thread_lock_;
+ AnyThread any_thread_;
+
+ struct AnyThread& any_thread() {
+ any_thread_lock_.AssertAcquired();
+ return any_thread_;
+ }
+ const struct AnyThread& any_thread() const {
+ any_thread_lock_.AssertAcquired();
+ return any_thread_;
+ }
+
+ // A check to bail out early during memory corruption.
+ // crbug.com/757940
+ bool Validate();
+
+ int32_t memory_corruption_sentinel_;
+
+ THREAD_CHECKER(main_thread_checker_);
+ MainThreadOnly main_thread_only_;
+ MainThreadOnly& main_thread_only() {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ return main_thread_only_;
+ }
+ const MainThreadOnly& main_thread_only() const {
+ DCHECK_CALLED_ON_VALID_THREAD(main_thread_checker_);
+ return main_thread_only_;
+ }
+
+ base::WeakPtrFactory<TaskQueueManagerImpl> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(TaskQueueManagerImpl);
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_MANAGER_IMPL_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl_unittest.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl_unittest.cc
new file mode 100644
index 00000000000..303dc435830
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl_unittest.cc
@@ -0,0 +1,3312 @@
+// Copyright 2018 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.h"
+
+#include <stddef.h>
+#include <memory>
+#include <utility>
+
+#include "base/location.h"
+#include "base/memory/ref_counted_memory.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/synchronization/waitable_event.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "base/test/trace_event_analyzer.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/trace_event/blame_context.h"
+#include "components/viz/test/ordered_simple_task_runner.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "third_party/blink/public/platform/scheduler/test/renderer_scheduler_test_support.h"
+#include "third_party/blink/renderer/platform/scheduler/base/real_time_domain.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_selector.h"
+#include "third_party/blink/renderer/platform/scheduler/base/test_count_uses_time_source.h"
+#include "third_party/blink/renderer/platform/scheduler/base/test_task_time_observer.h"
+#include "third_party/blink/renderer/platform/scheduler/base/thread_controller_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h"
+#include "third_party/blink/renderer/platform/scheduler/test/task_queue_manager_for_test.h"
+#include "third_party/blink/renderer/platform/scheduler/test/test_task_queue.h"
+
+using testing::AnyNumber;
+using testing::Contains;
+using testing::ElementsAre;
+using testing::ElementsAreArray;
+using testing::Mock;
+using testing::Not;
+using testing::_;
+using blink::scheduler::internal::EnqueueOrder;
+
+namespace blink {
+namespace scheduler {
+
+class TaskQueueManagerTest : public testing::Test {
+ public:
+ TaskQueueManagerTest() = default;
+ void DeleteTaskQueueManager() { manager_.reset(); }
+
+ protected:
+ void TearDown() { manager_.reset(); }
+
+ scoped_refptr<TestTaskQueue> CreateTaskQueueWithSpec(TaskQueue::Spec spec) {
+ return manager_->CreateTaskQueue<TestTaskQueue>(spec);
+ }
+
+ scoped_refptr<TestTaskQueue> CreateTaskQueue() {
+ return CreateTaskQueueWithSpec(TaskQueue::Spec("test"));
+ }
+
+ scoped_refptr<TestTaskQueue> CreateTaskQueueWithMonitoredQuiescence() {
+ return CreateTaskQueueWithSpec(
+ TaskQueue::Spec("test").SetShouldMonitorQuiescence(true));
+ }
+
+ void Initialize(size_t num_queues) {
+ now_src_.Advance(base::TimeDelta::FromMicroseconds(1000));
+
+ test_task_runner_ =
+ base::WrapRefCounted(new cc::OrderedSimpleTaskRunner(&now_src_, false));
+
+ manager_ = TaskQueueManagerForTest::Create(nullptr, test_task_runner_.get(),
+ &now_src_);
+
+ for (size_t i = 0; i < num_queues; i++)
+ runners_.push_back(CreateTaskQueue());
+ }
+
+ void InitializeWithRealMessageLoop(size_t num_queues) {
+ message_loop_.reset(new base::MessageLoop());
+ original_message_loop_task_runner_ = message_loop_->task_runner();
+ // A null clock triggers some assertions.
+ now_src_.Advance(base::TimeDelta::FromMicroseconds(1000));
+ manager_ = TaskQueueManagerForTest::Create(
+ message_loop_.get(), GetSingleThreadTaskRunnerForTesting(), &now_src_);
+
+ for (size_t i = 0; i < num_queues; i++)
+ runners_.push_back(CreateTaskQueue());
+ }
+
+ void WakeUpReadyDelayedQueues(LazyNow lazy_now) {
+ manager_->WakeUpReadyDelayedQueues(&lazy_now);
+ }
+
+ EnqueueOrder GetNextSequenceNumber() const {
+ return manager_->GetNextSequenceNumber();
+ }
+
+ void MaybeScheduleImmediateWork(const base::Location& from_here) {
+ manager_->MaybeScheduleImmediateWork(from_here);
+ }
+
+ // Runs all immediate tasks until there is no more work to do and advances
+ // time if there is a pending delayed task. |per_run_time_callback| is called
+ // when the clock advances.
+ void RunUntilIdle(base::RepeatingClosure per_run_time_callback) {
+ for (;;) {
+ // Advance time if we've run out of immediate work to do.
+ if (!manager_->HasImmediateWork()) {
+ base::TimeTicks run_time;
+ if (manager_->GetRealTimeDomain()->NextScheduledRunTime(&run_time)) {
+ now_src_.SetNowTicks(run_time);
+ per_run_time_callback.Run();
+ } else {
+ break;
+ }
+ }
+
+ test_task_runner_->RunPendingTasks();
+ }
+ }
+
+ base::TimeTicks Now() { return now_src_.NowTicks(); }
+
+ std::unique_ptr<base::MessageLoop> message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner>
+ original_message_loop_task_runner_;
+ base::SimpleTestTickClock now_src_;
+ scoped_refptr<cc::OrderedSimpleTaskRunner> test_task_runner_;
+ std::unique_ptr<TaskQueueManagerForTest> manager_;
+ std::vector<scoped_refptr<TestTaskQueue>> runners_;
+ TestTaskTimeObserver test_task_time_observer_;
+};
+
+void PostFromNestedRunloop(
+ base::MessageLoop* message_loop,
+ base::SingleThreadTaskRunner* runner,
+ std::vector<std::pair<base::OnceClosure, bool>>* tasks) {
+ base::MessageLoop::ScopedNestableTaskAllower allow(message_loop);
+ for (std::pair<base::OnceClosure, bool>& pair : *tasks) {
+ if (pair.second) {
+ runner->PostTask(FROM_HERE, std::move(pair.first));
+ } else {
+ runner->PostNonNestableTask(FROM_HERE, std::move(pair.first));
+ }
+ }
+ base::RunLoop().RunUntilIdle();
+}
+
+void NopTask() {}
+
+TEST_F(TaskQueueManagerTest,
+ NowCalledMinimumNumberOfTimesToComputeTaskDurations) {
+ message_loop_.reset(new base::MessageLoop());
+ // This memory is managed by the TaskQueueManager, but we need to hold a
+ // pointer to this object to read out how many times Now was called.
+ TestCountUsesTimeSource test_count_uses_time_source;
+
+ manager_ = TaskQueueManagerForTest::Create(
+ nullptr, GetSingleThreadTaskRunnerForTesting(),
+ &test_count_uses_time_source);
+ manager_->SetWorkBatchSize(6);
+ manager_->AddTaskTimeObserver(&test_task_time_observer_);
+
+ for (size_t i = 0; i < 3; i++)
+ runners_.push_back(CreateTaskQueue());
+
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ runners_[1]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ runners_[1]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ runners_[2]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ runners_[2]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+
+ base::RunLoop().RunUntilIdle();
+ // Now is called each time a task is queued, when first task is started
+ // running, and when a task is completed. 6 * 3 = 18 calls.
+ EXPECT_EQ(18, test_count_uses_time_source.now_calls_count());
+}
+
+TEST_F(TaskQueueManagerTest, NowNotCalledForNestedTasks) {
+ message_loop_.reset(new base::MessageLoop());
+ // This memory is managed by the TaskQueueManager, but we need to hold a
+ // pointer to this object to read out how many times Now was called.
+ TestCountUsesTimeSource test_count_uses_time_source;
+
+ manager_ = TaskQueueManagerForTest::Create(message_loop_.get(),
+ message_loop_->task_runner(),
+ &test_count_uses_time_source);
+ manager_->AddTaskTimeObserver(&test_task_time_observer_);
+
+ runners_.push_back(CreateTaskQueue());
+
+ std::vector<std::pair<base::OnceClosure, bool>>
+ tasks_to_post_from_nested_loop;
+ for (int i = 0; i < 7; ++i) {
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&NopTask), true));
+ }
+
+ runners_[0]->PostTask(
+ FROM_HERE,
+ base::BindOnce(&PostFromNestedRunloop, message_loop_.get(),
+ base::RetainedRef(runners_[0]),
+ base::Unretained(&tasks_to_post_from_nested_loop)));
+
+ base::RunLoop().RunUntilIdle();
+ // We need to call Now twice, to measure the start and end of the outermost
+ // task. We shouldn't call it for any of the nested tasks.
+ // Also Now is called when a task is scheduled (8 times).
+ // That brings expected call count for Now() to 2 + 8 = 10
+ EXPECT_EQ(10, test_count_uses_time_source.now_calls_count());
+}
+
+void NullTask() {}
+
+void TestTask(EnqueueOrder value, std::vector<EnqueueOrder>* out_result) {
+ out_result->push_back(value);
+}
+
+void DisableQueueTestTask(EnqueueOrder value,
+ std::vector<EnqueueOrder>* out_result,
+ TaskQueue::QueueEnabledVoter* voter) {
+ out_result->push_back(value);
+ voter->SetQueueEnabled(false);
+}
+
+TEST_F(TaskQueueManagerTest, SingleQueuePosting) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1, 2, 3));
+}
+
+TEST_F(TaskQueueManagerTest, MultiQueuePosting) {
+ Initialize(3u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ runners_[1]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+ runners_[1]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 4, &run_order));
+ runners_[2]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 5, &run_order));
+ runners_[2]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 6, &run_order));
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1, 2, 3, 4, 5, 6));
+}
+
+TEST_F(TaskQueueManagerTest, NonNestableTaskPosting) {
+ InitializeWithRealMessageLoop(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostNonNestableTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order));
+
+ base::RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1));
+}
+
+TEST_F(TaskQueueManagerTest, NonNestableTaskExecutesInExpectedOrder) {
+ InitializeWithRealMessageLoop(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 4, &run_order));
+ runners_[0]->PostNonNestableTask(FROM_HERE,
+ base::BindOnce(&TestTask, 5, &run_order));
+
+ base::RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1, 2, 3, 4, 5));
+}
+
+TEST_F(TaskQueueManagerTest, NonNestableTasksDoesntExecuteInNestedLoop) {
+ InitializeWithRealMessageLoop(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+
+ std::vector<std::pair<base::OnceClosure, bool>>
+ tasks_to_post_from_nested_loop;
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&TestTask, 3, &run_order), false));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&TestTask, 4, &run_order), false));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&TestTask, 5, &run_order), true));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&TestTask, 6, &run_order), true));
+
+ runners_[0]->PostTask(
+ FROM_HERE,
+ base::BindOnce(&PostFromNestedRunloop, message_loop_.get(),
+ base::RetainedRef(runners_[0]),
+ base::Unretained(&tasks_to_post_from_nested_loop)));
+
+ base::RunLoop().RunUntilIdle();
+ // Note we expect tasks 3 & 4 to run last because they're non-nestable.
+ EXPECT_THAT(run_order, ElementsAre(1, 2, 5, 6, 3, 4));
+}
+
+namespace {
+
+void InsertFenceAndPostTestTask(EnqueueOrder id,
+ std::vector<EnqueueOrder>* run_order,
+ scoped_refptr<TestTaskQueue> task_queue) {
+ run_order->push_back(id);
+ task_queue->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ task_queue->PostTask(FROM_HERE, base::BindOnce(&TestTask, id + 1, run_order));
+
+ // Force reload of immediate work queue. In real life the same effect can be
+ // achieved with cross-thread posting.
+ task_queue->GetTaskQueueImpl()->ReloadImmediateWorkQueueIfEmpty();
+}
+
+} // namespace
+
+TEST_F(TaskQueueManagerTest, TaskQueueDisabledFromNestedLoop) {
+ InitializeWithRealMessageLoop(1u);
+ std::vector<EnqueueOrder> run_order;
+
+ std::vector<std::pair<base::OnceClosure, bool>>
+ tasks_to_post_from_nested_loop;
+
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&TestTask, 1, &run_order), false));
+ tasks_to_post_from_nested_loop.push_back(std::make_pair(
+ base::BindOnce(&InsertFenceAndPostTestTask, 2, &run_order, runners_[0]),
+ true));
+
+ runners_[0]->PostTask(
+ FROM_HERE,
+ base::BindOnce(&PostFromNestedRunloop, message_loop_.get(),
+ base::RetainedRef(runners_[0]),
+ base::Unretained(&tasks_to_post_from_nested_loop)));
+ base::RunLoop().RunUntilIdle();
+
+ // Task 1 shouldn't run first due to it being non-nestable and queue gets
+ // blocked after task 2. Task 1 runs after existing nested message loop
+ // due to being posted before inserting a fence.
+ // This test checks that breaks when nestable task is pushed into a redo
+ // queue.
+ EXPECT_THAT(run_order, ElementsAre(2, 1));
+
+ runners_[0]->RemoveFence();
+ base::RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(2, 1, 3));
+}
+
+TEST_F(TaskQueueManagerTest, HasPendingImmediateWork_ImmediateTask) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Move the task into the |immediate_work_queue|.
+ EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->immediate_work_queue()->Empty());
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ test_task_runner_->RunUntilIdle();
+ EXPECT_FALSE(
+ runners_[0]->GetTaskQueueImpl()->immediate_work_queue()->Empty());
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Run the task, making the queue empty.
+ voter->SetQueueEnabled(true);
+ test_task_runner_->RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+TEST_F(TaskQueueManagerTest, HasPendingImmediateWork_DelayedTask) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ base::TimeDelta delay(base::TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order), delay);
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ now_src_.Advance(delay);
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Move the task into the |delayed_work_queue|.
+ WakeUpReadyDelayedQueues(LazyNow(&now_src_));
+ EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->delayed_work_queue()->Empty());
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // Run the task, making the queue empty.
+ test_task_runner_->RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+TEST_F(TaskQueueManagerTest, DelayedTaskPosting) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ base::TimeDelta delay(base::TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order), delay);
+ EXPECT_EQ(delay, test_task_runner_->DelayToNextTaskTime());
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ EXPECT_TRUE(run_order.empty());
+
+ // The task doesn't run before the delay has completed.
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(9));
+ EXPECT_TRUE(run_order.empty());
+
+ // After the delay has completed, the task runs normally.
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(1));
+ EXPECT_THAT(run_order, ElementsAre(1));
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+bool MessageLoopTaskCounter(size_t* count) {
+ *count = *count + 1;
+ return true;
+}
+
+TEST_F(TaskQueueManagerTest, DelayedTaskExecutedInOneMessageLoopTask) {
+ Initialize(1u);
+
+ base::TimeDelta delay(base::TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay);
+
+ size_t task_count = 0;
+ test_task_runner_->RunTasksWhile(
+ base::BindRepeating(&MessageLoopTaskCounter, &task_count));
+ EXPECT_EQ(1u, task_count);
+}
+
+TEST_F(TaskQueueManagerTest, DelayedTaskPosting_MultipleTasks_DecendingOrder) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order),
+ base::TimeDelta::FromMilliseconds(10));
+
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 2, &run_order),
+ base::TimeDelta::FromMilliseconds(8));
+
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 3, &run_order),
+ base::TimeDelta::FromMilliseconds(5));
+
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(5),
+ test_task_runner_->DelayToNextTaskTime());
+
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(5));
+ EXPECT_THAT(run_order, ElementsAre(3));
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(3),
+ test_task_runner_->DelayToNextTaskTime());
+
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(3));
+ EXPECT_THAT(run_order, ElementsAre(3, 2));
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(2),
+ test_task_runner_->DelayToNextTaskTime());
+
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(2));
+ EXPECT_THAT(run_order, ElementsAre(3, 2, 1));
+}
+
+TEST_F(TaskQueueManagerTest, DelayedTaskPosting_MultipleTasks_AscendingOrder) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order),
+ base::TimeDelta::FromMilliseconds(1));
+
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 2, &run_order),
+ base::TimeDelta::FromMilliseconds(5));
+
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 3, &run_order),
+ base::TimeDelta::FromMilliseconds(10));
+
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(1),
+ test_task_runner_->DelayToNextTaskTime());
+
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(1));
+ EXPECT_THAT(run_order, ElementsAre(1));
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(4),
+ test_task_runner_->DelayToNextTaskTime());
+
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(4));
+ EXPECT_THAT(run_order, ElementsAre(1, 2));
+ EXPECT_EQ(base::TimeDelta::FromMilliseconds(5),
+ test_task_runner_->DelayToNextTaskTime());
+
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(5));
+ EXPECT_THAT(run_order, ElementsAre(1, 2, 3));
+}
+
+TEST_F(TaskQueueManagerTest, PostDelayedTask_SharesUnderlyingDelayedTasks) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ base::TimeDelta delay(base::TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order), delay);
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 2, &run_order), delay);
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 3, &run_order), delay);
+
+ EXPECT_EQ(1u, test_task_runner_->NumPendingTasks());
+}
+
+class TestObject {
+ public:
+ ~TestObject() { destructor_count__++; }
+
+ void Run() { FAIL() << "TestObject::Run should not be called"; }
+
+ static int destructor_count__;
+};
+
+int TestObject::destructor_count__ = 0;
+
+TEST_F(TaskQueueManagerTest, PendingDelayedTasksRemovedOnShutdown) {
+ Initialize(1u);
+
+ TestObject::destructor_count__ = 0;
+
+ base::TimeDelta delay(base::TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&TestObject::Run, base::Owned(new TestObject())), delay);
+ runners_[0]->PostTask(
+ FROM_HERE,
+ base::BindOnce(&TestObject::Run, base::Owned(new TestObject())));
+
+ manager_.reset();
+
+ EXPECT_EQ(2, TestObject::destructor_count__);
+}
+
+TEST_F(TaskQueueManagerTest, InsertAndRemoveFence) {
+ Initialize(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ // Posting a task when pumping is disabled doesn't result in work getting
+ // posted.
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ EXPECT_FALSE(test_task_runner_->HasPendingTasks());
+
+ // However polling still works.
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ // After removing the fence the task runs normally.
+ runners_[0]->RemoveFence();
+ EXPECT_TRUE(test_task_runner_->HasPendingTasks());
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1));
+}
+
+TEST_F(TaskQueueManagerTest, RemovingFenceForDisabledQueueDoesNotPostDoWork) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+
+ runners_[0]->RemoveFence();
+ EXPECT_FALSE(test_task_runner_->HasPendingTasks());
+}
+
+TEST_F(TaskQueueManagerTest, EnablingFencedQueueDoesNotPostDoWork) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+
+ voter->SetQueueEnabled(true);
+ EXPECT_FALSE(test_task_runner_->HasPendingTasks());
+}
+
+TEST_F(TaskQueueManagerTest, DenyRunning_BeforePosting) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ EXPECT_FALSE(test_task_runner_->HasPendingTasks());
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ voter->SetQueueEnabled(true);
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1));
+}
+
+TEST_F(TaskQueueManagerTest, DenyRunning_AfterPosting) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ EXPECT_TRUE(test_task_runner_->HasPendingTasks());
+ voter->SetQueueEnabled(false);
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ voter->SetQueueEnabled(true);
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1));
+}
+
+TEST_F(TaskQueueManagerTest, DenyRunning_AfterRemovingFence) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ runners_[0]->RemoveFence();
+ voter->SetQueueEnabled(true);
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1));
+}
+
+TEST_F(TaskQueueManagerTest, RemovingFenceWithDelayedTask) {
+ Initialize(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ // Posting a delayed task when fenced will apply the delay, but won't cause
+ // work to executed afterwards.
+ base::TimeDelta delay(base::TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order), delay);
+
+ // The task does not run even though it's delay is up.
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(10));
+ EXPECT_TRUE(run_order.empty());
+
+ // Removing the fence causes the task to run.
+ runners_[0]->RemoveFence();
+ EXPECT_TRUE(test_task_runner_->HasPendingTasks());
+ test_task_runner_->RunPendingTasks();
+ EXPECT_THAT(run_order, ElementsAre(1));
+}
+
+// A fence holds back delayed tasks even after their delays expire; removing
+// it releases only the tasks that are already due (here tasks 1 and 2, but
+// not task 3 whose 20ms delay has not elapsed).
+TEST_F(TaskQueueManagerTest, RemovingFenceWithMultipleDelayedTasks) {
+ Initialize(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ // Posting a delayed task when fenced will apply the delay, but won't cause
+ // work to be executed afterwards.
+ base::TimeDelta delay1(base::TimeDelta::FromMilliseconds(1));
+ base::TimeDelta delay2(base::TimeDelta::FromMilliseconds(10));
+ base::TimeDelta delay3(base::TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&TestTask, 1, &run_order), delay1);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&TestTask, 2, &run_order), delay2);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&TestTask, 3, &run_order), delay3);
+
+ // Advance past delay1 and delay2 (but not delay3); the fence blocks all.
+ now_src_.Advance(base::TimeDelta::FromMilliseconds(15));
+ test_task_runner_->RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+
+ // Removing the fence causes the ready tasks to run.
+ runners_[0]->RemoveFence();
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1, 2));
+}
+
+// A fence inserted before a delayed task is posted prevents the task from
+// running even once its delay has fully elapsed.
+TEST_F(TaskQueueManagerTest, InsertFencePreventsDelayedTasksFromRunning) {
+ Initialize(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ base::TimeDelta delay(base::TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order), delay);
+
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(10));
+ EXPECT_TRUE(run_order.empty());
+}
+
+// Re-inserting a fence moves the cut-off point: each kNow fence admits the
+// tasks posted before it and blocks those posted after it.
+TEST_F(TaskQueueManagerTest, MultipleFences) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ // Task 3 is behind the fence; only 1 and 2 may run.
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1, 2));
+
+ // The second fence admits task 3 (posted before it) ...
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ // Subsequent tasks should be blocked.
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 4, &run_order));
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1, 2, 3));
+}
+
+// Insert+remove of a fence before any posting leaves the queue unblocked.
+TEST_F(TaskQueueManagerTest, InsertFenceThenImmediatlyRemoveDoesNotBlock) {
+ Initialize(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->RemoveFence();
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1, 2));
+}
+
+// Tasks posted while fenced still run once the fence is removed, even if the
+// removal happens before the run loop is pumped.
+TEST_F(TaskQueueManagerTest, InsertFencePostThenRemoveDoesNotBlock) {
+ Initialize(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->RemoveFence();
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1, 2));
+}
+
+// As MultipleFences, but the queue is empty when the first fence goes in:
+// the second fence admits task 1 and blocks task 2.
+TEST_F(TaskQueueManagerTest, MultipleFencesWithInitiallyEmptyQueue) {
+ Initialize(1u);
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1));
+}
+
+// BlockedByFence() reports true only while a fence is actually holding work
+// back — not merely while a fence exists with runnable tasks ahead of it.
+TEST_F(TaskQueueManagerTest, BlockedByFence) {
+ Initialize(1u);
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ EXPECT_TRUE(runners_[0]->BlockedByFence());
+
+ runners_[0]->RemoveFence();
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ // A task posted before the fence is still runnable, so not blocked yet.
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ // Once that task has run, the fence is what stops further progress.
+ test_task_runner_->RunUntilIdle();
+ EXPECT_TRUE(runners_[0]->BlockedByFence());
+
+ runners_[0]->RemoveFence();
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+}
+
+// A kBeginningOfTime fence blocks even tasks a kNow fence would have let
+// through.
+TEST_F(TaskQueueManagerTest, BlockedByFence_BothTypesOfFence) {
+ Initialize(1u);
+
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+ EXPECT_FALSE(runners_[0]->BlockedByFence());
+
+ runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kBeginningOfTime);
+ EXPECT_TRUE(runners_[0]->BlockedByFence());
+}
+
+namespace {
+
+// Appends the clock's current tick value to |run_times|; used to observe
+// exactly when a task ran.
+void RecordTimeTask(std::vector<base::TimeTicks>* run_times,
+ base::SimpleTestTickClock* clock) {
+ run_times->push_back(clock->NowTicks());
+}
+
+// As RecordTimeTask, but also records which queue the task ran on.
+void RecordTimeAndQueueTask(
+ std::vector<std::pair<scoped_refptr<TestTaskQueue>, base::TimeTicks>>*
+ run_times,
+ scoped_refptr<TestTaskQueue> task_queue,
+ base::SimpleTestTickClock* clock) {
+ run_times->emplace_back(task_queue, clock->NowTicks());
+}
+
+} // namespace
+
+// A fence scheduled for t=250ms activates between the 200ms and 300ms tasks:
+// the first two run, the third is held until the fence is removed.
+TEST_F(TaskQueueManagerTest, DelayedFence_DelayedTasks) {
+ Initialize(1u);
+ test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+
+ std::vector<base::TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&RecordTimeTask, &run_times, &now_src_),
+ base::TimeDelta::FromMilliseconds(100));
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&RecordTimeTask, &run_times, &now_src_),
+ base::TimeDelta::FromMilliseconds(200));
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&RecordTimeTask, &run_times, &now_src_),
+ base::TimeDelta::FromMilliseconds(300));
+
+ // The fence is scheduled but not yet active at insertion time.
+ runners_[0]->InsertFenceAt(Now() + base::TimeDelta::FromMilliseconds(250));
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+
+ test_task_runner_->RunUntilIdle();
+
+ // Fence became active at 250ms; tasks at ~101ms and ~201ms ran first.
+ EXPECT_TRUE(runners_[0]->HasActiveFence());
+ EXPECT_THAT(
+ run_times,
+ ElementsAre(base::TimeTicks() + base::TimeDelta::FromMilliseconds(101),
+ base::TimeTicks() + base::TimeDelta::FromMilliseconds(201)));
+ run_times.clear();
+
+ runners_[0]->RemoveFence();
+ test_task_runner_->RunUntilIdle();
+
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ EXPECT_THAT(run_times, ElementsAre(base::TimeTicks() +
+ base::TimeDelta::FromMilliseconds(301)));
+}
+
+// Immediate tasks posted one per 100ms: the 250ms fence lets the first three
+// through, blocks the rest, and all blocked tasks run at removal time (501ms).
+TEST_F(TaskQueueManagerTest, DelayedFence_ImmediateTasks) {
+ Initialize(1u);
+ test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+
+ std::vector<base::TimeTicks> run_times;
+ runners_[0]->InsertFenceAt(Now() + base::TimeDelta::FromMilliseconds(250));
+
+ for (int i = 0; i < 5; ++i) {
+ runners_[0]->PostTask(
+ FROM_HERE, base::BindOnce(&RecordTimeTask, &run_times, &now_src_));
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(100));
+ if (i < 2) {
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ } else {
+ EXPECT_TRUE(runners_[0]->HasActiveFence());
+ }
+ }
+
+ EXPECT_THAT(
+ run_times,
+ ElementsAre(base::TimeTicks() + base::TimeDelta::FromMilliseconds(1),
+ base::TimeTicks() + base::TimeDelta::FromMilliseconds(101),
+ base::TimeTicks() + base::TimeDelta::FromMilliseconds(201)));
+ run_times.clear();
+
+ runners_[0]->RemoveFence();
+ test_task_runner_->RunUntilIdle();
+
+ // Both blocked tasks run back-to-back once the fence is gone.
+ EXPECT_THAT(
+ run_times,
+ ElementsAre(base::TimeTicks() + base::TimeDelta::FromMilliseconds(501),
+ base::TimeTicks() + base::TimeDelta::FromMilliseconds(501)));
+}
+
+// Removing a delayed fence after it has activated must fully deactivate it:
+// no residual blocking once RemoveFence() has been called.
+TEST_F(TaskQueueManagerTest, DelayedFence_RemovedFenceDoesNotActivate) {
+ Initialize(1u);
+ test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+
+ std::vector<base::TimeTicks> run_times;
+ runners_[0]->InsertFenceAt(Now() + base::TimeDelta::FromMilliseconds(250));
+
+ for (int i = 0; i < 3; ++i) {
+ runners_[0]->PostTask(
+ FROM_HERE, base::BindOnce(&RecordTimeTask, &run_times, &now_src_));
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(100));
+ }
+
+ EXPECT_TRUE(runners_[0]->HasActiveFence());
+ runners_[0]->RemoveFence();
+
+ // After removal the fence never re-activates; tasks keep running on time.
+ for (int i = 0; i < 2; ++i) {
+ runners_[0]->PostTask(
+ FROM_HERE, base::BindOnce(&RecordTimeTask, &run_times, &now_src_));
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(100));
+ EXPECT_FALSE(runners_[0]->HasActiveFence());
+ }
+
+ EXPECT_THAT(
+ run_times,
+ ElementsAre(base::TimeTicks() + base::TimeDelta::FromMilliseconds(1),
+ base::TimeTicks() + base::TimeDelta::FromMilliseconds(101),
+ base::TimeTicks() + base::TimeDelta::FromMilliseconds(201),
+ base::TimeTicks() + base::TimeDelta::FromMilliseconds(301),
+ base::TimeTicks() + base::TimeDelta::FromMilliseconds(401)));
+}
+
+TEST_F(TaskQueueManagerTest, DelayedFence_TakeIncomingImmediateQueue) {
+ // This test checks that everything works correctly when a work queue
+ // is swapped with an immediate incoming queue and a delayed fence
+ // is activated, forcing a different queue to become active.
+ Initialize(2u);
+ test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+
+ scoped_refptr<TestTaskQueue> queue1 = runners_[0];
+ scoped_refptr<TestTaskQueue> queue2 = runners_[1];
+
+ std::vector<std::pair<scoped_refptr<TestTaskQueue>, base::TimeTicks>>
+ run_times;
+
+ // Fence ensures that the task posted after advancing time is blocked.
+ queue1->InsertFenceAt(Now() + base::TimeDelta::FromMilliseconds(250));
+
+ // This task should not be blocked and should run immediately after
+ // advancing time at 301ms.
+ queue1->PostTask(FROM_HERE, base::BindOnce(&RecordTimeAndQueueTask,
+ &run_times, queue1, &now_src_));
+ // Force reload of immediate work queue. In real life the same effect can be
+ // achieved with cross-thread posting.
+ queue1->GetTaskQueueImpl()->ReloadImmediateWorkQueueIfEmpty();
+
+ now_src_.Advance(base::TimeDelta::FromMilliseconds(300));
+
+ // This task should be blocked.
+ queue1->PostTask(FROM_HERE, base::BindOnce(&RecordTimeAndQueueTask,
+ &run_times, queue1, &now_src_));
+ // This task on a different runner should run as expected.
+ queue2->PostTask(FROM_HERE, base::BindOnce(&RecordTimeAndQueueTask,
+ &run_times, queue2, &now_src_));
+
+ test_task_runner_->RunUntilIdle();
+
+ // Only the pre-fence queue1 task and the queue2 task ran; the post-fence
+ // queue1 task remains blocked.
+ EXPECT_THAT(
+ run_times,
+ ElementsAre(
+ std::make_pair(queue1, base::TimeTicks() +
+ base::TimeDelta::FromMilliseconds(301)),
+ std::make_pair(queue2, base::TimeTicks() +
+ base::TimeDelta::FromMilliseconds(301))));
+}
+
+namespace {
+
+// Records |countdown| then re-posts itself with countdown-1 until it hits 0,
+// exercising posting from within a running task.
+void ReentrantTestTask(scoped_refptr<base::SingleThreadTaskRunner> runner,
+ int countdown,
+ std::vector<EnqueueOrder>* out_result) {
+ out_result->push_back(countdown);
+ if (--countdown) {
+ runner->PostTask(
+ FROM_HERE, BindOnce(&ReentrantTestTask, runner, countdown, out_result));
+ }
+}
+
+} // namespace
+
+// Tasks posted from inside a task run in FIFO order after the current one.
+TEST_F(TaskQueueManagerTest, ReentrantPosting) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(
+ FROM_HERE, BindOnce(&ReentrantTestTask, runners_[0], 3, &run_order));
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(3, 2, 1));
+}
+
+// Once the manager is destroyed, pending and newly posted tasks never run.
+TEST_F(TaskQueueManagerTest, NoTasksAfterShutdown) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ manager_.reset();
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_TRUE(run_order.empty());
+}
+
+// Helper run on another thread: posts TestTask(1) to |runner|.
+void PostTaskToRunner(scoped_refptr<base::SingleThreadTaskRunner> runner,
+ std::vector<EnqueueOrder>* run_order) {
+ runner->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, run_order));
+}
+
+// Cross-thread posting: a task posted from a worker thread runs on the
+// manager's thread.
+TEST_F(TaskQueueManagerTest, PostFromThread) {
+ InitializeWithRealMessageLoop(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ base::Thread thread("TestThread");
+ thread.Start();
+ thread.task_runner()->PostTask(
+ FROM_HERE, base::BindOnce(&PostTaskToRunner, runners_[0], &run_order));
+ thread.Stop();
+
+ base::RunLoop().RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1));
+}
+
+// Self-reposting task used to check DoWork scheduling below.
+void RePostingTestTask(scoped_refptr<base::SingleThreadTaskRunner> runner,
+ int* run_count) {
+ (*run_count)++;
+ runner->PostTask(
+ FROM_HERE,
+ BindOnce(&RePostingTestTask, base::Unretained(runner.get()), run_count));
+}
+
+// A task that re-posts itself must schedule at most one follow-up DoWork.
+TEST_F(TaskQueueManagerTest, DoWorkCantPostItselfMultipleTimes) {
+ Initialize(1u);
+
+ int run_count = 0;
+ runners_[0]->PostTask(
+ FROM_HERE, base::BindOnce(&RePostingTestTask, runners_[0], &run_count));
+
+ test_task_runner_->RunPendingTasks();
+ // NOTE without the executing_task_ check in MaybeScheduleDoWork there
+ // will be two tasks here.
+ EXPECT_EQ(1u, test_task_runner_->NumPendingTasks());
+ EXPECT_EQ(1, run_count);
+}
+
+// A task posted from inside a nested run loop (task 1) runs after the tasks
+// that were already queued at the outer level (0 and 2).
+TEST_F(TaskQueueManagerTest, PostFromNestedRunloop) {
+ InitializeWithRealMessageLoop(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ std::vector<std::pair<base::OnceClosure, bool>>
+ tasks_to_post_from_nested_loop;
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&TestTask, 1, &run_order), true));
+
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 0, &run_order));
+ runners_[0]->PostTask(
+ FROM_HERE,
+ base::BindOnce(&PostFromNestedRunloop, message_loop_.get(),
+ base::RetainedRef(runners_[0]),
+ base::Unretained(&tasks_to_post_from_nested_loop)));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+
+ base::RunLoop().RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(0, 2, 1));
+}
+
+// With a work batch size of 2, each host message-loop task drains two queued
+// tasks.
+TEST_F(TaskQueueManagerTest, WorkBatching) {
+ Initialize(1u);
+
+ manager_->SetWorkBatchSize(2);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 4, &run_order));
+
+ // Running one task in the host message loop should cause two posted tasks to
+ // get executed.
+ EXPECT_EQ(test_task_runner_->NumPendingTasks(), 1u);
+ test_task_runner_->RunPendingTasks();
+ EXPECT_THAT(run_order, ElementsAre(1, 2));
+
+ // The second task runs the remaining two posted tasks.
+ EXPECT_EQ(test_task_runner_->NumPendingTasks(), 1u);
+ test_task_runner_->RunPendingTasks();
+ EXPECT_THAT(run_order, ElementsAre(1, 2, 3, 4));
+}
+
+// gmock TaskObserver used to count Will/DidProcessTask notifications.
+class MockTaskObserver : public base::MessageLoop::TaskObserver {
+ public:
+ MOCK_METHOD1(DidProcessTask, void(const base::PendingTask& task));
+ MOCK_METHOD1(WillProcessTask, void(const base::PendingTask& task));
+};
+
+// A manager-level observer is notified for every task that runs.
+TEST_F(TaskQueueManagerTest, TaskObserverAdding) {
+ InitializeWithRealMessageLoop(1u);
+ MockTaskObserver observer;
+
+ manager_->SetWorkBatchSize(2);
+ manager_->AddTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(2);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(2);
+ base::RunLoop().RunUntilIdle();
+}
+
+// A removed manager-level observer receives no notifications.
+TEST_F(TaskQueueManagerTest, TaskObserverRemoving) {
+ InitializeWithRealMessageLoop(1u);
+ MockTaskObserver observer;
+ manager_->SetWorkBatchSize(2);
+ manager_->AddTaskObserver(&observer);
+ manager_->RemoveTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(0);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+
+ base::RunLoop().RunUntilIdle();
+}
+
+// Task body that unregisters |observer| from |manager| mid-run.
+void RemoveObserverTask(TaskQueueManagerImpl* manager,
+ base::MessageLoop::TaskObserver* observer) {
+ manager->RemoveTaskObserver(observer);
+}
+
+// Removing the observer from within the observed task suppresses the matching
+// DidProcessTask callback.
+TEST_F(TaskQueueManagerTest, TaskObserverRemovingInsideTask) {
+ InitializeWithRealMessageLoop(1u);
+ MockTaskObserver observer;
+ manager_->SetWorkBatchSize(3);
+ manager_->AddTaskObserver(&observer);
+
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&RemoveObserverTask,
+ manager_.get(), &observer));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(1);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+ base::RunLoop().RunUntilIdle();
+}
+
+// A per-queue observer sees only tasks from its own queue (one of the two
+// posted tasks here).
+TEST_F(TaskQueueManagerTest, QueueTaskObserverAdding) {
+ InitializeWithRealMessageLoop(2u);
+ MockTaskObserver observer;
+
+ manager_->SetWorkBatchSize(2);
+ runners_[0]->AddTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[1]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(1);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(1);
+ base::RunLoop().RunUntilIdle();
+}
+
+// A removed per-queue observer receives no notifications.
+TEST_F(TaskQueueManagerTest, QueueTaskObserverRemoving) {
+ InitializeWithRealMessageLoop(1u);
+ MockTaskObserver observer;
+ manager_->SetWorkBatchSize(2);
+ runners_[0]->AddTaskObserver(&observer);
+ runners_[0]->RemoveTaskObserver(&observer);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(0);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+
+ base::RunLoop().RunUntilIdle();
+}
+
+// Task body that unregisters |observer| from |queue| mid-run.
+void RemoveQueueObserverTask(scoped_refptr<TaskQueue> queue,
+ base::MessageLoop::TaskObserver* observer) {
+ queue->RemoveTaskObserver(observer);
+}
+
+// Per-queue analogue of TaskObserverRemovingInsideTask: removal mid-task
+// suppresses DidProcessTask.
+TEST_F(TaskQueueManagerTest, QueueTaskObserverRemovingInsideTask) {
+ InitializeWithRealMessageLoop(1u);
+ MockTaskObserver observer;
+ runners_[0]->AddTaskObserver(&observer);
+
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&RemoveQueueObserverTask,
+ runners_[0], &observer));
+
+ EXPECT_CALL(observer, WillProcessTask(_)).Times(1);
+ EXPECT_CALL(observer, DidProcessTask(_)).Times(0);
+ base::RunLoop().RunUntilIdle();
+}
+
+// RunsTasksInCurrentSequence() must keep answering safely after the manager
+// has been destroyed.
+TEST_F(TaskQueueManagerTest, ThreadCheckAfterTermination) {
+ Initialize(1u);
+ EXPECT_TRUE(runners_[0]->RunsTasksInCurrentSequence());
+ manager_.reset();
+ EXPECT_TRUE(runners_[0]->RunsTasksInCurrentSequence());
+}
+
+// NextScheduledRunTime() reflects only delayed tasks and always reports the
+// earliest wake-up across all queues in the domain.
+TEST_F(TaskQueueManagerTest, TimeDomain_NextScheduledRunTime) {
+ Initialize(2u);
+ now_src_.Advance(base::TimeDelta::FromMicroseconds(10000));
+
+ // With no delayed tasks.
+ base::TimeTicks run_time;
+ EXPECT_FALSE(manager_->GetRealTimeDomain()->NextScheduledRunTime(&run_time));
+
+ // With a non-delayed task.
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ EXPECT_FALSE(manager_->GetRealTimeDomain()->NextScheduledRunTime(&run_time));
+
+ // With a delayed task.
+ base::TimeDelta expected_delay = base::TimeDelta::FromMilliseconds(50);
+ runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+ expected_delay);
+ EXPECT_TRUE(manager_->GetRealTimeDomain()->NextScheduledRunTime(&run_time));
+ EXPECT_EQ(now_src_.NowTicks() + expected_delay, run_time);
+
+ // With another delayed task in the same queue with a longer delay.
+ runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+ base::TimeDelta::FromMilliseconds(100));
+ EXPECT_TRUE(manager_->GetRealTimeDomain()->NextScheduledRunTime(&run_time));
+ EXPECT_EQ(now_src_.NowTicks() + expected_delay, run_time);
+
+ // With another delayed task in the same queue with a shorter delay.
+ expected_delay = base::TimeDelta::FromMilliseconds(20);
+ runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+ expected_delay);
+ EXPECT_TRUE(manager_->GetRealTimeDomain()->NextScheduledRunTime(&run_time));
+ EXPECT_EQ(now_src_.NowTicks() + expected_delay, run_time);
+
+ // With another delayed task in a different queue with a shorter delay.
+ expected_delay = base::TimeDelta::FromMilliseconds(10);
+ runners_[1]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+ expected_delay);
+ EXPECT_TRUE(manager_->GetRealTimeDomain()->NextScheduledRunTime(&run_time));
+ EXPECT_EQ(now_src_.NowTicks() + expected_delay, run_time);
+
+ // Test it updates as time progresses
+ now_src_.Advance(expected_delay);
+ EXPECT_TRUE(manager_->GetRealTimeDomain()->NextScheduledRunTime(&run_time));
+ EXPECT_EQ(now_src_.NowTicks(), run_time);
+}
+
+// The soonest delayed task among three queues (delay2 = 5ms) wins; the
+// immediate task is ignored.
+TEST_F(TaskQueueManagerTest, TimeDomain_NextScheduledRunTime_MultipleQueues) {
+ Initialize(3u);
+
+ base::TimeDelta delay1 = base::TimeDelta::FromMilliseconds(50);
+ base::TimeDelta delay2 = base::TimeDelta::FromMilliseconds(5);
+ base::TimeDelta delay3 = base::TimeDelta::FromMilliseconds(10);
+ runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay1);
+ runners_[1]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay2);
+ runners_[2]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay3);
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+
+ base::TimeTicks run_time;
+ EXPECT_TRUE(manager_->GetRealTimeDomain()->NextScheduledRunTime(&run_time));
+ EXPECT_EQ(now_src_.NowTicks() + delay2, run_time);
+}
+
+TEST_F(TaskQueueManagerTest, DeleteTaskQueueManagerInsideATask) {
+ Initialize(1u);
+
+ runners_[0]->PostTask(
+ FROM_HERE, base::BindOnce(&TaskQueueManagerTest::DeleteTaskQueueManager,
+ base::Unretained(this)));
+
+ // This should not crash, assuming DoWork detects the TaskQueueManager has
+ // been deleted.
+ test_task_runner_->RunUntilIdle();
+}
+
+// The quiescence bit is set only by queues created with quiescence
+// monitoring; queue2 (unmonitored) does not affect it.
+TEST_F(TaskQueueManagerTest, GetAndClearSystemIsQuiescentBit) {
+ Initialize(3u);
+
+ scoped_refptr<TaskQueue> queue0 = CreateTaskQueueWithMonitoredQuiescence();
+ scoped_refptr<TaskQueue> queue1 = CreateTaskQueueWithMonitoredQuiescence();
+ scoped_refptr<TaskQueue> queue2 = CreateTaskQueue();
+
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ queue0->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ test_task_runner_->RunUntilIdle();
+ EXPECT_FALSE(manager_->GetAndClearSystemIsQuiescentBit());
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ queue1->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ test_task_runner_->RunUntilIdle();
+ EXPECT_FALSE(manager_->GetAndClearSystemIsQuiescentBit());
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ queue2->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ test_task_runner_->RunUntilIdle();
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+
+ queue0->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ queue1->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ test_task_runner_->RunUntilIdle();
+ EXPECT_FALSE(manager_->GetAndClearSystemIsQuiescentBit());
+ EXPECT_TRUE(manager_->GetAndClearSystemIsQuiescentBit());
+}
+
+// HasTaskToRunImmediately() tracks whether an immediate task is queued.
+TEST_F(TaskQueueManagerTest, HasPendingImmediateWork) {
+ Initialize(1u);
+
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(NullTask));
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+// A delayed task only counts as immediate work once its delay expires and it
+// has been moved onto the work queue by WakeUpReadyDelayedQueues.
+TEST_F(TaskQueueManagerTest, HasPendingImmediateWork_DelayedTasks) {
+ Initialize(1u);
+
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+ runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(NullTask),
+ base::TimeDelta::FromMilliseconds(12));
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+
+ // Move time forwards until just before the delayed task should run.
+ now_src_.Advance(base::TimeDelta::FromMilliseconds(10));
+ WakeUpReadyDelayedQueues(LazyNow(&now_src_));
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+
+ // Force the delayed task onto the work queue.
+ now_src_.Advance(base::TimeDelta::FromMilliseconds(2));
+ WakeUpReadyDelayedQueues(LazyNow(&now_src_));
+ EXPECT_TRUE(runners_[0]->HasTaskToRunImmediately());
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_FALSE(runners_[0]->HasTaskToRunImmediately());
+}
+
+// Records |value| and simulates 1ms of work by advancing the test clock.
+void ExpensiveTestTask(int value,
+ base::SimpleTestTickClock* clock,
+ std::vector<EnqueueOrder>* out_result) {
+ out_result->push_back(value);
+ clock->Advance(base::TimeDelta::FromMilliseconds(1));
+}
+
+TEST_F(TaskQueueManagerTest, ImmediateAndDelayedTaskInterleaving) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ base::TimeDelta delay = base::TimeDelta::FromMilliseconds(10);
+ // Delayed tasks are labelled 10..18, immediate tasks 0..8.
+ for (int i = 10; i < 19; i++) {
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&ExpensiveTestTask, i, &now_src_, &run_order),
+ delay);
+ }
+
+ test_task_runner_->RunForPeriod(delay);
+
+ for (int i = 0; i < 9; i++) {
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&ExpensiveTestTask, i,
+ &now_src_, &run_order));
+ }
+
+ test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+ test_task_runner_->RunUntilIdle();
+
+ // Delayed tasks are not allowed to starve out immediate work which is why
+ // some of the immediate tasks run out of order.
+ int expected_run_order[] = {10, 11, 12, 13, 0, 14, 15, 16, 1,
+ 17, 18, 2, 3, 4, 5, 6, 7, 8};
+ EXPECT_THAT(run_order, ElementsAreArray(expected_run_order));
+}
+
+// Even when its delay has long expired, a delayed task runs after immediate
+// tasks that were posted earlier on the same queue.
+TEST_F(TaskQueueManagerTest,
+ DelayedTaskDoesNotSkipAHeadOfNonDelayedTask_SameQueue) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+ base::TimeDelta delay = base::TimeDelta::FromMilliseconds(10);
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order), delay);
+
+ now_src_.Advance(delay * 2);
+ test_task_runner_->RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(2, 3, 1));
+}
+
+// Same ordering guarantee holds when the immediate tasks are on a different
+// queue than the delayed one.
+TEST_F(TaskQueueManagerTest,
+ DelayedTaskDoesNotSkipAHeadOfNonDelayedTask_DifferentQueues) {
+ Initialize(2u);
+
+ std::vector<EnqueueOrder> run_order;
+ base::TimeDelta delay = base::TimeDelta::FromMilliseconds(10);
+ runners_[1]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ runners_[1]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order), delay);
+
+ now_src_.Advance(delay * 2);
+ test_task_runner_->RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(2, 3, 1));
+}
+
+// Among delayed tasks, the one that becomes runnable first (shorter delay)
+// runs first, regardless of posting order or queue.
+TEST_F(TaskQueueManagerTest, DelayedTaskDoesNotSkipAHeadOfShorterDelayedTask) {
+ Initialize(2u);
+
+ std::vector<EnqueueOrder> run_order;
+ base::TimeDelta delay1 = base::TimeDelta::FromMilliseconds(10);
+ base::TimeDelta delay2 = base::TimeDelta::FromMilliseconds(5);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&TestTask, 1, &run_order), delay1);
+ runners_[1]->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&TestTask, 2, &run_order), delay2);
+
+ now_src_.Advance(delay1 * 2);
+ test_task_runner_->RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(2, 1));
+}
+
+// Stores whether the current thread is inside a nested run loop.
+void CheckIsNested(bool* is_nested) {
+ *is_nested = base::RunLoop::IsNestedOnCurrentThread();
+}
+
+// Runs a nested loop that quits itself, leaving the CheckIsNested task to be
+// executed by the outer loop.
+void PostAndQuitFromNestedRunloop(base::RunLoop* run_loop,
+ base::SingleThreadTaskRunner* runner,
+ bool* was_nested) {
+ base::MessageLoop::ScopedNestableTaskAllower allow(
+ base::MessageLoop::current());
+ runner->PostTask(FROM_HERE, run_loop->QuitClosure());
+ runner->PostTask(FROM_HERE, base::BindOnce(&CheckIsNested, was_nested));
+ run_loop->Run();
+}
+
+TEST_F(TaskQueueManagerTest, QuitWhileNested) {
+ // This test makes sure we don't continue running a work batch after a nested
+ // run loop has been exited in the middle of the batch.
+ InitializeWithRealMessageLoop(1u);
+ manager_->SetWorkBatchSize(2);
+
+ bool was_nested = true;
+ base::RunLoop run_loop;
+ runners_[0]->PostTask(
+ FROM_HERE,
+ base::BindOnce(&PostAndQuitFromNestedRunloop, base::Unretained(&run_loop),
+ base::RetainedRef(runners_[0]),
+ base::Unretained(&was_nested)));
+
+ base::RunLoop().RunUntilIdle();
+ // CheckIsNested must have run in the outer loop, not the nested one.
+ EXPECT_FALSE(was_nested);
+}
+
+// Observer that collects the sequence number of each executed task.
+class SequenceNumberCapturingTaskObserver
+ : public base::MessageLoop::TaskObserver {
+ public:
+ // MessageLoop::TaskObserver overrides.
+ void WillProcessTask(const base::PendingTask& pending_task) override {}
+ void DidProcessTask(const base::PendingTask& pending_task) override {
+ sequence_numbers_.push_back(pending_task.sequence_num);
+ }
+
+ const std::vector<EnqueueOrder>& sequence_numbers() const {
+ return sequence_numbers_;
+ }
+
+ private:
+ std::vector<EnqueueOrder> sequence_numbers_;
+};
+
+TEST_F(TaskQueueManagerTest, SequenceNumSetWhenTaskIsPosted) {
+ Initialize(1u);
+
+ SequenceNumberCapturingTaskObserver observer;
+ manager_->AddTaskObserver(&observer);
+
+ // Register four tasks that will run in reverse order.
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order),
+ base::TimeDelta::FromMilliseconds(30));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 2, &run_order),
+ base::TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 3, &run_order),
+ base::TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 4, &run_order));
+
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(40));
+ ASSERT_THAT(run_order, ElementsAre(4, 3, 2, 1));
+
+ // The sequence numbers are a one-based monotonically incrementing counter
+ // which should be set when the task is posted rather than when it's enqueued
+ // onto the Incoming queue. This counter starts with 2.
+ EXPECT_THAT(observer.sequence_numbers(), ElementsAre(5, 4, 3, 2));
+
+ manager_->RemoveTaskObserver(&observer);
+}
+
+// Queues created after initialization are distinct and all serviced in order.
+TEST_F(TaskQueueManagerTest, NewTaskQueues) {
+ Initialize(1u);
+
+ scoped_refptr<TaskQueue> queue1 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue2 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue3 = CreateTaskQueue();
+
+ ASSERT_NE(queue1, queue2);
+ ASSERT_NE(queue1, queue3);
+ ASSERT_NE(queue2, queue3);
+
+ std::vector<EnqueueOrder> run_order;
+ queue1->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ queue2->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ queue3->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+ test_task_runner_->RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(1, 2, 3));
+}
+
+// Shutting down one queue drops its pending task; other queues are
+// unaffected.
+TEST_F(TaskQueueManagerTest, ShutdownTaskQueue) {
+ Initialize(1u);
+
+ scoped_refptr<TaskQueue> queue1 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue2 = CreateTaskQueue();
+ scoped_refptr<TaskQueue> queue3 = CreateTaskQueue();
+
+ ASSERT_NE(queue1, queue2);
+ ASSERT_NE(queue1, queue3);
+ ASSERT_NE(queue2, queue3);
+
+ std::vector<EnqueueOrder> run_order;
+ queue1->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ queue2->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ queue3->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+
+ queue2->ShutdownTaskQueue();
+ test_task_runner_->RunUntilIdle();
+
+ EXPECT_THAT(run_order, ElementsAre(1, 3));
+}
+
+// Shutdown also cancels a queue's pending delayed tasks (task 2 here).
+TEST_F(TaskQueueManagerTest, ShutdownTaskQueue_WithDelayedTasks) {
+ Initialize(2u);
+
+ // Register three delayed tasks
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order),
+ base::TimeDelta::FromMilliseconds(10));
+ runners_[1]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 2, &run_order),
+ base::TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 3, &run_order),
+ base::TimeDelta::FromMilliseconds(30));
+
+ runners_[1]->ShutdownTaskQueue();
+ test_task_runner_->RunUntilIdle();
+
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(40));
+ ASSERT_THAT(run_order, ElementsAre(1, 3));
+}
+
+namespace {
+// Task body that shuts down |queue| from within a running task.
+void ShutdownQueue(scoped_refptr<TaskQueue> queue) {
+ queue->ShutdownTaskQueue();
+}
+} // namespace
+
+// Queues shut down from inside a task never run their already-posted tasks.
+TEST_F(TaskQueueManagerTest, ShutdownTaskQueue_InTasks) {
+ Initialize(3u);
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&ShutdownQueue, runners_[1]));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&ShutdownQueue, runners_[2]));
+ runners_[1]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+ runners_[2]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+
+ test_task_runner_->RunUntilIdle();
+ ASSERT_THAT(run_order, ElementsAre(1));
+}
+
+namespace {
+
+// gmock observer for TaskQueueManager lifecycle notifications.
+class MockObserver : public TaskQueueManager::Observer {
+ public:
+ MOCK_METHOD0(OnTriedToExecuteBlockedTask, void());
+ MOCK_METHOD0(OnBeginNestedRunLoop, void());
+ MOCK_METHOD0(OnExitNestedRunLoop, void());
+};
+
+} // namespace
+
+TEST_F(TaskQueueManagerTest, ShutdownTaskQueueInNestedLoop) {
+ InitializeWithRealMessageLoop(1u);
+
+ // We retain a reference to the task queue even when the manager has deleted
+ // its reference.
+ scoped_refptr<TaskQueue> task_queue = CreateTaskQueue();
+
+ std::vector<bool> log;
+ std::vector<std::pair<base::OnceClosure, bool>>
+ tasks_to_post_from_nested_loop;
+
+ // Inside a nested run loop, call task_queue->ShutdownTaskQueue, bookended
+ // by calls to HasOneRefTask to make sure the manager doesn't release its
+ // reference until the nested run loop exits.
+ // NB: This first HasOneRefTask is a sanity check.
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&NopTask), true));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&TaskQueue::ShutdownTaskQueue,
+ base::Unretained(task_queue.get())),
+ true));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&NopTask), true));
+ runners_[0]->PostTask(
+ FROM_HERE,
+ base::BindOnce(&PostFromNestedRunloop, message_loop_.get(),
+ base::RetainedRef(runners_[0]),
+ base::Unretained(&tasks_to_post_from_nested_loop)));
+ base::RunLoop().RunUntilIdle();
+
+ // Just make sure that we don't crash.
+}
+
+// Advancing virtual time in one domain releases only that domain's delayed
+// tasks; the other domain's tasks wait for its own clock.
+TEST_F(TaskQueueManagerTest, TimeDomainsAreIndependant) {
+ Initialize(2u);
+
+ base::TimeTicks start_time_ticks = manager_->NowTicks();
+ std::unique_ptr<VirtualTimeDomain> domain_a(
+ new VirtualTimeDomain(start_time_ticks));
+ std::unique_ptr<VirtualTimeDomain> domain_b(
+ new VirtualTimeDomain(start_time_ticks));
+ manager_->RegisterTimeDomain(domain_a.get());
+ manager_->RegisterTimeDomain(domain_b.get());
+ runners_[0]->SetTimeDomain(domain_a.get());
+ runners_[1]->SetTimeDomain(domain_b.get());
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order),
+ base::TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 2, &run_order),
+ base::TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 3, &run_order),
+ base::TimeDelta::FromMilliseconds(30));
+
+ runners_[1]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 4, &run_order),
+ base::TimeDelta::FromMilliseconds(10));
+ runners_[1]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 5, &run_order),
+ base::TimeDelta::FromMilliseconds(20));
+ runners_[1]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 6, &run_order),
+ base::TimeDelta::FromMilliseconds(30));
+
+ // Only domain_b advances, so only tasks 4-6 become runnable.
+ domain_b->AdvanceNowTo(start_time_ticks +
+ base::TimeDelta::FromMilliseconds(50));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(4, 5, 6));
+
+ // Now advance domain_a; tasks 1-3 follow.
+ domain_a->AdvanceNowTo(start_time_ticks +
+ base::TimeDelta::FromMilliseconds(50));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(4, 5, 6, 1, 2, 3));
+
+ // Queues must be shut down before their time domains are unregistered.
+ runners_[0]->ShutdownTaskQueue();
+ runners_[1]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+// Migrates a queue from one VirtualTimeDomain to another mid-test; delayed
+// tasks posted before the migration still run when the new domain's clock
+// reaches their run time.
+TEST_F(TaskQueueManagerTest, TimeDomainMigration) {
+ Initialize(1u);
+
+ base::TimeTicks start_time_ticks = manager_->NowTicks();
+ std::unique_ptr<VirtualTimeDomain> domain_a(
+ new VirtualTimeDomain(start_time_ticks));
+ manager_->RegisterTimeDomain(domain_a.get());
+ runners_[0]->SetTimeDomain(domain_a.get());
+
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order),
+ base::TimeDelta::FromMilliseconds(10));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 2, &run_order),
+ base::TimeDelta::FromMilliseconds(20));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 3, &run_order),
+ base::TimeDelta::FromMilliseconds(30));
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 4, &run_order),
+ base::TimeDelta::FromMilliseconds(40));
+
+ // Run the first two tasks under domain_a.
+ domain_a->AdvanceNowTo(start_time_ticks +
+ base::TimeDelta::FromMilliseconds(20));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1, 2));
+
+ // Migrate the queue to domain_b and run the remaining two tasks there.
+ std::unique_ptr<VirtualTimeDomain> domain_b(
+ new VirtualTimeDomain(start_time_ticks));
+ manager_->RegisterTimeDomain(domain_b.get());
+ runners_[0]->SetTimeDomain(domain_b.get());
+
+ domain_b->AdvanceNowTo(start_time_ticks +
+ base::TimeDelta::FromMilliseconds(50));
+ manager_->MaybeScheduleImmediateWork(FROM_HERE);
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1, 2, 3, 4));
+
+ runners_[0]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+// An immediate task posted just before the queue switches time domains must
+// still run after the migration.
+TEST_F(TaskQueueManagerTest, TimeDomainMigrationWithIncomingImmediateTasks) {
+ Initialize(1u);
+
+ base::TimeTicks start_time_ticks = manager_->NowTicks();
+ std::unique_ptr<VirtualTimeDomain> domain_a(
+ new VirtualTimeDomain(start_time_ticks));
+ std::unique_ptr<VirtualTimeDomain> domain_b(
+ new VirtualTimeDomain(start_time_ticks));
+ manager_->RegisterTimeDomain(domain_a.get());
+ manager_->RegisterTimeDomain(domain_b.get());
+
+ runners_[0]->SetTimeDomain(domain_a.get());
+ std::vector<EnqueueOrder> run_order;
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+ runners_[0]->SetTimeDomain(domain_b.get());
+
+ test_task_runner_->RunUntilIdle();
+ EXPECT_THAT(run_order, ElementsAre(1));
+
+ runners_[0]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+// Delayed tasks posted with strictly decreasing delays, while the queue
+// alternates between two RealTimeDomains, must still run in delay order
+// (4, 3, 2, 1), i.e. the wake-ups survive the domain switches.
+TEST_F(TaskQueueManagerTest,
+ PostDelayedTasksReverseOrderAlternatingTimeDomains) {
+ Initialize(1u);
+
+ std::vector<EnqueueOrder> run_order;
+
+ std::unique_ptr<RealTimeDomain> domain_a(new RealTimeDomain());
+ std::unique_ptr<RealTimeDomain> domain_b(new RealTimeDomain());
+ manager_->RegisterTimeDomain(domain_a.get());
+ manager_->RegisterTimeDomain(domain_b.get());
+
+ runners_[0]->SetTimeDomain(domain_a.get());
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 1, &run_order),
+ base::TimeDelta::FromMilliseconds(40));
+
+ runners_[0]->SetTimeDomain(domain_b.get());
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 2, &run_order),
+ base::TimeDelta::FromMilliseconds(30));
+
+ runners_[0]->SetTimeDomain(domain_a.get());
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 3, &run_order),
+ base::TimeDelta::FromMilliseconds(20));
+
+ runners_[0]->SetTimeDomain(domain_b.get());
+ runners_[0]->PostDelayedTask(FROM_HERE,
+ base::BindOnce(&TestTask, 4, &run_order),
+ base::TimeDelta::FromMilliseconds(10));
+
+ test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(40));
+ EXPECT_THAT(run_order, ElementsAre(4, 3, 2, 1));
+
+ runners_[0]->ShutdownTaskQueue();
+
+ manager_->UnregisterTimeDomain(domain_a.get());
+ manager_->UnregisterTimeDomain(domain_b.get());
+}
+
+namespace {
+
+// GMock implementation of TaskQueue::Observer used to verify
+// OnQueueNextWakeUpChanged notifications in the tests below.
+class MockTaskQueueObserver : public TaskQueue::Observer {
+ public:
+ ~MockTaskQueueObserver() override = default;
+
+ MOCK_METHOD2(OnQueueNextWakeUpChanged, void(TaskQueue*, base::TimeTicks));
+};
+
+} // namespace
+
+// OnQueueNextWakeUpChanged fires for the first immediate task posted on an
+// empty queue, but not for subsequent posts while the queue is non-empty.
+TEST_F(TaskQueueManagerTest, TaskQueueObserver_ImmediateTask) {
+ Initialize(1u);
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+
+ // We should get a notification when a task is posted on an empty queue.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(), _));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // But not subsequently.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Unless the immediate work queue is emptied.
+ runners_[0]->GetTaskQueueImpl()->ReloadImmediateWorkQueueIfEmpty();
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(), _));
+ runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+
+ // Tidy up.
+ runners_[0]->ShutdownTaskQueue();
+}
+
+// OnQueueNextWakeUpChanged fires only when a delayed post moves the queue's
+// next wake-up earlier, and again when a disabled queue is re-enabled.
+TEST_F(TaskQueueManagerTest, TaskQueueObserver_DelayedTask) {
+ Initialize(1u);
+
+ base::TimeTicks start_time = manager_->NowTicks();
+ base::TimeDelta delay10s(base::TimeDelta::FromSeconds(10));
+ base::TimeDelta delay100s(base::TimeDelta::FromSeconds(100));
+ base::TimeDelta delay1s(base::TimeDelta::FromSeconds(1));
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+
+ // We should get a notification when a delayed task is posted on an empty
+ // queue.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+ start_time + delay10s));
+ runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay10s);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // We should not get a notification for a longer delay.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay100s);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // We should get a notification for a shorter delay.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+ start_time + delay1s));
+ runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay1s);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ runners_[0]->CreateQueueEnabledVoter();
+ voter->SetQueueEnabled(false);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // When a queue has been enabled, we may get a notification if the
+ // TimeDomain's next scheduled wake-up has changed.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+ start_time + delay1s));
+ voter->SetQueueEnabled(true);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Tidy up.
+ runners_[0]->ShutdownTaskQueue();
+}
+
+// Same observer registered on two queues: each queue reports its own next
+// wake-up, and disabling/re-enabling one queue does not notify for the other.
+TEST_F(TaskQueueManagerTest, TaskQueueObserver_DelayedTaskMultipleQueues) {
+ Initialize(2u);
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+ runners_[1]->SetObserver(&observer);
+
+ base::TimeTicks start_time = manager_->NowTicks();
+ base::TimeDelta delay1s(base::TimeDelta::FromSeconds(1));
+ base::TimeDelta delay10s(base::TimeDelta::FromSeconds(10));
+
+ EXPECT_CALL(observer,
+ OnQueueNextWakeUpChanged(runners_[0].get(), start_time + delay1s))
+ .Times(1);
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[1].get(),
+ start_time + delay10s))
+ .Times(1);
+ runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay1s);
+ runners_[1]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay10s);
+ testing::Mock::VerifyAndClearExpectations(&observer);
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter0 =
+ runners_[0]->CreateQueueEnabledVoter();
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter1 =
+ runners_[1]->CreateQueueEnabledVoter();
+
+ // Disabling a queue should not trigger a notification.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ voter0->SetQueueEnabled(false);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Re-enabling it should trigger a notification.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[0].get(),
+ start_time + delay1s));
+ voter0->SetQueueEnabled(true);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Disabling a queue should not trigger a notification.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+ voter1->SetQueueEnabled(false);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Re-enabling it should trigger a notification.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(runners_[1].get(),
+ start_time + delay10s));
+ voter1->SetQueueEnabled(true);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Tidy up.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(AnyNumber());
+ runners_[0]->ShutdownTaskQueue();
+ runners_[1]->ShutdownTaskQueue();
+}
+
+TEST_F(TaskQueueManagerTest, TaskQueueObserver_DelayedWorkWhichCanRunNow) {
+ // This test checks that when delayed work becomes available
+ // the notification still fires. This usually happens when time advances
+ // and task becomes available in the middle of the scheduling code.
+ // For this test we rely on the fact that notification dispatching code
+ // is the same in all conditions and just change a time domain to
+ // trigger notification.
+
+ Initialize(1u);
+
+ base::TimeDelta delay1s(base::TimeDelta::FromSeconds(1));
+ base::TimeDelta delay10s(base::TimeDelta::FromSeconds(10));
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+
+ // We should get a notification when a delayed task is posted on an empty
+ // queue.
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _));
+ runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay1s);
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // NOTE(review): despite the name, |mock_time_domain| is a RealTimeDomain.
+ std::unique_ptr<TimeDomain> mock_time_domain =
+ std::make_unique<RealTimeDomain>();
+ manager_->RegisterTimeDomain(mock_time_domain.get());
+
+ // Advance the clock past the task's delay, then switch domains; the
+ // now-runnable delayed task must still produce a notification.
+ now_src_.Advance(delay10s);
+
+ EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _));
+ runners_[0]->SetTimeDomain(mock_time_domain.get());
+ Mock::VerifyAndClearExpectations(&observer);
+
+ // Tidy up.
+ runners_[0]->ShutdownTaskQueue();
+}
+
+// Test helper whose RecordTimeTask appends the clock's current NowTicks() to
+// a vector. Tasks bound via weak_factory_.GetWeakPtr() can be "canceled" by
+// calling weak_factory_.InvalidateWeakPtrs().
+class CancelableTask {
+ public:
+ explicit CancelableTask(const base::TickClock* clock)
+ : clock_(clock), weak_factory_(this) {}
+
+ // Records the current time of |clock_| into |run_times|.
+ void RecordTimeTask(std::vector<base::TimeTicks>* run_times) {
+ run_times->push_back(clock_->NowTicks());
+ }
+
+ const base::TickClock* clock_;
+ base::WeakPtrFactory<CancelableTask> weak_factory_;
+};
+
+// Sweeping a canceled front task must re-notify the observer with the next
+// remaining wake-up (|delay2|).
+TEST_F(TaskQueueManagerTest, TaskQueueObserver_SweepCanceledDelayedTasks) {
+ Initialize(1u);
+
+ MockTaskQueueObserver observer;
+ runners_[0]->SetObserver(&observer);
+
+ base::TimeTicks start_time = manager_->NowTicks();
+ base::TimeDelta delay1(base::TimeDelta::FromSeconds(5));
+ base::TimeDelta delay2(base::TimeDelta::FromSeconds(10));
+
+ EXPECT_CALL(observer,
+ OnQueueNextWakeUpChanged(runners_[0].get(), start_time + delay1))
+ .Times(1);
+
+ CancelableTask task1(&now_src_);
+ CancelableTask task2(&now_src_);
+ std::vector<base::TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task1.weak_factory_.GetWeakPtr(), &run_times),
+ delay1);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task2.weak_factory_.GetWeakPtr(), &run_times),
+ delay2);
+
+ // Cancel the earlier task.
+ task1.weak_factory_.InvalidateWeakPtrs();
+
+ // Sweeping away canceled delayed tasks should trigger a notification.
+ EXPECT_CALL(observer,
+ OnQueueNextWakeUpChanged(runners_[0].get(), start_time + delay2))
+ .Times(1);
+ manager_->SweepCanceledDelayedTasks();
+}
+
+namespace {
+// Asserts that exactly one task is pending on the underlying test runner at
+// the moment this task executes.
+void ChromiumRunloopInspectionTask(
+ scoped_refptr<cc::OrderedSimpleTaskRunner> test_task_runner) {
+ EXPECT_EQ(1u, test_task_runner->NumPendingTasks());
+}
+} // namespace
+
+// Verifies that only one task at a time is pending on the underlying run
+// loop while 99 delayed tasks drain in order.
+TEST_F(TaskQueueManagerTest, NumberOfPendingTasksOnChromiumRunLoop) {
+ Initialize(1u);
+
+ // NOTE because tasks posted to the chromium run loop are not cancellable, we
+ // will end up with a lot more tasks posted if the delayed tasks were posted
+ // in the reverse order.
+ // TODO(alexclarke): Consider talking to the message pump directly.
+ test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+ for (int i = 1; i < 100; i++) {
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&ChromiumRunloopInspectionTask, test_task_runner_),
+ base::TimeDelta::FromMilliseconds(i));
+ }
+ test_task_runner_->RunUntilIdle();
+}
+
+namespace {
+
+// Self-reposting task that posts TWO copies of itself with |delay_| each
+// time it runs (hence the task count fans out), advancing the test clock by
+// 5ms per run. Used to model work that grows over time.
+class QuadraticTask {
+ public:
+ QuadraticTask(scoped_refptr<TaskQueue> task_queue,
+ base::TimeDelta delay,
+ base::SimpleTestTickClock* now_src)
+ : count_(0), task_queue_(task_queue), delay_(delay), now_src_(now_src) {}
+
+ // |should_exit| is polled at the start of each run to stop the fan-out.
+ void SetShouldExit(base::RepeatingCallback<bool()> should_exit) {
+ should_exit_ = should_exit;
+ }
+
+ void Run() {
+ if (should_exit_.Run())
+ return;
+ count_++;
+ task_queue_->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&QuadraticTask::Run, base::Unretained(this)),
+ delay_);
+ task_queue_->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&QuadraticTask::Run, base::Unretained(this)),
+ delay_);
+ now_src_->Advance(base::TimeDelta::FromMilliseconds(5));
+ }
+
+ // Number of times Run() executed past the exit check.
+ int Count() const { return count_; }
+
+ private:
+ int count_;
+ scoped_refptr<TaskQueue> task_queue_;
+ base::TimeDelta delay_;
+ base::RepeatingCallback<bool()> should_exit_;
+ base::SimpleTestTickClock* now_src_;
+};
+
+// Self-reposting task that posts ONE copy of itself with |delay_| each time
+// it runs, advancing the test clock by 5ms per run.
+class LinearTask {
+ public:
+ LinearTask(scoped_refptr<TaskQueue> task_queue,
+ base::TimeDelta delay,
+ base::SimpleTestTickClock* now_src)
+ : count_(0), task_queue_(task_queue), delay_(delay), now_src_(now_src) {}
+
+ // |should_exit| is polled at the start of each run to stop reposting.
+ void SetShouldExit(base::RepeatingCallback<bool()> should_exit) {
+ should_exit_ = should_exit;
+ }
+
+ void Run() {
+ if (should_exit_.Run())
+ return;
+ count_++;
+ task_queue_->PostDelayedTask(
+ FROM_HERE, base::BindOnce(&LinearTask::Run, base::Unretained(this)),
+ delay_);
+ now_src_->Advance(base::TimeDelta::FromMilliseconds(5));
+ }
+
+ // Number of times Run() executed past the exit check.
+ int Count() const { return count_; }
+
+ private:
+ int count_;
+ scoped_refptr<TaskQueue> task_queue_;
+ base::TimeDelta delay_;
+ base::RepeatingCallback<bool()> should_exit_;
+ base::SimpleTestTickClock* now_src_;
+};
+
+// Terminates the starvation tests once either task has run 1000 times.
+bool ShouldExit(QuadraticTask* quadratic_task, LinearTask* linear_task) {
+ return quadratic_task->Count() == 1000 || linear_task->Count() == 1000;
+}
+
+} // namespace
+
+// A growing amount of delayed work on a queue must not badly starve
+// immediate work on the same queue: the immediate/delayed run-count ratio
+// should stay within (0.333, 1.1).
+TEST_F(TaskQueueManagerTest,
+ DelayedTasksDontBadlyStarveNonDelayedWork_SameQueue) {
+ Initialize(1u);
+
+ QuadraticTask quadratic_delayed_task(
+ runners_[0], base::TimeDelta::FromMilliseconds(10), &now_src_);
+ LinearTask linear_immediate_task(runners_[0], base::TimeDelta(), &now_src_);
+ base::RepeatingCallback<bool()> should_exit = base::BindRepeating(
+ ShouldExit, &quadratic_delayed_task, &linear_immediate_task);
+ quadratic_delayed_task.SetShouldExit(should_exit);
+ linear_immediate_task.SetShouldExit(should_exit);
+
+ quadratic_delayed_task.Run();
+ linear_immediate_task.Run();
+
+ test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+ test_task_runner_->RunUntilIdle();
+
+ double ratio = static_cast<double>(linear_immediate_task.Count()) /
+ static_cast<double>(quadratic_delayed_task.Count());
+
+ EXPECT_GT(ratio, 0.333);
+ EXPECT_LT(ratio, 1.1);
+}
+
+// Conversely, growing immediate work IS allowed to starve delayed work on
+// the same queue: the delayed/immediate run-count ratio stays in (0, 0.1).
+TEST_F(TaskQueueManagerTest, ImmediateWorkCanStarveDelayedTasks_SameQueue) {
+ Initialize(1u);
+
+ QuadraticTask quadratic_immediate_task(runners_[0], base::TimeDelta(),
+ &now_src_);
+ LinearTask linear_delayed_task(
+ runners_[0], base::TimeDelta::FromMilliseconds(10), &now_src_);
+ base::RepeatingCallback<bool()> should_exit = base::BindRepeating(
+ &ShouldExit, &quadratic_immediate_task, &linear_delayed_task);
+
+ quadratic_immediate_task.SetShouldExit(should_exit);
+ linear_delayed_task.SetShouldExit(should_exit);
+
+ quadratic_immediate_task.Run();
+ linear_delayed_task.Run();
+
+ test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+ test_task_runner_->RunUntilIdle();
+
+ double ratio = static_cast<double>(linear_delayed_task.Count()) /
+ static_cast<double>(quadratic_immediate_task.Count());
+
+ // This is by design, we want to enforce a strict ordering in task execution
+ // whereby delayed tasks can not skip ahead of non-delayed work.
+ EXPECT_GT(ratio, 0.0);
+ EXPECT_LT(ratio, 0.1);
+}
+
+// Same starvation bound as the _SameQueue variant, but with the delayed and
+// immediate work on two different queues.
+TEST_F(TaskQueueManagerTest,
+ DelayedTasksDontBadlyStarveNonDelayedWork_DifferentQueue) {
+ Initialize(2u);
+
+ QuadraticTask quadratic_delayed_task(
+ runners_[0], base::TimeDelta::FromMilliseconds(10), &now_src_);
+ LinearTask linear_immediate_task(runners_[1], base::TimeDelta(), &now_src_);
+ base::RepeatingCallback<bool()> should_exit = base::BindRepeating(
+ ShouldExit, &quadratic_delayed_task, &linear_immediate_task);
+ quadratic_delayed_task.SetShouldExit(should_exit);
+ linear_immediate_task.SetShouldExit(should_exit);
+
+ quadratic_delayed_task.Run();
+ linear_immediate_task.Run();
+
+ test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+ test_task_runner_->RunUntilIdle();
+
+ double ratio = static_cast<double>(linear_immediate_task.Count()) /
+ static_cast<double>(quadratic_delayed_task.Count());
+
+ EXPECT_GT(ratio, 0.333);
+ EXPECT_LT(ratio, 1.1);
+}
+
+// Same starvation allowance as the _SameQueue variant, but with the
+// immediate and delayed work on two different queues.
+TEST_F(TaskQueueManagerTest,
+ ImmediateWorkCanStarveDelayedTasks_DifferentQueue) {
+ Initialize(2u);
+
+ QuadraticTask quadratic_immediate_task(runners_[0], base::TimeDelta(),
+ &now_src_);
+ LinearTask linear_delayed_task(
+ runners_[1], base::TimeDelta::FromMilliseconds(10), &now_src_);
+ base::RepeatingCallback<bool()> should_exit = base::BindRepeating(
+ &ShouldExit, &quadratic_immediate_task, &linear_delayed_task);
+
+ quadratic_immediate_task.SetShouldExit(should_exit);
+ linear_delayed_task.SetShouldExit(should_exit);
+
+ quadratic_immediate_task.Run();
+ linear_delayed_task.Run();
+
+ test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+ test_task_runner_->RunUntilIdle();
+
+ double ratio = static_cast<double>(linear_delayed_task.Count()) /
+ static_cast<double>(quadratic_immediate_task.Count());
+
+ // This is by design, we want to enforce a strict ordering in task execution
+ // whereby delayed tasks can not skip ahead of non-delayed work.
+ EXPECT_GT(ratio, 0.0);
+ EXPECT_LT(ratio, 0.1);
+}
+
+// With no task running, currently_executing_task_queue() must be null.
+TEST_F(TaskQueueManagerTest, CurrentlyExecutingTaskQueue_NoTaskRunning) {
+ Initialize(1u);
+
+ EXPECT_EQ(nullptr, manager_->currently_executing_task_queue());
+}
+
+namespace {
+// Records the manager's currently executing task queue into |task_sources|
+// at the moment this task runs.
+void CurrentlyExecutingTaskQueueTestTask(
+ TaskQueueManagerImpl* task_queue_manager,
+ std::vector<internal::TaskQueueImpl*>* task_sources) {
+ task_sources->push_back(task_queue_manager->currently_executing_task_queue());
+}
+} // namespace
+
+// While a task runs, currently_executing_task_queue() reports the queue the
+// task was posted on, and reverts to null afterwards.
+TEST_F(TaskQueueManagerTest, CurrentlyExecutingTaskQueue_TaskRunning) {
+ Initialize(2u);
+
+ TestTaskQueue* queue0 = runners_[0].get();
+ TestTaskQueue* queue1 = runners_[1].get();
+
+ std::vector<internal::TaskQueueImpl*> task_sources;
+ queue0->PostTask(FROM_HERE,
+ base::BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+ manager_.get(), &task_sources));
+ queue1->PostTask(FROM_HERE,
+ base::BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+ manager_.get(), &task_sources));
+ test_task_runner_->RunUntilIdle();
+
+ EXPECT_THAT(task_sources, ElementsAre(queue0->GetTaskQueueImpl(),
+ queue1->GetTaskQueueImpl()));
+ EXPECT_EQ(nullptr, manager_->currently_executing_task_queue());
+}
+
+namespace {
+// Records the currently executing queue, posts |tasks| onto their respective
+// queues, runs them in a nested run loop, then records the currently
+// executing queue again (which should be back to this task's own queue).
+void RunloopCurrentlyExecutingTaskQueueTestTask(
+ base::MessageLoop* message_loop,
+ TaskQueueManagerImpl* task_queue_manager,
+ std::vector<internal::TaskQueueImpl*>* task_sources,
+ std::vector<std::pair<base::OnceClosure, TestTaskQueue*>>* tasks) {
+ base::MessageLoop::ScopedNestableTaskAllower allow(message_loop);
+ task_sources->push_back(task_queue_manager->currently_executing_task_queue());
+
+ for (std::pair<base::OnceClosure, TestTaskQueue*>& pair : *tasks) {
+ pair.second->PostTask(FROM_HERE, std::move(pair.first));
+ }
+
+ base::RunLoop().RunUntilIdle();
+ task_sources->push_back(task_queue_manager->currently_executing_task_queue());
+}
+} // namespace
+
+// currently_executing_task_queue() tracks queue changes across a nested run
+// loop and is restored to the outer task's queue when the loop exits.
+TEST_F(TaskQueueManagerTest, CurrentlyExecutingTaskQueue_NestedLoop) {
+ InitializeWithRealMessageLoop(3u);
+
+ TestTaskQueue* queue0 = runners_[0].get();
+ TestTaskQueue* queue1 = runners_[1].get();
+ TestTaskQueue* queue2 = runners_[2].get();
+
+ std::vector<internal::TaskQueueImpl*> task_sources;
+ std::vector<std::pair<base::OnceClosure, TestTaskQueue*>>
+ tasks_to_post_from_nested_loop;
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+ manager_.get(), &task_sources),
+ queue1));
+ tasks_to_post_from_nested_loop.push_back(
+ std::make_pair(base::BindOnce(&CurrentlyExecutingTaskQueueTestTask,
+ manager_.get(), &task_sources),
+ queue2));
+
+ queue0->PostTask(
+ FROM_HERE,
+ base::BindOnce(&RunloopCurrentlyExecutingTaskQueueTestTask,
+ message_loop_.get(), manager_.get(), &task_sources,
+ &tasks_to_post_from_nested_loop));
+
+ base::RunLoop().RunUntilIdle();
+ // queue0 before the nested loop, queue1/queue2 inside it, queue0 after.
+ EXPECT_THAT(
+ task_sources,
+ ElementsAre(queue0->GetTaskQueueImpl(), queue1->GetTaskQueueImpl(),
+ queue2->GetTaskQueueImpl(), queue0->GetTaskQueueImpl()));
+ EXPECT_EQ(nullptr, manager_->currently_executing_task_queue());
+}
+
+// A queue with a BlameContext set must emit matching ENTER_CONTEXT and
+// LEAVE_CONTEXT trace events around task execution (one pair for one task).
+TEST_F(TaskQueueManagerTest, BlameContextAttribution) {
+ using trace_analyzer::Query;
+
+ InitializeWithRealMessageLoop(1u);
+ TestTaskQueue* queue = runners_[0].get();
+
+ trace_analyzer::Start("*");
+ {
+ base::trace_event::BlameContext blame_context("cat", "name", "type",
+ "scope", 0, nullptr);
+ blame_context.Initialize();
+ queue->SetBlameContext(&blame_context);
+ queue->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+ base::RunLoop().RunUntilIdle();
+ }
+ auto analyzer = trace_analyzer::Stop();
+
+ trace_analyzer::TraceEventVector events;
+ Query q = Query::EventPhaseIs(TRACE_EVENT_PHASE_ENTER_CONTEXT) ||
+ Query::EventPhaseIs(TRACE_EVENT_PHASE_LEAVE_CONTEXT);
+ analyzer->FindEvents(q, &events);
+
+ // One enter + one leave for the single posted task.
+ EXPECT_EQ(2u, events.size());
+}
+
+// Canceling tasks 2 and 3 (via weak pointer invalidation) must remove their
+// scheduled wake-ups: only the |delay1| and |delay4| wake-ups occur.
+TEST_F(TaskQueueManagerTest, NoWakeUpsForCanceledDelayedTasks) {
+ Initialize(1u);
+
+ base::TimeTicks start_time = manager_->NowTicks();
+
+ CancelableTask task1(&now_src_);
+ CancelableTask task2(&now_src_);
+ CancelableTask task3(&now_src_);
+ CancelableTask task4(&now_src_);
+ base::TimeDelta delay1(base::TimeDelta::FromSeconds(5));
+ base::TimeDelta delay2(base::TimeDelta::FromSeconds(10));
+ base::TimeDelta delay3(base::TimeDelta::FromSeconds(15));
+ base::TimeDelta delay4(base::TimeDelta::FromSeconds(30));
+ std::vector<base::TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task1.weak_factory_.GetWeakPtr(), &run_times),
+ delay1);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task2.weak_factory_.GetWeakPtr(), &run_times),
+ delay2);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task3.weak_factory_.GetWeakPtr(), &run_times),
+ delay3);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task4.weak_factory_.GetWeakPtr(), &run_times),
+ delay4);
+
+ task2.weak_factory_.InvalidateWeakPtrs();
+ task3.weak_factory_.InvalidateWeakPtrs();
+
+ // Collect the clock value at every wake-up while draining the queue.
+ std::set<base::TimeTicks> wake_up_times;
+
+ RunUntilIdle(base::BindRepeating(
+ [](std::set<base::TimeTicks>* wake_up_times,
+ base::SimpleTestTickClock* clock) {
+ wake_up_times->insert(clock->NowTicks());
+ },
+ &wake_up_times, &now_src_));
+
+ EXPECT_THAT(wake_up_times,
+ ElementsAre(start_time + delay1, start_time + delay4));
+ EXPECT_THAT(run_times, ElementsAre(start_time + delay1, start_time + delay4));
+}
+
+// Same as NoWakeUpsForCanceledDelayedTasks, but with the tasks posted in
+// reverse delay order to exercise the wake-up heap's other insertion path.
+TEST_F(TaskQueueManagerTest, NoWakeUpsForCanceledDelayedTasksReversePostOrder) {
+ Initialize(1u);
+
+ base::TimeTicks start_time = manager_->NowTicks();
+
+ CancelableTask task1(&now_src_);
+ CancelableTask task2(&now_src_);
+ CancelableTask task3(&now_src_);
+ CancelableTask task4(&now_src_);
+ base::TimeDelta delay1(base::TimeDelta::FromSeconds(5));
+ base::TimeDelta delay2(base::TimeDelta::FromSeconds(10));
+ base::TimeDelta delay3(base::TimeDelta::FromSeconds(15));
+ base::TimeDelta delay4(base::TimeDelta::FromSeconds(30));
+ std::vector<base::TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task4.weak_factory_.GetWeakPtr(), &run_times),
+ delay4);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task3.weak_factory_.GetWeakPtr(), &run_times),
+ delay3);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task2.weak_factory_.GetWeakPtr(), &run_times),
+ delay2);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task1.weak_factory_.GetWeakPtr(), &run_times),
+ delay1);
+
+ task2.weak_factory_.InvalidateWeakPtrs();
+ task3.weak_factory_.InvalidateWeakPtrs();
+
+ // Collect the clock value at every wake-up while draining the queue.
+ std::set<base::TimeTicks> wake_up_times;
+
+ RunUntilIdle(base::BindRepeating(
+ [](std::set<base::TimeTicks>* wake_up_times,
+ base::SimpleTestTickClock* clock) {
+ wake_up_times->insert(clock->NowTicks());
+ },
+ &wake_up_times, &now_src_));
+
+ EXPECT_THAT(wake_up_times,
+ ElementsAre(start_time + delay1, start_time + delay4));
+ EXPECT_THAT(run_times, ElementsAre(start_time + delay1, start_time + delay4));
+}
+
+// A wake-up shared by multiple tasks at the same time is only removed when
+// ALL of those tasks are canceled: a second, non-canceled task at |delay3|
+// keeps the |delay3| wake-up alive after |task3| is canceled.
+TEST_F(TaskQueueManagerTest, TimeDomainWakeUpOnlyCancelledIfAllUsesCancelled) {
+ Initialize(1u);
+
+ base::TimeTicks start_time = manager_->NowTicks();
+
+ CancelableTask task1(&now_src_);
+ CancelableTask task2(&now_src_);
+ CancelableTask task3(&now_src_);
+ CancelableTask task4(&now_src_);
+ base::TimeDelta delay1(base::TimeDelta::FromSeconds(5));
+ base::TimeDelta delay2(base::TimeDelta::FromSeconds(10));
+ base::TimeDelta delay3(base::TimeDelta::FromSeconds(15));
+ base::TimeDelta delay4(base::TimeDelta::FromSeconds(30));
+ std::vector<base::TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task1.weak_factory_.GetWeakPtr(), &run_times),
+ delay1);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task2.weak_factory_.GetWeakPtr(), &run_times),
+ delay2);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task3.weak_factory_.GetWeakPtr(), &run_times),
+ delay3);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task4.weak_factory_.GetWeakPtr(), &run_times),
+ delay4);
+
+ // Post a non-canceled task with |delay3|. So we should still get a wake-up at
+ // |delay3| even though we cancel |task3|.
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask, base::Unretained(&task3),
+ &run_times),
+ delay3);
+
+ task2.weak_factory_.InvalidateWeakPtrs();
+ task3.weak_factory_.InvalidateWeakPtrs();
+ task1.weak_factory_.InvalidateWeakPtrs();
+
+ // Collect the clock value at every wake-up while draining the queue.
+ std::set<base::TimeTicks> wake_up_times;
+
+ RunUntilIdle(base::BindRepeating(
+ [](std::set<base::TimeTicks>* wake_up_times,
+ base::SimpleTestTickClock* clock) {
+ wake_up_times->insert(clock->NowTicks());
+ },
+ &wake_up_times, &now_src_));
+
+ EXPECT_THAT(wake_up_times,
+ ElementsAre(start_time + delay1, start_time + delay3,
+ start_time + delay4));
+
+ EXPECT_THAT(run_times, ElementsAre(start_time + delay3, start_time + delay4));
+}
+
+// A queue is enabled only while no QueueEnabledVoter votes to disable it;
+// deleting a disabling voter counts as re-enabling.
+TEST_F(TaskQueueManagerTest, TaskQueueVoters) {
+ Initialize(1u);
+
+ // The task queue should be initially enabled.
+ EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter1 =
+ runners_[0]->CreateQueueEnabledVoter();
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter2 =
+ runners_[0]->CreateQueueEnabledVoter();
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter3 =
+ runners_[0]->CreateQueueEnabledVoter();
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter4 =
+ runners_[0]->CreateQueueEnabledVoter();
+
+ // Voters should initially vote for the queue to be enabled.
+ EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+
+ // If any voter wants to disable, the queue is disabled.
+ voter1->SetQueueEnabled(false);
+ EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+ // If the voter is deleted then the queue should be re-enabled.
+ voter1.reset();
+ EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+
+ // If any of the remaining voters wants to disable, the queue should be
+ // disabled.
+ voter2->SetQueueEnabled(false);
+ EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+ // If another voter votes to disable, nothing happens because it's already
+ // disabled.
+ voter3->SetQueueEnabled(false);
+ EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+ // There are two votes to disable, so one of them voting to enable does
+ // nothing.
+ voter2->SetQueueEnabled(true);
+ EXPECT_FALSE(runners_[0]->IsQueueEnabled());
+
+ // If all voters vote to enable then the queue is enabled.
+ voter3->SetQueueEnabled(true);
+ EXPECT_TRUE(runners_[0]->IsQueueEnabled());
+}
+
+// Deleting a voter (last vote: enabled) after its queue was shut down must
+// not DCHECK.
+TEST_F(TaskQueueManagerTest, ShutdownQueueBeforeEnabledVoterDeleted) {
+ Initialize(1u);
+
+ scoped_refptr<TaskQueue> queue = CreateTaskQueue();
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ queue->CreateQueueEnabledVoter();
+
+ voter->SetQueueEnabled(true); // NOP
+ queue->ShutdownTaskQueue();
+
+ // This should complete without DCHECKing.
+ voter.reset();
+}
+
+// Deleting a voter (last vote: disabled) after its queue was shut down must
+// not DCHECK.
+TEST_F(TaskQueueManagerTest, ShutdownQueueBeforeDisabledVoterDeleted) {
+ Initialize(1u);
+
+ scoped_refptr<TaskQueue> queue = CreateTaskQueue();
+
+ std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+ queue->CreateQueueEnabledVoter();
+
+ voter->SetQueueEnabled(false);
+ queue->ShutdownTaskQueue();
+
+ // This should complete without DCHECKing.
+ voter.reset();
+}
+
+// SweepCanceledDelayedTasks removes exactly the tasks whose weak pointers
+// were invalidated, as observed via GetNumberOfPendingTasks().
+TEST_F(TaskQueueManagerTest, SweepCanceledDelayedTasks) {
+ Initialize(1u);
+
+ CancelableTask task1(&now_src_);
+ CancelableTask task2(&now_src_);
+ CancelableTask task3(&now_src_);
+ CancelableTask task4(&now_src_);
+ base::TimeDelta delay1(base::TimeDelta::FromSeconds(5));
+ base::TimeDelta delay2(base::TimeDelta::FromSeconds(10));
+ base::TimeDelta delay3(base::TimeDelta::FromSeconds(15));
+ base::TimeDelta delay4(base::TimeDelta::FromSeconds(30));
+ std::vector<base::TimeTicks> run_times;
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task1.weak_factory_.GetWeakPtr(), &run_times),
+ delay1);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task2.weak_factory_.GetWeakPtr(), &run_times),
+ delay2);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task3.weak_factory_.GetWeakPtr(), &run_times),
+ delay3);
+ runners_[0]->PostDelayedTask(
+ FROM_HERE,
+ base::BindOnce(&CancelableTask::RecordTimeTask,
+ task4.weak_factory_.GetWeakPtr(), &run_times),
+ delay4);
+
+ // Cancellation alone does not shrink the queue; only the sweep does.
+ EXPECT_EQ(4u, runners_[0]->GetNumberOfPendingTasks());
+ task2.weak_factory_.InvalidateWeakPtrs();
+ task3.weak_factory_.InvalidateWeakPtrs();
+ EXPECT_EQ(4u, runners_[0]->GetNumberOfPendingTasks());
+
+ manager_->SweepCanceledDelayedTasks();
+ EXPECT_EQ(2u, runners_[0]->GetNumberOfPendingTasks());
+
+ task1.weak_factory_.InvalidateWeakPtrs();
+ task4.weak_factory_.InvalidateWeakPtrs();
+
+ manager_->SweepCanceledDelayedTasks();
+ EXPECT_EQ(0u, runners_[0]->GetNumberOfPendingTasks());
+}
+
+// DelayTillNextTask() should report the delay to the soonest runnable task
+// across all queues: Max() when idle, the minimum pending delay otherwise,
+// and zero once an immediate task is posted.
+TEST_F(TaskQueueManagerTest, DelayTillNextTask) {
+  Initialize(2u);
+
+  LazyNow lazy_now(&now_src_);
+  EXPECT_EQ(base::TimeDelta::Max(), manager_->DelayTillNextTask(&lazy_now));
+
+  runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+                               base::TimeDelta::FromSeconds(10));
+
+  EXPECT_EQ(base::TimeDelta::FromSeconds(10),
+            manager_->DelayTillNextTask(&lazy_now));
+
+  runners_[1]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+                               base::TimeDelta::FromSeconds(15));
+
+  // A later task on another queue doesn't change the minimum.
+  EXPECT_EQ(base::TimeDelta::FromSeconds(10),
+            manager_->DelayTillNextTask(&lazy_now));
+
+  runners_[1]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+                               base::TimeDelta::FromSeconds(5));
+
+  EXPECT_EQ(base::TimeDelta::FromSeconds(5),
+            manager_->DelayTillNextTask(&lazy_now));
+
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+
+  EXPECT_EQ(base::TimeDelta(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+// Tasks on a disabled queue should not count towards DelayTillNextTask().
+TEST_F(TaskQueueManagerTest, DelayTillNextTask_Disabled) {
+  Initialize(1u);
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      runners_[0]->CreateQueueEnabledVoter();
+  voter->SetQueueEnabled(false);
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+
+  LazyNow lazy_now(&now_src_);
+  EXPECT_EQ(base::TimeDelta::Max(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+// A task posted behind a fence is blocked, so DelayTillNextTask() should
+// report no runnable work.
+TEST_F(TaskQueueManagerTest, DelayTillNextTask_Fence) {
+  Initialize(1u);
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+
+  LazyNow lazy_now(&now_src_);
+  EXPECT_EQ(base::TimeDelta::Max(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+// Re-inserting a fence after the task was posted moves the fence past the
+// task, unblocking it, so DelayTillNextTask() reports immediate work.
+TEST_F(TaskQueueManagerTest, DelayTillNextTask_FenceUnblocking) {
+  Initialize(1u);
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+  LazyNow lazy_now(&now_src_);
+  EXPECT_EQ(base::TimeDelta(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+// A delayed task whose delay has already elapsed is runnable now, so
+// DelayTillNextTask() should return zero.
+TEST_F(TaskQueueManagerTest, DelayTillNextTask_DelayedTaskReady) {
+  Initialize(1u);
+
+  runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+                               base::TimeDelta::FromSeconds(1));
+
+  now_src_.Advance(base::TimeDelta::FromSeconds(10));
+
+  LazyNow lazy_now(&now_src_);
+  EXPECT_EQ(base::TimeDelta(), manager_->DelayTillNextTask(&lazy_now));
+}
+
+namespace {
+// Runs inside a scheduler task: enters a nested run loop, posts a delayed
+// quit task and advances the mock clock past its delay so the nested loop
+// can only exit if delayed tasks run while nested.
+void MessageLoopTaskWithDelayedQuit(base::MessageLoop* message_loop,
+                                    base::SimpleTestTickClock* now_src,
+                                    scoped_refptr<TaskQueue> task_queue) {
+  base::MessageLoop::ScopedNestableTaskAllower allow(message_loop);
+  base::RunLoop run_loop;
+  task_queue->PostDelayedTask(FROM_HERE, run_loop.QuitClosure(),
+                              base::TimeDelta::FromMilliseconds(100));
+  now_src->Advance(base::TimeDelta::FromMilliseconds(200));
+  run_loop.Run();
+}
+}  // namespace
+
+// Verifies delayed tasks are processed while a nested message loop is
+// running; otherwise MessageLoopTaskWithDelayedQuit would never return.
+TEST_F(TaskQueueManagerTest, DelayedTaskRunsInNestedMessageLoop) {
+  InitializeWithRealMessageLoop(1u);
+  base::RunLoop run_loop;
+  runners_[0]->PostTask(
+      FROM_HERE,
+      base::BindOnce(&MessageLoopTaskWithDelayedQuit, message_loop_.get(),
+                     &now_src_, base::RetainedRef(runners_[0])));
+  run_loop.RunUntilIdle();
+}
+
+namespace {
+// Runs inside a (delayed) scheduler task: spins a nested run loop that is
+// quit by an immediate task, then signals the outer test via
+// |non_nested_quit_closure|.
+void MessageLoopTaskWithImmediateQuit(base::MessageLoop* message_loop,
+                                      base::OnceClosure non_nested_quit_closure,
+                                      scoped_refptr<TaskQueue> task_queue) {
+  base::MessageLoop::ScopedNestableTaskAllower allow(message_loop);
+
+  base::RunLoop run_loop;
+  // Needed because entering the nested run loop causes a DoWork to get
+  // posted.
+  task_queue->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+  task_queue->PostTask(FROM_HERE, run_loop.QuitClosure());
+  run_loop.Run();
+  std::move(non_nested_quit_closure).Run();
+}
+}  // namespace
+
+// A nested run loop entered from a delayed task must not block immediate
+// tasks from being processed inside it.
+TEST_F(TaskQueueManagerTest,
+       DelayedNestedMessageLoopDoesntPreventTasksRunning) {
+  InitializeWithRealMessageLoop(1u);
+  base::RunLoop run_loop;
+  runners_[0]->PostDelayedTask(
+      FROM_HERE,
+      base::BindOnce(&MessageLoopTaskWithImmediateQuit, message_loop_.get(),
+                     run_loop.QuitClosure(), base::RetainedRef(runners_[0])),
+      base::TimeDelta::FromMilliseconds(100));
+
+  now_src_.Advance(base::TimeDelta::FromMilliseconds(200));
+  run_loop.Run();
+}
+
+// CouldTaskRun() should track the queue's enabled state: false while a voter
+// disables the queue, true again once re-enabled.
+TEST_F(TaskQueueManagerTest, CouldTaskRun_DisableAndReenable) {
+  Initialize(1u);
+
+  EnqueueOrder enqueue_order = GetNextSequenceNumber();
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      runners_[0]->CreateQueueEnabledVoter();
+  voter->SetQueueEnabled(false);
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  voter->SetQueueEnabled(true);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+}
+
+// CouldTaskRun() should respect fences: a kNow fence inserted after the
+// sequence number doesn't block it, a kBeginningOfTime fence blocks
+// everything, and removing the fence unblocks.
+TEST_F(TaskQueueManagerTest, CouldTaskRun_Fence) {
+  Initialize(1u);
+
+  EnqueueOrder enqueue_order = GetNextSequenceNumber();
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kBeginningOfTime);
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  runners_[0]->RemoveFence();
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+}
+
+// A sequence number generated after a fence is blocked by it; re-inserting
+// the fence at kNow (i.e. after the number) unblocks it.
+TEST_F(TaskQueueManagerTest, CouldTaskRun_FenceBeforeThenAfter) {
+  Initialize(1u);
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+
+  EnqueueOrder enqueue_order = GetNextSequenceNumber();
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kNow);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->CouldTaskRun(enqueue_order));
+}
+
+// Disabling a queue should cancel its pending delayed DoWork (so the
+// underlying task runner has nothing scheduled), and re-enabling should
+// re-post it with the original delay.
+TEST_F(TaskQueueManagerTest, DelayedDoWorkNotPostedForDisabledQueue) {
+  Initialize(1u);
+
+  runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+                               base::TimeDelta::FromMilliseconds(1));
+  ASSERT_TRUE(test_task_runner_->HasPendingTasks());
+  EXPECT_EQ(base::TimeDelta::FromMilliseconds(1),
+            test_task_runner_->DelayToNextTaskTime());
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      runners_[0]->CreateQueueEnabledVoter();
+  voter->SetQueueEnabled(false);
+
+  // The DoWork is cancelled lazily; it disappears only once cancelled tasks
+  // are removed.
+  EXPECT_TRUE(test_task_runner_->HasPendingTasks());
+  test_task_runner_->RemoveCancelledTasks();
+  EXPECT_FALSE(test_task_runner_->HasPendingTasks());
+
+  voter->SetQueueEnabled(true);
+  ASSERT_TRUE(test_task_runner_->HasPendingTasks());
+  EXPECT_EQ(base::TimeDelta::FromMilliseconds(1),
+            test_task_runner_->DelayToNextTaskTime());
+}
+
+// As queues are disabled one by one, the next scheduled DoWork delay should
+// advance to the soonest task among the queues that remain enabled, and no
+// DoWork should remain once all queues are disabled.
+TEST_F(TaskQueueManagerTest, DisablingQueuesChangesDelayTillNextDoWork) {
+  Initialize(3u);
+  runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+                               base::TimeDelta::FromMilliseconds(1));
+  runners_[1]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+                               base::TimeDelta::FromMilliseconds(10));
+  runners_[2]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+                               base::TimeDelta::FromMilliseconds(100));
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter0 =
+      runners_[0]->CreateQueueEnabledVoter();
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter1 =
+      runners_[1]->CreateQueueEnabledVoter();
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter2 =
+      runners_[2]->CreateQueueEnabledVoter();
+
+  ASSERT_TRUE(test_task_runner_->HasPendingTasks());
+  EXPECT_EQ(base::TimeDelta::FromMilliseconds(1),
+            test_task_runner_->DelayToNextTaskTime());
+
+  voter0->SetQueueEnabled(false);
+  test_task_runner_->RemoveCancelledTasks();
+  ASSERT_TRUE(test_task_runner_->HasPendingTasks());
+  EXPECT_EQ(base::TimeDelta::FromMilliseconds(10),
+            test_task_runner_->DelayToNextTaskTime());
+
+  voter1->SetQueueEnabled(false);
+  test_task_runner_->RemoveCancelledTasks();
+  ASSERT_TRUE(test_task_runner_->HasPendingTasks());
+  EXPECT_EQ(base::TimeDelta::FromMilliseconds(100),
+            test_task_runner_->DelayToNextTaskTime());
+
+  voter2->SetQueueEnabled(false);
+  test_task_runner_->RemoveCancelledTasks();
+  EXPECT_FALSE(test_task_runner_->HasPendingTasks());
+}
+
+// GetNextScheduledWakeUp() should report the earliest pending delayed task's
+// run time, be nullopt while the queue is disabled, and be unaffected by
+// immediate tasks or fences.
+TEST_F(TaskQueueManagerTest, GetNextScheduledWakeUp) {
+  Initialize(1u);
+
+  EXPECT_EQ(base::nullopt, runners_[0]->GetNextScheduledWakeUp());
+
+  base::TimeTicks start_time = manager_->NowTicks();
+  base::TimeDelta delay1 = base::TimeDelta::FromMilliseconds(10);
+  base::TimeDelta delay2 = base::TimeDelta::FromMilliseconds(2);
+
+  runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay1);
+  EXPECT_EQ(start_time + delay1, runners_[0]->GetNextScheduledWakeUp());
+
+  // A sooner task becomes the next wake-up.
+  runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask), delay2);
+  EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+
+  // We don't have wake-ups scheduled for disabled queues.
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      runners_[0]->CreateQueueEnabledVoter();
+  voter->SetQueueEnabled(false);
+  EXPECT_EQ(base::nullopt, runners_[0]->GetNextScheduledWakeUp());
+
+  voter->SetQueueEnabled(true);
+  EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+
+  // Immediate tasks shouldn't make any difference.
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&NopTask));
+  EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+
+  // Neither should fences.
+  runners_[0]->InsertFence(TaskQueue::InsertFencePosition::kBeginningOfTime);
+  EXPECT_EQ(start_time + delay2, runners_[0]->GetNextScheduledWakeUp());
+}
+
+// Changing the time domain of a disabled queue should not notify the
+// queue's observer about a wake-up change.
+TEST_F(TaskQueueManagerTest, SetTimeDomainForDisabledQueue) {
+  Initialize(1u);
+
+  MockTaskQueueObserver observer;
+  runners_[0]->SetObserver(&observer);
+
+  runners_[0]->PostDelayedTask(FROM_HERE, base::BindOnce(&NopTask),
+                               base::TimeDelta::FromMilliseconds(1));
+
+  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
+      runners_[0]->CreateQueueEnabledVoter();
+  voter->SetQueueEnabled(false);
+
+  // We should not get a notification for a disabled queue.
+  EXPECT_CALL(observer, OnQueueNextWakeUpChanged(_, _)).Times(0);
+
+  std::unique_ptr<VirtualTimeDomain> domain(
+      new VirtualTimeDomain(manager_->NowTicks()));
+  manager_->RegisterTimeDomain(domain.get());
+  runners_[0]->SetTimeDomain(domain.get());
+
+  // Tidy up: the queue must be shut down before its time domain goes away.
+  runners_[0]->ShutdownTaskQueue();
+  manager_->UnregisterTimeDomain(domain.get());
+}
+
+namespace {
+// Installs counting started/completed handlers on |task_queue| so tests can
+// verify how many tasks were instrumented.
+void SetOnTaskHandlers(scoped_refptr<TestTaskQueue> task_queue,
+                       int* start_counter,
+                       int* complete_counter) {
+  task_queue->GetTaskQueueImpl()->SetOnTaskStartedHandler(
+      base::BindRepeating([](int* counter, const TaskQueue::Task& task,
+                             base::TimeTicks start) { ++(*counter); },
+                          start_counter));
+  task_queue->GetTaskQueueImpl()->SetOnTaskCompletedHandler(base::BindRepeating(
+      [](int* counter, const TaskQueue::Task& task, base::TimeTicks start,
+         base::TimeTicks end,
+         base::Optional<base::TimeDelta> thread_time) { ++(*counter); },
+      complete_counter));
+}
+
+// Clears both handlers by installing null callbacks.
+void UnsetOnTaskHandlers(scoped_refptr<TestTaskQueue> task_queue) {
+  task_queue->GetTaskQueueImpl()->SetOnTaskStartedHandler(
+      base::RepeatingCallback<void(const TaskQueue::Task& task,
+                                   base::TimeTicks start)>());
+  task_queue->GetTaskQueueImpl()->SetOnTaskCompletedHandler(
+      base::RepeatingCallback<void(
+          const TaskQueue::Task& task, base::TimeTicks start,
+          base::TimeTicks end, base::Optional<base::TimeDelta> thread_time)>());
+}
+}  // namespace
+
+// With no TaskTimeObservers registered, per-queue started/completed handlers
+// alone should force task timing, and unsetting them should stop both the
+// timing requirement and the counting.
+TEST_F(TaskQueueManagerTest, ProcessTasksWithoutTaskTimeObservers) {
+  Initialize(1u);
+  int start_counter = 0;
+  int complete_counter = 0;
+  std::vector<EnqueueOrder> run_order;
+  SetOnTaskHandlers(runners_[0], &start_counter, &complete_counter);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+  test_task_runner_->RunUntilIdle();
+  EXPECT_EQ(start_counter, 3);
+  EXPECT_EQ(complete_counter, 3);
+  EXPECT_THAT(run_order, ElementsAre(1, 2, 3));
+
+  UnsetOnTaskHandlers(runners_[0]);
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 4, &run_order));
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 5, &run_order));
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 6, &run_order));
+  test_task_runner_->RunUntilIdle();
+  // Counters are unchanged: the handlers were removed before tasks 4-6 ran.
+  EXPECT_EQ(start_counter, 3);
+  EXPECT_EQ(complete_counter, 3);
+  EXPECT_THAT(run_order, ElementsAre(1, 2, 3, 4, 5, 6));
+}
+
+// Exercises the interaction between manager-level TaskTimeObservers and
+// per-queue started/completed handlers: either alone should drive
+// RequiresTaskTiming(), and the handler counters should only advance while
+// handlers are installed.
+TEST_F(TaskQueueManagerTest, ProcessTasksWithTaskTimeObservers) {
+  Initialize(1u);
+  int start_counter = 0;
+  int complete_counter = 0;
+
+  manager_->AddTaskTimeObserver(&test_task_time_observer_);
+  SetOnTaskHandlers(runners_[0], &start_counter, &complete_counter);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  std::vector<EnqueueOrder> run_order;
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+  test_task_runner_->RunUntilIdle();
+  EXPECT_EQ(start_counter, 2);
+  EXPECT_EQ(complete_counter, 2);
+  EXPECT_THAT(run_order, ElementsAre(1, 2));
+
+  UnsetOnTaskHandlers(runners_[0]);
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 3, &run_order));
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 4, &run_order));
+  test_task_runner_->RunUntilIdle();
+  EXPECT_EQ(start_counter, 2);
+  EXPECT_EQ(complete_counter, 2);
+  EXPECT_THAT(run_order, ElementsAre(1, 2, 3, 4));
+
+  manager_->RemoveTaskTimeObserver(&test_task_time_observer_);
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 5, &run_order));
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 6, &run_order));
+  test_task_runner_->RunUntilIdle();
+  EXPECT_EQ(start_counter, 2);
+  EXPECT_EQ(complete_counter, 2);
+  EXPECT_FALSE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  EXPECT_THAT(run_order, ElementsAre(1, 2, 3, 4, 5, 6));
+
+  // Re-installing handlers resumes counting.
+  SetOnTaskHandlers(runners_[0], &start_counter, &complete_counter);
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 7, &run_order));
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 8, &run_order));
+  test_task_runner_->RunUntilIdle();
+  EXPECT_EQ(start_counter, 4);
+  EXPECT_EQ(complete_counter, 4);
+  EXPECT_TRUE(runners_[0]->GetTaskQueueImpl()->RequiresTaskTiming());
+  EXPECT_THAT(run_order, ElementsAre(1, 2, 3, 4, 5, 6, 7, 8));
+  UnsetOnTaskHandlers(runners_[0]);
+}
+
+// When the last reference to a TaskQueue is dropped, the queue enters a
+// graceful-shutdown phase: its already-posted tasks still run, and the
+// manager's bookkeeping eventually drops to zero queues.
+TEST_F(TaskQueueManagerTest, GracefulShutdown) {
+  Initialize(0u);
+  test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+
+  std::vector<base::TimeTicks> run_times;
+  scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+  base::WeakPtr<TestTaskQueue> main_tq_weak_ptr = main_tq->GetWeakPtr();
+
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  // Tasks at 100ms, 200ms, ..., 500ms.
+  for (int i = 1; i <= 5; ++i) {
+    main_tq->PostDelayedTask(
+        FROM_HERE, base::BindOnce(&RecordTimeTask, &run_times, &now_src_),
+        base::TimeDelta::FromMilliseconds(i * 100));
+  }
+  test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(250));
+
+  main_tq = nullptr;
+  // Ensure that task queue went away.
+  EXPECT_FALSE(main_tq_weak_ptr.get());
+
+  test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(1));
+
+  // The queue is still tracked, now in the to-shutdown list.
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(1u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  test_task_runner_->RunUntilIdle();
+
+  // Even with TaskQueue gone, tasks are executed.
+  EXPECT_THAT(
+      run_times,
+      ElementsAre(base::TimeTicks() + base::TimeDelta::FromMilliseconds(101),
+                  base::TimeTicks() + base::TimeDelta::FromMilliseconds(201),
+                  base::TimeTicks() + base::TimeDelta::FromMilliseconds(301),
+                  base::TimeTicks() + base::TimeDelta::FromMilliseconds(401),
+                  base::TimeTicks() + base::TimeDelta::FromMilliseconds(501)));
+
+  EXPECT_EQ(0u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+}
+
+// Destroying the manager while many queues are mid-graceful-shutdown must
+// not leak or touch freed memory; tasks posted after the destruction point
+// simply never run.
+TEST_F(TaskQueueManagerTest, GracefulShutdown_ManagerDeletedInFlight) {
+  Initialize(0u);
+  test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+
+  std::vector<base::TimeTicks> run_times;
+  scoped_refptr<TestTaskQueue> control_tq = CreateTaskQueue();
+  std::vector<scoped_refptr<TestTaskQueue>> main_tqs;
+  std::vector<base::WeakPtr<TestTaskQueue>> main_tq_weak_ptrs;
+
+  // There might be a race condition - async task queues should be unregistered
+  // first. Increase the number of task queues to surely detect that.
+  // The problem is that pointers are compared in a set and generally for
+  // a small number of allocations value of the pointers increases
+  // monotonically. 100 is large enough to force allocations from different
+  // pages.
+  const int N = 100;
+  for (int i = 0; i < N; ++i) {
+    scoped_refptr<TestTaskQueue> tq = CreateTaskQueue();
+    main_tq_weak_ptrs.push_back(tq->GetWeakPtr());
+    main_tqs.push_back(std::move(tq));
+  }
+
+  for (int i = 1; i <= 5; ++i) {
+    main_tqs[0]->PostDelayedTask(
+        FROM_HERE, base::BindOnce(&RecordTimeTask, &run_times, &now_src_),
+        base::TimeDelta::FromMilliseconds(i * 100));
+  }
+  test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(250));
+
+  main_tqs.clear();
+  // Ensure that task queues went away.
+  for (int i = 0; i < N; ++i) {
+    EXPECT_FALSE(main_tq_weak_ptrs[i].get());
+  }
+
+  // No leaks should occur when TQM was destroyed before processing
+  // shutdown task and TaskQueueImpl should be safely deleted on a correct
+  // thread.
+  manager_.reset();
+
+  test_task_runner_->RunUntilIdle();
+
+  // Only the tasks that ran before the manager was destroyed are recorded.
+  EXPECT_THAT(
+      run_times,
+      ElementsAre(base::TimeTicks() + base::TimeDelta::FromMilliseconds(101),
+                  base::TimeTicks() + base::TimeDelta::FromMilliseconds(201)));
+}
+
+// Like GracefulShutdown, but the manager itself is destroyed while a queue
+// sits in the to-shutdown list; the queue must still be unregistered cleanly.
+TEST_F(TaskQueueManagerTest,
+       GracefulShutdown_ManagerDeletedWithQueuesToShutdown) {
+  Initialize(0u);
+  test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+
+  std::vector<base::TimeTicks> run_times;
+  scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+  base::WeakPtr<TestTaskQueue> main_tq_weak_ptr = main_tq->GetWeakPtr();
+
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  for (int i = 1; i <= 5; ++i) {
+    main_tq->PostDelayedTask(
+        FROM_HERE, base::BindOnce(&RecordTimeTask, &run_times, &now_src_),
+        base::TimeDelta::FromMilliseconds(i * 100));
+  }
+  test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(250));
+
+  main_tq = nullptr;
+  // Ensure that task queue went away.
+  EXPECT_FALSE(main_tq_weak_ptr.get());
+
+  test_task_runner_->RunForPeriod(base::TimeDelta::FromMilliseconds(1));
+
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(1u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  // Ensure that all queues-to-gracefully-shutdown are properly unregistered.
+  manager_.reset();
+
+  test_task_runner_->RunUntilIdle();
+
+  EXPECT_THAT(
+      run_times,
+      ElementsAre(base::TimeTicks() + base::TimeDelta::FromMilliseconds(101),
+                  base::TimeTicks() + base::TimeDelta::FromMilliseconds(201)));
+}
+
+// Verifies that SetDefaultTaskRunner() overrides the MessageLoop's task
+// runner for the lifetime of the manager, and that the original runner is
+// restored when the manager is destroyed.
+TEST_F(TaskQueueManagerTest, DefaultTaskRunnerSupport) {
+  base::MessageLoop message_loop;
+  scoped_refptr<base::SingleThreadTaskRunner> original_task_runner =
+      message_loop.task_runner();
+  scoped_refptr<base::SingleThreadTaskRunner> custom_task_runner =
+      base::MakeRefCounted<base::TestSimpleTaskRunner>();
+  {
+    std::unique_ptr<TaskQueueManagerForTest> manager =
+        TaskQueueManagerForTest::Create(&message_loop,
+                                        message_loop.task_runner(), nullptr);
+    manager->SetDefaultTaskRunner(custom_task_runner);
+    // Use EXPECT_EQ rather than DCHECK_EQ: DCHECK_EQ is compiled out in
+    // non-DCHECK builds (silently skipping the check) and crashes rather
+    // than reporting a test failure when it does fire.
+    EXPECT_EQ(custom_task_runner, message_loop.task_runner());
+  }
+  EXPECT_EQ(original_task_runner, message_loop.task_runner());
+}
+
+// Canceled tasks at the front of one queue must not let a later task on that
+// queue run before an earlier task on another queue, i.e. cancellation must
+// not perturb cross-queue ordering.
+TEST_F(TaskQueueManagerTest, CanceledTasksInQueueCantMakeOtherTasksSkipAhead) {
+  Initialize(2u);
+
+  CancelableTask task1(&now_src_);
+  CancelableTask task2(&now_src_);
+  std::vector<base::TimeTicks> run_times;
+
+  runners_[0]->PostTask(
+      FROM_HERE, base::BindOnce(&CancelableTask::RecordTimeTask,
+                                task1.weak_factory_.GetWeakPtr(), &run_times));
+  runners_[0]->PostTask(
+      FROM_HERE, base::BindOnce(&CancelableTask::RecordTimeTask,
+                                task2.weak_factory_.GetWeakPtr(), &run_times));
+
+  std::vector<EnqueueOrder> run_order;
+  runners_[1]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 1, &run_order));
+
+  runners_[0]->PostTask(FROM_HERE, base::BindOnce(&TestTask, 2, &run_order));
+
+  task1.weak_factory_.InvalidateWeakPtrs();
+  task2.weak_factory_.InvalidateWeakPtrs();
+  test_task_runner_->RunUntilIdle();
+
+  // Task 1 (other queue) must still run before task 2.
+  EXPECT_THAT(run_order, ElementsAre(1, 2));
+}
+
+// Dropping the last reference to a TaskQueue on a different thread must
+// still trigger graceful shutdown: pending tasks run on the main thread and
+// the manager's bookkeeping is cleaned up.
+TEST_F(TaskQueueManagerTest, TaskQueueDeletedOnAnotherThread) {
+  Initialize(0u);
+  test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+
+  std::vector<base::TimeTicks> run_times;
+  scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+
+  int start_counter = 0;
+  int complete_counter = 0;
+  SetOnTaskHandlers(main_tq, &start_counter, &complete_counter);
+
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  for (int i = 1; i <= 5; ++i) {
+    main_tq->PostDelayedTask(
+        FROM_HERE, base::BindOnce(&RecordTimeTask, &run_times, &now_src_),
+        base::TimeDelta::FromMilliseconds(i * 100));
+  }
+
+  // TODO(altimin): do not do this after switching to weak pointer-based
+  // task handlers.
+  UnsetOnTaskHandlers(main_tq);
+
+  base::WaitableEvent task_queue_deleted(
+      base::WaitableEvent::ResetPolicy::MANUAL,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
+  std::unique_ptr<base::Thread> thread =
+      std::make_unique<base::Thread>("test thread");
+  thread->StartAndWaitForTesting();
+
+  // Release the last reference on the other thread.
+  thread->task_runner()->PostTask(
+      FROM_HERE, base::BindOnce(
+                     [](scoped_refptr<base::SingleThreadTaskRunner> task_queue,
+                        base::WaitableEvent* task_queue_deleted) {
+                       task_queue = nullptr;
+                       task_queue_deleted->Signal();
+                     },
+                     std::move(main_tq), &task_queue_deleted));
+  task_queue_deleted.Wait();
+
+  EXPECT_EQ(1u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(1u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  test_task_runner_->RunUntilIdle();
+
+  // Even with TaskQueue gone, tasks are executed.
+  EXPECT_THAT(
+      run_times,
+      ElementsAre(base::TimeTicks() + base::TimeDelta::FromMilliseconds(101),
+                  base::TimeTicks() + base::TimeDelta::FromMilliseconds(201),
+                  base::TimeTicks() + base::TimeDelta::FromMilliseconds(301),
+                  base::TimeTicks() + base::TimeDelta::FromMilliseconds(401),
+                  base::TimeTicks() + base::TimeDelta::FromMilliseconds(501)));
+
+  EXPECT_EQ(0u, manager_->ActiveQueuesCount());
+  EXPECT_EQ(0u, manager_->QueuesToShutdownCount());
+  EXPECT_EQ(0u, manager_->QueuesToDeleteCount());
+
+  thread->Stop();
+}
+
+namespace {
+
+void DoNothing() {}
+
+// Test helper whose destructor posts to the captured task queue, used to
+// exercise posting to a queue from within task destruction after shutdown.
+class PostTaskInDestructor {
+ public:
+  explicit PostTaskInDestructor(scoped_refptr<TaskQueue> task_queue)
+      : task_queue_(task_queue) {}
+
+  ~PostTaskInDestructor() {
+    task_queue_->PostTask(FROM_HERE, base::BindOnce(&DoNothing));
+  }
+
+  void Do() {}
+
+ private:
+  scoped_refptr<TaskQueue> task_queue_;
+};
+
+}  // namespace
+
+TEST_F(TaskQueueManagerTest, TaskQueueUsedInTaskDestructorAfterShutdown) {
+  // This test checks that when a task is posted to a shutdown queue and
+  // destroyed, it can try to post a task to the same queue without deadlocks.
+  Initialize(0u);
+  test_task_runner_->SetAutoAdvanceNowToPendingTasks(true);
+
+  scoped_refptr<TestTaskQueue> main_tq = CreateTaskQueue();
+
+  base::WaitableEvent test_executed(
+      base::WaitableEvent::ResetPolicy::MANUAL,
+      base::WaitableEvent::InitialState::NOT_SIGNALED);
+  std::unique_ptr<base::Thread> thread =
+      std::make_unique<base::Thread>("test thread");
+  thread->StartAndWaitForTesting();
+
+  // Shut the manager down first, so the queue is already dead when the other
+  // thread posts to it.
+  manager_.reset();
+
+  // The PostTask below fails (queue shut down), destroying |test_object|,
+  // whose destructor posts to the same queue again.
+  thread->task_runner()->PostTask(
+      FROM_HERE, base::BindOnce(
+                     [](scoped_refptr<base::SingleThreadTaskRunner> task_queue,
+                        std::unique_ptr<PostTaskInDestructor> test_object,
+                        base::WaitableEvent* test_executed) {
+                       task_queue->PostTask(
+                           FROM_HERE, base::BindOnce(&PostTaskInDestructor::Do,
+                                                     std::move(test_object)));
+                       test_executed->Signal();
+                     },
+                     main_tq, std::make_unique<PostTaskInDestructor>(main_tq),
+                     &test_executed));
+  test_executed.Wait();
+}
+
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_perftest.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_perftest.cc
new file mode 100644
index 00000000000..acaf725976b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_manager_perftest.cc
@@ -0,0 +1,230 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_manager.h"
+
+#include <stddef.h>
+#include <memory>
+
+#include "base/bind.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/strings/stringprintf.h"
+#include "base/threading/thread.h"
+#include "base/threading/thread_task_runner_handle.h"
+#include "base/time/default_tick_clock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "testing/perf/perf_test.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_selector.h"
+#include "third_party/blink/renderer/platform/scheduler/base/test_task_time_observer.h"
+#include "third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h"
+#include "third_party/blink/renderer/platform/scheduler/test/task_queue_manager_for_test.h"
+#include "third_party/blink/renderer/platform/scheduler/test/test_task_queue.h"
+
+namespace blink {
+namespace scheduler {
+
+// To reduce noise related to the OS timer, we use a virtual time domain to
+// fast forward the timers.
+class PerfTestTimeDomain : public VirtualTimeDomain {
+ public:
+  PerfTestTimeDomain() : VirtualTimeDomain(base::TimeTicks::Now()) {}
+  ~PerfTestTimeDomain() override = default;
+
+  // Rather than waiting for the next delayed task, jump virtual time
+  // straight to its run time and report a zero delay.
+  base::Optional<base::TimeDelta> DelayTillNextTask(
+      LazyNow* lazy_now) override {
+    base::TimeTicks run_time;
+    if (!NextScheduledRunTime(&run_time))
+      return base::Optional<base::TimeDelta>();
+
+    AdvanceNowTo(run_time);
+    return base::TimeDelta();  // Makes DoWork post an immediate continuation.
+  }
+
+  void RequestWakeUpAt(base::TimeTicks now, base::TimeTicks run_time) override {
+    // De-dupe DoWorks.
+    if (NumberOfScheduledWakeUps() == 1u)
+      RequestDoWork();
+  }
+
+  void CancelWakeUpAt(base::TimeTicks run_time) override {
+    // We didn't post a delayed task in RequestWakeUpAt so there's no need to do
+    // anything here.
+  }
+
+  const char* GetName() const override { return "PerfTestTimeDomain"; }
+
+ private:
+  // Previously this macro sat in the public section with no access
+  // specifier; Chromium style places it last in the private section.
+  DISALLOW_COPY_AND_ASSIGN(PerfTestTimeDomain);
+};
+
+// Fixture that drives a self-sustaining storm of delayed tasks across a
+// configurable number of queues and measures per-task cost with ThreadTicks.
+class TaskQueueManagerPerfTest : public testing::Test {
+ public:
+  TaskQueueManagerPerfTest()
+      : num_queues_(0),
+        max_tasks_in_flight_(0),
+        num_tasks_in_flight_(0),
+        num_tasks_to_post_(0),
+        num_tasks_to_run_(0) {}
+
+  void SetUp() override {
+    if (base::ThreadTicks::IsSupported())
+      base::ThreadTicks::WaitUntilInitialized();
+  }
+
+  void TearDown() override {
+    // Queues must be released before the time domain is unregistered and the
+    // manager destroyed.
+    queues_.clear();
+    manager_->UnregisterTimeDomain(virtual_time_domain_.get());
+    manager_.reset();
+  }
+
+  // Creates the manager, the virtual time domain and |num_queues| task
+  // queues bound to that domain.
+  void Initialize(size_t num_queues) {
+    num_queues_ = num_queues;
+    message_loop_.reset(new base::MessageLoop());
+    manager_ = TaskQueueManagerForTest::Create(
+        message_loop_.get(), message_loop_->task_runner(),
+        base::DefaultTickClock::GetInstance());
+    manager_->AddTaskTimeObserver(&test_task_time_observer_);
+
+    virtual_time_domain_.reset(new PerfTestTimeDomain());
+    manager_->RegisterTimeDomain(virtual_time_domain_.get());
+
+    for (size_t i = 0; i < num_queues; i++) {
+      queues_.push_back(manager_->CreateTaskQueue<TestTaskQueue>(
+          TaskQueue::Spec("test").SetTimeDomain(virtual_time_domain_.get())));
+    }
+  }
+
+  // The workload task: reposts itself with varying fan-out, queue choice and
+  // delay until num_tasks_to_run_ tasks have executed.
+  void TestDelayedTask() {
+    if (--num_tasks_to_run_ == 0) {
+      run_loop_->QuitWhenIdle();
+      return;
+    }
+
+    num_tasks_in_flight_--;
+    // NOTE there are only up to max_tasks_in_flight_ pending delayed tasks at
+    // any one time. Thanks to the lower_num_tasks_to_post going to zero if
+    // there are a lot of tasks in flight, the total number of tasks in flight
+    // at any one time is very variable.
+    unsigned int lower_num_tasks_to_post =
+        num_tasks_in_flight_ < (max_tasks_in_flight_ / 2) ? 1 : 0;
+    unsigned int max_tasks_to_post =
+        num_tasks_to_post_ % 2 ? lower_num_tasks_to_post : 10;
+    for (unsigned int i = 0;
+         i < max_tasks_to_post && num_tasks_in_flight_ < max_tasks_in_flight_ &&
+         num_tasks_to_post_ > 0;
+         i++) {
+      // Choose a queue weighted towards queue 0.
+      unsigned int queue = num_tasks_to_post_ % (num_queues_ + 1);
+      if (queue == num_queues_) {
+        queue = 0;
+      }
+      // Simulate a mix of short and longer delays.
+      unsigned int delay =
+          num_tasks_to_post_ % 2 ? 1 : (10 + num_tasks_to_post_ % 10);
+      queues_[queue]->PostDelayedTask(
+          FROM_HERE,
+          base::BindOnce(&TaskQueueManagerPerfTest::TestDelayedTask,
+                         base::Unretained(this)),
+          base::TimeDelta::FromMilliseconds(delay));
+      num_tasks_in_flight_++;
+      num_tasks_to_post_--;
+    }
+  }
+
+  // Resets the counters and kicks off the first TestDelayedTask.
+  void ResetAndCallTestDelayedTask(unsigned int num_tasks_to_run) {
+    num_tasks_in_flight_ = 1;
+    num_tasks_to_post_ = num_tasks_to_run;
+    num_tasks_to_run_ = num_tasks_to_run;
+    TestDelayedTask();
+  }
+
+  // Repeats |test_task| for ~5 seconds of thread time and prints the mean
+  // per-iteration cost under |trace|.
+  void Benchmark(const std::string& trace,
+                 const base::RepeatingClosure& test_task) {
+    base::ThreadTicks start = base::ThreadTicks::Now();
+    base::ThreadTicks now;
+    unsigned long long num_iterations = 0;
+    do {
+      test_task.Run();
+      run_loop_.reset(new base::RunLoop());
+      run_loop_->Run();
+      now = base::ThreadTicks::Now();
+      num_iterations++;
+    } while (now - start < base::TimeDelta::FromSeconds(5));
+    perf_test::PrintResult(
+        "task", "", trace,
+        (now - start).InMicroseconds() / static_cast<double>(num_iterations),
+        "us/run", true);
+  }
+
+  size_t num_queues_;
+  unsigned int max_tasks_in_flight_;
+  unsigned int num_tasks_in_flight_;
+  unsigned int num_tasks_to_post_;
+  unsigned int num_tasks_to_run_;
+  std::unique_ptr<base::MessageLoop> message_loop_;
+  std::unique_ptr<TaskQueueManager> manager_;
+  std::unique_ptr<base::RunLoop> run_loop_;
+  std::unique_ptr<VirtualTimeDomain> virtual_time_domain_;
+  std::vector<scoped_refptr<base::SingleThreadTaskRunner>> queues_;
+  // TODO(alexclarke): parameterize so we can measure with and without a
+  // TaskTimeObserver.
+  TestTaskTimeObserver test_task_time_observer_;
+};
+
+// Measures per-task cost with all 10000 delayed tasks on a single queue.
+TEST_F(TaskQueueManagerPerfTest, RunTenThousandDelayedTasks_OneQueue) {
+  if (!base::ThreadTicks::IsSupported())
+    return;
+  Initialize(1u);
+
+  max_tasks_in_flight_ = 200;
+  Benchmark("run 10000 delayed tasks with one queue",
+            base::BindRepeating(
+                &TaskQueueManagerPerfTest::ResetAndCallTestDelayedTask,
+                base::Unretained(this), 10000));
+}
+
+// Measures per-task cost with the 10000 tasks spread over four queues.
+TEST_F(TaskQueueManagerPerfTest, RunTenThousandDelayedTasks_FourQueues) {
+  if (!base::ThreadTicks::IsSupported())
+    return;
+  Initialize(4u);
+
+  max_tasks_in_flight_ = 200;
+  Benchmark("run 10000 delayed tasks with four queues",
+            base::BindRepeating(
+                &TaskQueueManagerPerfTest::ResetAndCallTestDelayedTask,
+                base::Unretained(this), 10000));
+}
+
+// Measures per-task cost with the 10000 tasks spread over eight queues.
+TEST_F(TaskQueueManagerPerfTest, RunTenThousandDelayedTasks_EightQueues) {
+  if (!base::ThreadTicks::IsSupported())
+    return;
+  Initialize(8u);
+
+  max_tasks_in_flight_ = 200;
+  Benchmark("run 10000 delayed tasks with eight queues",
+            base::BindRepeating(
+                &TaskQueueManagerPerfTest::ResetAndCallTestDelayedTask,
+                base::Unretained(this), 10000));
+}
+
+// Measures per-task cost with the 10000 tasks spread over thirty-two queues.
+TEST_F(TaskQueueManagerPerfTest, RunTenThousandDelayedTasks_ThirtyTwoQueues) {
+  if (!base::ThreadTicks::IsSupported())
+    return;
+  Initialize(32u);
+
+  max_tasks_in_flight_ = 200;
+  // The trace label previously read "eight queues" (copy-paste from the
+  // EightQueues test above), mislabelling this benchmark's reported results.
+  Benchmark("run 10000 delayed tasks with thirty two queues",
+            base::BindRepeating(
+                &TaskQueueManagerPerfTest::ResetAndCallTestDelayedTask,
+                base::Unretained(this), 10000));
+}
+
+// TODO(alexclarke): Add additional tests with different mixes of non-delayed vs
+// delayed tasks.
+
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector.cc
new file mode 100644
index 00000000000..46883b04c24
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector.cc
@@ -0,0 +1,388 @@
+// Copyright 2014 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_selector.h"
+
+#include "base/logging.h"
+#include "base/metrics/histogram_macros.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+namespace {
+
+TaskQueueSelectorLogic QueuePriorityToSelectorLogic(
+ TaskQueue::QueuePriority priority) {
+ switch (priority) {
+ case TaskQueue::kControlPriority:
+ return TaskQueueSelectorLogic::kControlPriorityLogic;
+ case TaskQueue::kHighestPriority:
+ return TaskQueueSelectorLogic::kHighestPriorityLogic;
+ case TaskQueue::kHighPriority:
+ return TaskQueueSelectorLogic::kHighPriorityLogic;
+ case TaskQueue::kNormalPriority:
+ return TaskQueueSelectorLogic::kNormalPriorityLogic;
+ case TaskQueue::kLowPriority:
+ return TaskQueueSelectorLogic::kLowPriorityLogic;
+ case TaskQueue::kBestEffortPriority:
+ return TaskQueueSelectorLogic::kBestEffortPriorityLogic;
+ default:
+ NOTREACHED();
+ return TaskQueueSelectorLogic::kCount;
+ }
+}
+
+// Helper function used to report the number of times a selector logic is
+// triggered. This will create a histogram for the enumerated data.
+void ReportTaskSelectionLogic(TaskQueueSelectorLogic selector_logic) {
+ UMA_HISTOGRAM_ENUMERATION("TaskQueueSelector.TaskServicedPerSelectorLogic",
+ selector_logic, TaskQueueSelectorLogic::kCount);
+}
+
+} // namespace
+
+TaskQueueSelector::TaskQueueSelector()
+ : prioritizing_selector_(this, "enabled"),
+ immediate_starvation_count_(0),
+ high_priority_starvation_score_(0),
+ normal_priority_starvation_score_(0),
+ low_priority_starvation_score_(0),
+ task_queue_selector_observer_(nullptr) {}
+
+TaskQueueSelector::~TaskQueueSelector() = default;
+
+void TaskQueueSelector::AddQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK(queue->IsQueueEnabled());
+ prioritizing_selector_.AddQueue(queue, TaskQueue::kNormalPriority);
+}
+
+void TaskQueueSelector::RemoveQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (queue->IsQueueEnabled()) {
+ prioritizing_selector_.RemoveQueue(queue);
+ }
+}
+
+void TaskQueueSelector::EnableQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK(queue->IsQueueEnabled());
+ prioritizing_selector_.AddQueue(queue, queue->GetQueuePriority());
+ if (task_queue_selector_observer_)
+ task_queue_selector_observer_->OnTaskQueueEnabled(queue);
+}
+
+void TaskQueueSelector::DisableQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK(!queue->IsQueueEnabled());
+ prioritizing_selector_.RemoveQueue(queue);
+}
+
+void TaskQueueSelector::SetQueuePriority(internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority) {
+ DCHECK_LT(priority, TaskQueue::kQueuePriorityCount);
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (queue->IsQueueEnabled()) {
+ prioritizing_selector_.ChangeSetIndex(queue, priority);
+ } else {
+ // Disabled queue is not in any set so we can't use ChangeSetIndex here
+ // and have to assign priority for the queue itself.
+ queue->delayed_work_queue()->AssignSetIndex(priority);
+ queue->immediate_work_queue()->AssignSetIndex(priority);
+ }
+ DCHECK_EQ(priority, queue->GetQueuePriority());
+}
+
+TaskQueue::QueuePriority TaskQueueSelector::NextPriority(
+ TaskQueue::QueuePriority priority) {
+ DCHECK(priority < TaskQueue::kQueuePriorityCount);
+ return static_cast<TaskQueue::QueuePriority>(static_cast<int>(priority) + 1);
+}
+
+TaskQueueSelector::PrioritizingSelector::PrioritizingSelector(
+ TaskQueueSelector* task_queue_selector,
+ const char* name)
+ : task_queue_selector_(task_queue_selector),
+ delayed_work_queue_sets_(TaskQueue::kQueuePriorityCount, name),
+ immediate_work_queue_sets_(TaskQueue::kQueuePriorityCount, name) {}
+
+void TaskQueueSelector::PrioritizingSelector::AddQueue(
+ internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority) {
+#if DCHECK_IS_ON()
+ DCHECK(!CheckContainsQueueForTest(queue));
+#endif
+ delayed_work_queue_sets_.AddQueue(queue->delayed_work_queue(), priority);
+ immediate_work_queue_sets_.AddQueue(queue->immediate_work_queue(), priority);
+#if DCHECK_IS_ON()
+ DCHECK(CheckContainsQueueForTest(queue));
+#endif
+}
+
+void TaskQueueSelector::PrioritizingSelector::ChangeSetIndex(
+ internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority) {
+#if DCHECK_IS_ON()
+ DCHECK(CheckContainsQueueForTest(queue));
+#endif
+ delayed_work_queue_sets_.ChangeSetIndex(queue->delayed_work_queue(),
+ priority);
+ immediate_work_queue_sets_.ChangeSetIndex(queue->immediate_work_queue(),
+ priority);
+#if DCHECK_IS_ON()
+ DCHECK(CheckContainsQueueForTest(queue));
+#endif
+}
+
+void TaskQueueSelector::PrioritizingSelector::RemoveQueue(
+ internal::TaskQueueImpl* queue) {
+#if DCHECK_IS_ON()
+ DCHECK(CheckContainsQueueForTest(queue));
+#endif
+ delayed_work_queue_sets_.RemoveQueue(queue->delayed_work_queue());
+ immediate_work_queue_sets_.RemoveQueue(queue->immediate_work_queue());
+
+#if DCHECK_IS_ON()
+ DCHECK(!CheckContainsQueueForTest(queue));
+#endif
+}
+
+bool TaskQueueSelector::PrioritizingSelector::
+ ChooseOldestImmediateTaskWithPriority(TaskQueue::QueuePriority priority,
+ WorkQueue** out_work_queue) const {
+ return immediate_work_queue_sets_.GetOldestQueueInSet(priority,
+ out_work_queue);
+}
+
+bool TaskQueueSelector::PrioritizingSelector::
+ ChooseOldestDelayedTaskWithPriority(TaskQueue::QueuePriority priority,
+ WorkQueue** out_work_queue) const {
+ return delayed_work_queue_sets_.GetOldestQueueInSet(priority, out_work_queue);
+}
+
+bool TaskQueueSelector::PrioritizingSelector::
+ ChooseOldestImmediateOrDelayedTaskWithPriority(
+ TaskQueue::QueuePriority priority,
+ bool* out_chose_delayed_over_immediate,
+ WorkQueue** out_work_queue) const {
+ WorkQueue* immediate_queue;
+ DCHECK_EQ(*out_chose_delayed_over_immediate, false);
+ EnqueueOrder immediate_enqueue_order;
+ if (immediate_work_queue_sets_.GetOldestQueueAndEnqueueOrderInSet(
+ priority, &immediate_queue, &immediate_enqueue_order)) {
+ WorkQueue* delayed_queue;
+ EnqueueOrder delayed_enqueue_order;
+ if (delayed_work_queue_sets_.GetOldestQueueAndEnqueueOrderInSet(
+ priority, &delayed_queue, &delayed_enqueue_order)) {
+ if (immediate_enqueue_order < delayed_enqueue_order) {
+ *out_work_queue = immediate_queue;
+ } else {
+ *out_chose_delayed_over_immediate = true;
+ *out_work_queue = delayed_queue;
+ }
+ } else {
+ *out_work_queue = immediate_queue;
+ }
+ return true;
+ }
+ return delayed_work_queue_sets_.GetOldestQueueInSet(priority, out_work_queue);
+}
+
+bool TaskQueueSelector::PrioritizingSelector::ChooseOldestWithPriority(
+ TaskQueue::QueuePriority priority,
+ bool* out_chose_delayed_over_immediate,
+ WorkQueue** out_work_queue) const {
+ // Select an immediate work queue if we are starving immediate tasks.
+ if (task_queue_selector_->immediate_starvation_count_ >=
+ kMaxDelayedStarvationTasks) {
+ if (ChooseOldestImmediateTaskWithPriority(priority, out_work_queue))
+ return true;
+ return ChooseOldestDelayedTaskWithPriority(priority, out_work_queue);
+ }
+ return ChooseOldestImmediateOrDelayedTaskWithPriority(
+ priority, out_chose_delayed_over_immediate, out_work_queue);
+}
+
+bool TaskQueueSelector::PrioritizingSelector::SelectWorkQueueToService(
+ TaskQueue::QueuePriority max_priority,
+ WorkQueue** out_work_queue,
+ bool* out_chose_delayed_over_immediate) {
+ DCHECK(task_queue_selector_->main_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(*out_chose_delayed_over_immediate, false);
+
+ // Always service the control queue if it has any work.
+ if (max_priority > TaskQueue::kControlPriority &&
+ ChooseOldestWithPriority(TaskQueue::kControlPriority,
+ out_chose_delayed_over_immediate,
+ out_work_queue)) {
+ ReportTaskSelectionLogic(TaskQueueSelectorLogic::kControlPriorityLogic);
+ return true;
+ }
+
+ // Select from the low priority queue if we are starving it.
+ if (max_priority > TaskQueue::kLowPriority &&
+ task_queue_selector_->low_priority_starvation_score_ >=
+ kMaxLowPriorityStarvationScore &&
+ ChooseOldestWithPriority(TaskQueue::kLowPriority,
+ out_chose_delayed_over_immediate,
+ out_work_queue)) {
+ ReportTaskSelectionLogic(
+ TaskQueueSelectorLogic::kLowPriorityStarvationLogic);
+ return true;
+ }
+
+ // Select from the normal priority queue if we are starving it.
+ if (max_priority > TaskQueue::kNormalPriority &&
+ task_queue_selector_->normal_priority_starvation_score_ >=
+ kMaxNormalPriorityStarvationScore &&
+ ChooseOldestWithPriority(TaskQueue::kNormalPriority,
+ out_chose_delayed_over_immediate,
+ out_work_queue)) {
+ ReportTaskSelectionLogic(
+ TaskQueueSelectorLogic::kNormalPriorityStarvationLogic);
+ return true;
+ }
+
+ // Select from the high priority queue if we are starving it.
+ if (max_priority > TaskQueue::kHighPriority &&
+ task_queue_selector_->high_priority_starvation_score_ >=
+ kMaxHighPriorityStarvationScore &&
+ ChooseOldestWithPriority(TaskQueue::kHighPriority,
+ out_chose_delayed_over_immediate,
+ out_work_queue)) {
+ ReportTaskSelectionLogic(
+ TaskQueueSelectorLogic::kHighPriorityStarvationLogic);
+ return true;
+ }
+
+ // Otherwise choose in priority order.
+ for (TaskQueue::QueuePriority priority = TaskQueue::kHighestPriority;
+ priority < max_priority; priority = NextPriority(priority)) {
+ if (ChooseOldestWithPriority(priority, out_chose_delayed_over_immediate,
+ out_work_queue)) {
+ ReportTaskSelectionLogic(QueuePriorityToSelectorLogic(priority));
+ return true;
+ }
+ }
+ return false;
+}
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+bool TaskQueueSelector::PrioritizingSelector::CheckContainsQueueForTest(
+ const internal::TaskQueueImpl* queue) const {
+ bool contains_delayed_work_queue =
+ delayed_work_queue_sets_.ContainsWorkQueueForTest(
+ queue->delayed_work_queue());
+
+ bool contains_immediate_work_queue =
+ immediate_work_queue_sets_.ContainsWorkQueueForTest(
+ queue->immediate_work_queue());
+
+ DCHECK_EQ(contains_delayed_work_queue, contains_immediate_work_queue);
+ return contains_delayed_work_queue;
+}
+#endif
+
+bool TaskQueueSelector::SelectWorkQueueToService(WorkQueue** out_work_queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ bool chose_delayed_over_immediate = false;
+ bool found_queue = prioritizing_selector_.SelectWorkQueueToService(
+ TaskQueue::kQueuePriorityCount, out_work_queue,
+ &chose_delayed_over_immediate);
+ if (!found_queue)
+ return false;
+
+ // We could use |(*out_work_queue)->task_queue()->GetQueuePriority()| here but
+ // for re-queued non-nestable tasks |task_queue()| returns null.
+ DidSelectQueueWithPriority(static_cast<TaskQueue::QueuePriority>(
+ (*out_work_queue)->work_queue_set_index()),
+ chose_delayed_over_immediate);
+ return true;
+}
+
+void TaskQueueSelector::DidSelectQueueWithPriority(
+ TaskQueue::QueuePriority priority,
+ bool chose_delayed_over_immediate) {
+ switch (priority) {
+ case TaskQueue::kControlPriority:
+ break;
+ case TaskQueue::kHighestPriority:
+ low_priority_starvation_score_ +=
+ kSmallScoreIncrementForLowPriorityStarvation;
+ normal_priority_starvation_score_ +=
+ kSmallScoreIncrementForNormalPriorityStarvation;
+ high_priority_starvation_score_ +=
+ kSmallScoreIncrementForHighPriorityStarvation;
+ break;
+ case TaskQueue::kHighPriority:
+ low_priority_starvation_score_ +=
+ kLargeScoreIncrementForLowPriorityStarvation;
+ normal_priority_starvation_score_ +=
+ kLargeScoreIncrementForNormalPriorityStarvation;
+ high_priority_starvation_score_ = 0;
+ break;
+ case TaskQueue::kNormalPriority:
+ low_priority_starvation_score_ +=
+ kLargeScoreIncrementForLowPriorityStarvation;
+ normal_priority_starvation_score_ = 0;
+ break;
+ case TaskQueue::kLowPriority:
+ case TaskQueue::kBestEffortPriority:
+ low_priority_starvation_score_ = 0;
+ high_priority_starvation_score_ = 0;
+ normal_priority_starvation_score_ = 0;
+ break;
+ default:
+ NOTREACHED();
+ }
+ if (chose_delayed_over_immediate) {
+ immediate_starvation_count_++;
+ } else {
+ immediate_starvation_count_ = 0;
+ }
+}
+
+void TaskQueueSelector::AsValueInto(
+ base::trace_event::TracedValue* state) const {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ state->SetInteger("high_priority_starvation_score",
+ high_priority_starvation_score_);
+ state->SetInteger("normal_priority_starvation_score",
+ normal_priority_starvation_score_);
+ state->SetInteger("low_priority_starvation_score",
+ low_priority_starvation_score_);
+ state->SetInteger("immediate_starvation_count", immediate_starvation_count_);
+}
+
+void TaskQueueSelector::SetTaskQueueSelectorObserver(Observer* observer) {
+ task_queue_selector_observer_ = observer;
+}
+
+bool TaskQueueSelector::AllEnabledWorkQueuesAreEmpty() const {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ for (TaskQueue::QueuePriority priority = TaskQueue::kControlPriority;
+ priority < TaskQueue::kQueuePriorityCount;
+ priority = NextPriority(priority)) {
+ if (!prioritizing_selector_.delayed_work_queue_sets()->IsSetEmpty(
+ priority) ||
+ !prioritizing_selector_.immediate_work_queue_sets()->IsSetEmpty(
+ priority)) {
+ return false;
+ }
+ }
+ return true;
+}
+
+void TaskQueueSelector::SetImmediateStarvationCountForTest(
+ size_t immediate_starvation_count) {
+ immediate_starvation_count_ = immediate_starvation_count;
+}
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector.h b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector.h
new file mode 100644
index 00000000000..8f39be19b6c
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector.h
@@ -0,0 +1,225 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_SELECTOR_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_SELECTOR_H_
+
+#include <stddef.h>
+
+#include <set>
+
+#include "base/compiler_specific.h"
+#include "base/macros.h"
+#include "base/pending_task.h"
+#include "base/threading/thread_checker.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_selector_logic.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+// TaskQueueSelector is used by the SchedulerHelper to enable prioritization
+// of particular task queues.
+class PLATFORM_EXPORT TaskQueueSelector {
+ public:
+ TaskQueueSelector();
+ ~TaskQueueSelector();
+
+ // Called to register a queue that can be selected. This function is called
+ // on the main thread.
+ void AddQueue(internal::TaskQueueImpl* queue);
+
+  // The specified queue will no longer be considered for selection. This
+  // function is called on the main thread.
+ void RemoveQueue(internal::TaskQueueImpl* queue);
+
+ // Make |queue| eligible for selection. This function is called on the main
+ // thread. Must only be called if |queue| is disabled.
+ void EnableQueue(internal::TaskQueueImpl* queue);
+
+ // Disable selection from |queue|. Must only be called if |queue| is enabled.
+ void DisableQueue(internal::TaskQueueImpl* queue);
+
+  // Called to set the priority of |queue|. This function is called on the
+ void SetQueuePriority(internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority);
+
+ // Called to choose the work queue from which the next task should be taken
+ // and run. Return true if |out_work_queue| indicates the queue to service or
+ // false to avoid running any task.
+ //
+ // This function is called on the main thread.
+ bool SelectWorkQueueToService(WorkQueue** out_work_queue);
+
+ // Serialize the selector state for tracing.
+ void AsValueInto(base::trace_event::TracedValue* state) const;
+
+ class PLATFORM_EXPORT Observer {
+ public:
+ virtual ~Observer() = default;
+
+ // Called when |queue| transitions from disabled to enabled.
+ virtual void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) = 0;
+ };
+
+ // Called once to set the Observer. This function is called
+ // on the main thread. If |observer| is null, then no callbacks will occur.
+ void SetTaskQueueSelectorObserver(Observer* observer);
+
+ // Returns true if all the enabled work queues are empty. Returns false
+ // otherwise.
+ bool AllEnabledWorkQueuesAreEmpty() const;
+
+ protected:
+ class PLATFORM_EXPORT PrioritizingSelector {
+ public:
+ PrioritizingSelector(TaskQueueSelector* task_queue_selector,
+ const char* name);
+
+ void ChangeSetIndex(internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority);
+ void AddQueue(internal::TaskQueueImpl* queue,
+ TaskQueue::QueuePriority priority);
+ void RemoveQueue(internal::TaskQueueImpl* queue);
+
+ bool SelectWorkQueueToService(TaskQueue::QueuePriority max_priority,
+ WorkQueue** out_work_queue,
+ bool* out_chose_delayed_over_immediate);
+
+ WorkQueueSets* delayed_work_queue_sets() {
+ return &delayed_work_queue_sets_;
+ }
+ WorkQueueSets* immediate_work_queue_sets() {
+ return &immediate_work_queue_sets_;
+ }
+
+ const WorkQueueSets* delayed_work_queue_sets() const {
+ return &delayed_work_queue_sets_;
+ }
+ const WorkQueueSets* immediate_work_queue_sets() const {
+ return &immediate_work_queue_sets_;
+ }
+
+ bool ChooseOldestWithPriority(TaskQueue::QueuePriority priority,
+ bool* out_chose_delayed_over_immediate,
+ WorkQueue** out_work_queue) const;
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+ bool CheckContainsQueueForTest(const internal::TaskQueueImpl* queue) const;
+#endif
+
+ private:
+ bool ChooseOldestImmediateTaskWithPriority(
+ TaskQueue::QueuePriority priority,
+ WorkQueue** out_work_queue) const;
+
+ bool ChooseOldestDelayedTaskWithPriority(TaskQueue::QueuePriority priority,
+ WorkQueue** out_work_queue) const;
+
+ // Return true if |out_queue| contains the queue with the oldest pending
+ // task from the set of queues of |priority|, or false if all queues of that
+ // priority are empty. In addition |out_chose_delayed_over_immediate| is set
+ // to true iff we chose a delayed work queue in favour of an immediate work
+ // queue.
+ bool ChooseOldestImmediateOrDelayedTaskWithPriority(
+ TaskQueue::QueuePriority priority,
+ bool* out_chose_delayed_over_immediate,
+ WorkQueue** out_work_queue) const;
+
+ const TaskQueueSelector* task_queue_selector_;
+ WorkQueueSets delayed_work_queue_sets_;
+ WorkQueueSets immediate_work_queue_sets_;
+
+ DISALLOW_COPY_AND_ASSIGN(PrioritizingSelector);
+ };
+
+  // Sets the number of delayed tasks that have been selected in a row while
+  // an immediate task was waiting. Once this count reaches
+  // kMaxDelayedStarvationTasks, the selector force-selects an immediate work
+  // queue to avoid starving immediate tasks. Exposed for tests so they can
+  // exercise the starvation-avoidance path directly without first having to
+  // run kMaxDelayedStarvationTasks delayed tasks.
+ void SetImmediateStarvationCountForTest(size_t immediate_starvation_count);
+
+ PrioritizingSelector* prioritizing_selector_for_test() {
+ return &prioritizing_selector_;
+ }
+
+ private:
+ // Returns the priority which is next after |priority|.
+ static TaskQueue::QueuePriority NextPriority(
+ TaskQueue::QueuePriority priority);
+
+ bool SelectWorkQueueToServiceInternal(WorkQueue** out_work_queue);
+
+ // Called whenever the selector chooses a task queue for execution with the
+ // priority |priority|.
+ void DidSelectQueueWithPriority(TaskQueue::QueuePriority priority,
+ bool chose_delayed_over_immediate);
+
+ // Maximum score to accumulate before high priority tasks are run even in
+ // the presence of highest priority tasks.
+ static const size_t kMaxHighPriorityStarvationScore = 3;
+
+ // Increment to be applied to the high priority starvation score when a task
+ // should have only a small effect on the score. E.g. A number of highest
+ // priority tasks must run before the high priority queue is considered
+ // starved.
+ static const size_t kSmallScoreIncrementForHighPriorityStarvation = 1;
+
+ // Maximum score to accumulate before normal priority tasks are run even in
+ // the presence of higher priority tasks i.e. highest and high priority tasks.
+ static const size_t kMaxNormalPriorityStarvationScore = 5;
+
+ // Increment to be applied to the normal priority starvation score when a task
+ // should have a large effect on the score. E.g Only a few high priority
+ // priority tasks must run before the normal priority queue is considered
+ // starved.
+ static const size_t kLargeScoreIncrementForNormalPriorityStarvation = 2;
+
+ // Increment to be applied to the normal priority starvation score when a task
+ // should have only a small effect on the score. E.g. A number of highest
+ // priority tasks must run before the normal priority queue is considered
+ // starved.
+ static const size_t kSmallScoreIncrementForNormalPriorityStarvation = 1;
+
+ // Maximum score to accumulate before low priority tasks are run even in the
+ // presence of highest, high, or normal priority tasks.
+ static const size_t kMaxLowPriorityStarvationScore = 25;
+
+ // Increment to be applied to the low priority starvation score when a task
+ // should have a large effect on the score. E.g. Only a few normal/high
+ // priority tasks must run before the low priority queue is considered
+ // starved.
+ static const size_t kLargeScoreIncrementForLowPriorityStarvation = 5;
+
+ // Increment to be applied to the low priority starvation score when a task
+ // should have only a small effect on the score. E.g. A lot of highest
+ // priority tasks must run before the low priority queue is considered
+ // starved.
+ static const size_t kSmallScoreIncrementForLowPriorityStarvation = 1;
+
+  // Maximum number of delayed tasks which can be run while there's a waiting
+  // non-delayed (immediate) task.
+ static const size_t kMaxDelayedStarvationTasks = 3;
+
+ private:
+ base::ThreadChecker main_thread_checker_;
+
+ PrioritizingSelector prioritizing_selector_;
+ size_t immediate_starvation_count_;
+ size_t high_priority_starvation_score_;
+ size_t normal_priority_starvation_score_;
+ size_t low_priority_starvation_score_;
+
+ Observer* task_queue_selector_observer_; // NOT OWNED
+ DISALLOW_COPY_AND_ASSIGN(TaskQueueSelector);
+};
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_SELECTOR_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector_logic.h b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector_logic.h
new file mode 100644
index 00000000000..e29f25788f3
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector_logic.h
@@ -0,0 +1,35 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_SELECTOR_LOGIC_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_QUEUE_SELECTOR_LOGIC_H_
+
+namespace blink {
+namespace scheduler {
+
+// Used to describe the logic triggered when a task queue is selected for
+// service.
+// This enum is used for histograms and should not be renumbered.
+enum class TaskQueueSelectorLogic {
+
+ // Selected due to priority rules.
+ kControlPriorityLogic = 0,
+ kHighestPriorityLogic = 1,
+ kHighPriorityLogic = 2,
+ kNormalPriorityLogic = 3,
+ kLowPriorityLogic = 4,
+ kBestEffortPriorityLogic = 5,
+
+ // Selected due to starvation logic.
+ kHighPriorityStarvationLogic = 6,
+ kNormalPriorityStarvationLogic = 7,
+ kLowPriorityStarvationLogic = 8,
+
+ kCount = 9,
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector_unittest.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector_unittest.cc
new file mode 100644
index 00000000000..46a59f25aa5
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_queue_selector_unittest.cc
@@ -0,0 +1,714 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_selector.h"
+
+#include <stddef.h>
+
+#include <memory>
+
+#include "base/bind.h"
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/pending_task.h"
+#include "base/test/histogram_tester.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "testing/gtest/include/gtest/gtest.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h"
+
+using testing::_;
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+// To avoid symbol collisions in jumbo builds.
+namespace task_queue_selector_unittest {
+
+class MockObserver : public TaskQueueSelector::Observer {
+ public:
+ MockObserver() = default;
+ virtual ~MockObserver() = default;
+
+ MOCK_METHOD1(OnTaskQueueEnabled, void(internal::TaskQueueImpl*));
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(MockObserver);
+};
+
+class TaskQueueSelectorForTest : public TaskQueueSelector {
+ public:
+ using TaskQueueSelector::prioritizing_selector_for_test;
+ using TaskQueueSelector::PrioritizingSelector;
+ using TaskQueueSelector::SetImmediateStarvationCountForTest;
+};
+
+class TaskQueueSelectorTest : public testing::Test {
+ public:
+ TaskQueueSelectorTest()
+ : test_closure_(
+ base::BindRepeating(&TaskQueueSelectorTest::TestFunction)) {}
+ ~TaskQueueSelectorTest() override = default;
+
+ TaskQueueSelectorForTest::PrioritizingSelector* prioritizing_selector() {
+ return selector_.prioritizing_selector_for_test();
+ }
+
+ WorkQueueSets* delayed_work_queue_sets() {
+ return prioritizing_selector()->delayed_work_queue_sets();
+ }
+ WorkQueueSets* immediate_work_queue_sets() {
+ return prioritizing_selector()->immediate_work_queue_sets();
+ }
+
+ void PushTasks(const size_t queue_indices[], size_t num_tasks) {
+ std::set<size_t> changed_queue_set;
+ for (size_t i = 0; i < num_tasks; i++) {
+ changed_queue_set.insert(queue_indices[i]);
+ task_queues_[queue_indices[i]]->immediate_work_queue()->Push(
+ TaskQueueImpl::Task(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ base::TimeTicks(), 0, i));
+ }
+ }
+
+ void PushTasksWithEnqueueOrder(const size_t queue_indices[],
+ const size_t enqueue_orders[],
+ size_t num_tasks) {
+ std::set<size_t> changed_queue_set;
+ for (size_t i = 0; i < num_tasks; i++) {
+ changed_queue_set.insert(queue_indices[i]);
+ task_queues_[queue_indices[i]]->immediate_work_queue()->Push(
+ TaskQueueImpl::Task(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ base::TimeTicks(), 0, enqueue_orders[i]));
+ }
+ }
+
+ std::vector<size_t> PopTasks() {
+ std::vector<size_t> order;
+ WorkQueue* chosen_work_queue;
+ while (selector_.SelectWorkQueueToService(&chosen_work_queue)) {
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ order.push_back(chosen_queue_index);
+ chosen_work_queue->PopTaskForTesting();
+ immediate_work_queue_sets()->OnPopQueue(chosen_work_queue);
+ }
+ return order;
+ }
+
+ static void TestFunction() {}
+
+ protected:
+ void SetUp() final {
+ virtual_time_domain_ = base::WrapUnique<VirtualTimeDomain>(
+ new VirtualTimeDomain(base::TimeTicks()));
+ for (size_t i = 0; i < kTaskQueueCount; i++) {
+ std::unique_ptr<TaskQueueImpl> task_queue =
+ std::make_unique<TaskQueueImpl>(nullptr, virtual_time_domain_.get(),
+ TaskQueue::Spec("test"));
+ selector_.AddQueue(task_queue.get());
+ task_queues_.push_back(std::move(task_queue));
+ }
+ for (size_t i = 0; i < kTaskQueueCount; i++) {
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[i]->GetQueuePriority())
+ << i;
+ queue_to_index_map_.insert(std::make_pair(task_queues_[i].get(), i));
+ }
+ histogram_tester_.reset(new base::HistogramTester());
+ }
+
+ void TearDown() final {
+ for (std::unique_ptr<TaskQueueImpl>& task_queue : task_queues_) {
+ // Note since this test doesn't have a TaskQueueManager we need to
+ // manually remove |task_queue| from the |selector_|. Normally
+ // UnregisterTaskQueue would do that.
+ selector_.RemoveQueue(task_queue.get());
+ task_queue->UnregisterTaskQueue();
+ }
+ }
+
+ std::unique_ptr<TaskQueueImpl> NewTaskQueueWithBlockReporting() {
+ return std::make_unique<TaskQueueImpl>(nullptr, virtual_time_domain_.get(),
+ TaskQueue::Spec("test"));
+ }
+
+ const size_t kTaskQueueCount = 5;
+ base::RepeatingClosure test_closure_;
+ TaskQueueSelectorForTest selector_;
+ std::unique_ptr<VirtualTimeDomain> virtual_time_domain_;
+ std::vector<std::unique_ptr<TaskQueueImpl>> task_queues_;
+ std::map<TaskQueueImpl*, size_t> queue_to_index_map_;
+ std::unique_ptr<base::HistogramTester> histogram_tester_;
+};
+
+TEST_F(TaskQueueSelectorTest, TestDefaultPriority) {
+ size_t queue_order[] = {4, 3, 2, 1, 0};
+ PushTasks(queue_order, 5);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(4, 3, 2, 1, 0));
+ EXPECT_EQ(histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kNormalPriorityLogic)),
+ 5);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighestPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(2, 0, 1, 3, 4));
+ EXPECT_EQ(
+ histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kHighestPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kLowPriority);
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(2, 1, 3, 4, 0));
+ EXPECT_EQ(histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kHighPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestLowPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[2].get(), TaskQueue::kLowPriority);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(0, 1, 3, 4, 2));
+ EXPECT_EQ(histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kLowPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestBestEffortPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kBestEffortPriority);
+ selector_.SetQueuePriority(task_queues_[2].get(), TaskQueue::kLowPriority);
+ selector_.SetQueuePriority(task_queues_[3].get(),
+ TaskQueue::kHighestPriority);
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(3, 1, 4, 2, 0));
+ EXPECT_EQ(
+ histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kBestEffortPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestControlPriority) {
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ selector_.SetQueuePriority(task_queues_[4].get(),
+ TaskQueue::kControlPriority);
+ EXPECT_EQ(TaskQueue::kControlPriority, task_queues_[4]->GetQueuePriority());
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ EXPECT_EQ(TaskQueue::kHighestPriority, task_queues_[2]->GetQueuePriority());
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(4, 2, 0, 1, 3));
+ EXPECT_EQ(
+ histogram_tester_->GetBucketCount(
+ "TaskQueueSelector.TaskServicedPerSelectorLogic",
+ static_cast<int>(TaskQueueSelectorLogic::kControlPriorityLogic)),
+ 1);
+}
+
+TEST_F(TaskQueueSelectorTest, TestObserverWithEnabledQueue) {
+ task_queues_[1]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[1].get());
+ MockObserver mock_observer;
+ selector_.SetTaskQueueSelectorObserver(&mock_observer);
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(1);
+ task_queues_[1]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[1].get());
+}
+
+TEST_F(TaskQueueSelectorTest,
+ TestObserverWithSetQueuePriorityAndQueueAlreadyEnabled) {
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+ MockObserver mock_observer;
+ selector_.SetTaskQueueSelectorObserver(&mock_observer);
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(0);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kNormalPriority);
+}
+
+TEST_F(TaskQueueSelectorTest, TestDisableEnable) {
+ MockObserver mock_observer;
+ selector_.SetTaskQueueSelectorObserver(&mock_observer);
+
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+ task_queues_[2]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[2].get());
+ task_queues_[4]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[4].get());
+ // Disabling a queue should not affect its priority.
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[2]->GetQueuePriority());
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[4]->GetQueuePriority());
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(0, 1, 3));
+
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(2);
+ task_queues_[2]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[2].get());
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kBestEffortPriority);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(2));
+ task_queues_[4]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[4].get());
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(4));
+}
+
+TEST_F(TaskQueueSelectorTest, TestDisableChangePriorityThenEnable) {
+ EXPECT_TRUE(task_queues_[2]->delayed_work_queue()->Empty());
+ EXPECT_TRUE(task_queues_[2]->immediate_work_queue()->Empty());
+
+ task_queues_[2]->SetQueueEnabledForTest(false);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasks(queue_order, 5);
+
+ EXPECT_TRUE(task_queues_[2]->delayed_work_queue()->Empty());
+ EXPECT_FALSE(task_queues_[2]->immediate_work_queue()->Empty());
+ task_queues_[2]->SetQueueEnabledForTest(true);
+
+ EXPECT_EQ(TaskQueue::kHighestPriority, task_queues_[2]->GetQueuePriority());
+ EXPECT_THAT(PopTasks(), ::testing::ElementsAre(2, 0, 1, 3, 4));
+}
+
+TEST_F(TaskQueueSelectorTest, TestEmptyQueues) {
+ WorkQueue* chosen_work_queue = nullptr;
+ EXPECT_FALSE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // Test only disabled queues.
+ size_t queue_order[] = {0};
+ PushTasks(queue_order, 1);
+ task_queues_[0]->SetQueueEnabledForTest(false);
+ selector_.DisableQueue(task_queues_[0].get());
+ EXPECT_FALSE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+
+ // These tests are unusual since there's no TQM. To avoid a later DCHECK when
+ // deleting the task queue, we re-enable the queue here so the selector
+ // doesn't get out of sync.
+ task_queues_[0]->SetQueueEnabledForTest(true);
+ selector_.EnableQueue(task_queues_[0].get());
+}
+
+TEST_F(TaskQueueSelectorTest, TestAge) {
+ size_t enqueue_order[] = {10, 1, 2, 9, 4};
+ size_t queue_order[] = {0, 1, 2, 3, 4};
+ PushTasksWithEnqueueOrder(queue_order, enqueue_order, 5);
+ EXPECT_THAT(PopTasks(), testing::ElementsAre(1, 2, 4, 3, 0));
+}
+
+TEST_F(TaskQueueSelectorTest, TestControlStarvesOthers) {
+ size_t queue_order[] = {0, 1, 2, 3};
+ PushTasks(queue_order, 4);
+ selector_.SetQueuePriority(task_queues_[3].get(),
+ TaskQueue::kControlPriority);
+ selector_.SetQueuePriority(task_queues_[2].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kBestEffortPriority);
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[3].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighestPriorityDoesNotStarveHigh) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+
+ size_t counts[] = {0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+ EXPECT_GT(counts[1], 0ul); // Check highest doesn't starve high.
+ EXPECT_GT(counts[0], counts[1]); // Check highest gets more chance to run.
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighestPriorityDoesNotStarveHighOrNormal) {
+ size_t queue_order[] = {0, 1, 2};
+ PushTasks(queue_order, 3);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+
+ size_t counts[] = {0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+  // Check highest runs more frequently than high.
+ EXPECT_GT(counts[0], counts[1]);
+
+ // Check high runs at least as frequently as normal.
+ EXPECT_GE(counts[1], counts[2]);
+
+ // Check normal isn't starved.
+ EXPECT_GT(counts[2], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest,
+ TestHighestPriorityDoesNotStarveHighOrNormalOrLow) {
+ size_t queue_order[] = {0, 1, 2, 3};
+ PushTasks(queue_order, 4);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kHighestPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[3].get(), TaskQueue::kLowPriority);
+
+ size_t counts[] = {0, 0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+  // Check highest runs more frequently than high.
+ EXPECT_GT(counts[0], counts[1]);
+
+ // Check high runs at least as frequently as normal.
+ EXPECT_GE(counts[1], counts[2]);
+
+ // Check normal runs more frequently than low.
+ EXPECT_GT(counts[2], counts[3]);
+
+ // Check low isn't starved.
+ EXPECT_GT(counts[3], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighPriorityDoesNotStarveNormal) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kHighPriority);
+
+ size_t counts[] = {0, 0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+  // Check high runs more frequently than normal.
+ EXPECT_GT(counts[0], counts[1]);
+
+  // Check normal isn't starved (queue 1 is still normal priority).
+ EXPECT_GT(counts[1], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest, TestHighPriorityDoesNotStarveNormalOrLow) {
+ size_t queue_order[] = {0, 1, 2};
+ PushTasks(queue_order, 3);
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kHighPriority);
+ selector_.SetQueuePriority(task_queues_[2].get(), TaskQueue::kLowPriority);
+
+ size_t counts[] = {0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check high runs more frequently than normal.
+ EXPECT_GT(counts[0], counts[1]);
+
+ // Check normal runs more frequently than low.
+ EXPECT_GT(counts[1], counts[2]);
+
+ // Check low isn't starved.
+ EXPECT_GT(counts[2], 0ul);
+}
+
+TEST_F(TaskQueueSelectorTest, TestNormalPriorityDoesNotStarveLow) {
+ size_t queue_order[] = {0, 1, 2};
+ PushTasks(queue_order, 3);
+ selector_.SetQueuePriority(task_queues_[0].get(), TaskQueue::kLowPriority);
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kBestEffortPriority);
+ size_t counts[] = {0, 0, 0};
+ for (int i = 0; i < 100; i++) {
+ WorkQueue* chosen_work_queue = nullptr;
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ size_t chosen_queue_index =
+ queue_to_index_map_.find(chosen_work_queue->task_queue())->second;
+ counts[chosen_queue_index]++;
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+ EXPECT_GT(counts[0], 0ul); // Check normal doesn't starve low.
+ EXPECT_GT(counts[2], counts[0]); // Check normal gets more chance to run.
+ EXPECT_EQ(0ul, counts[1]); // Check best effort is starved.
+}
+
+TEST_F(TaskQueueSelectorTest, TestBestEffortGetsStarved) {
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kBestEffortPriority);
+ EXPECT_EQ(TaskQueue::kNormalPriority, task_queues_[1]->GetQueuePriority());
+
+ // Check that normal priority tasks starve best effort.
+ WorkQueue* chosen_work_queue = nullptr;
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that highest priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kHighestPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that high priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kHighPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that low priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(), TaskQueue::kLowPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+
+ // Check that control priority tasks starve best effort.
+ selector_.SetQueuePriority(task_queues_[1].get(),
+ TaskQueue::kControlPriority);
+ for (int i = 0; i < 100; i++) {
+ ASSERT_TRUE(selector_.SelectWorkQueueToService(&chosen_work_queue));
+ EXPECT_EQ(task_queues_[1].get(), chosen_work_queue->task_queue());
+ // Don't remove task from queue to simulate all queues still being full.
+ }
+}
+
+TEST_F(TaskQueueSelectorTest, AllEnabledWorkQueuesAreEmpty) {
+ EXPECT_TRUE(selector_.AllEnabledWorkQueuesAreEmpty());
+ size_t queue_order[] = {0, 1};
+ PushTasks(queue_order, 2);
+
+ EXPECT_FALSE(selector_.AllEnabledWorkQueuesAreEmpty());
+ PopTasks();
+ EXPECT_TRUE(selector_.AllEnabledWorkQueuesAreEmpty());
+}
+
+TEST_F(TaskQueueSelectorTest, AllEnabledWorkQueuesAreEmpty_ControlPriority) {
+ size_t queue_order[] = {0};
+ PushTasks(queue_order, 1);
+
+ selector_.SetQueuePriority(task_queues_[0].get(),
+ TaskQueue::kControlPriority);
+
+ EXPECT_FALSE(selector_.AllEnabledWorkQueuesAreEmpty());
+}
+
+TEST_F(TaskQueueSelectorTest, ChooseOldestWithPriority_Empty) {
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_FALSE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_FALSE(chose_delayed_over_immediate);
+}
+
+TEST_F(TaskQueueSelectorTest, ChooseOldestWithPriority_OnlyDelayed) {
+ task_queues_[0]->delayed_work_queue()->Push(
+ TaskQueueImpl::Task(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ base::TimeTicks(), 0, 0));
+
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_TRUE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_EQ(chosen_work_queue, task_queues_[0]->delayed_work_queue());
+ EXPECT_FALSE(chose_delayed_over_immediate);
+}
+
+TEST_F(TaskQueueSelectorTest, ChooseOldestWithPriority_OnlyImmediate) {
+ task_queues_[0]->immediate_work_queue()->Push(
+ TaskQueueImpl::Task(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ base::TimeTicks(), 0, 0));
+
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_TRUE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_EQ(chosen_work_queue, task_queues_[0]->immediate_work_queue());
+ EXPECT_FALSE(chose_delayed_over_immediate);
+}
+
+TEST_F(TaskQueueSelectorTest, TestObserverWithOneBlockedQueue) {
+ TaskQueueSelectorForTest selector;
+ MockObserver mock_observer;
+ selector.SetTaskQueueSelectorObserver(&mock_observer);
+
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(1);
+
+ std::unique_ptr<TaskQueueImpl> task_queue(NewTaskQueueWithBlockReporting());
+ selector.AddQueue(task_queue.get());
+
+ task_queue->SetQueueEnabledForTest(false);
+ selector.DisableQueue(task_queue.get());
+
+ TaskQueueImpl::Task task(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ base::TimeTicks(), 0);
+ task.set_enqueue_order(0);
+ task_queue->immediate_work_queue()->Push(std::move(task));
+
+ WorkQueue* chosen_work_queue;
+ EXPECT_FALSE(selector.SelectWorkQueueToService(&chosen_work_queue));
+
+ task_queue->SetQueueEnabledForTest(true);
+ selector.EnableQueue(task_queue.get());
+ selector.RemoveQueue(task_queue.get());
+ task_queue->UnregisterTaskQueue();
+}
+
+TEST_F(TaskQueueSelectorTest, TestObserverWithTwoBlockedQueues) {
+ TaskQueueSelectorForTest selector;
+ MockObserver mock_observer;
+ selector.SetTaskQueueSelectorObserver(&mock_observer);
+
+ std::unique_ptr<TaskQueueImpl> task_queue(NewTaskQueueWithBlockReporting());
+ std::unique_ptr<TaskQueueImpl> task_queue2(NewTaskQueueWithBlockReporting());
+ selector.AddQueue(task_queue.get());
+ selector.AddQueue(task_queue2.get());
+
+ task_queue->SetQueueEnabledForTest(false);
+ task_queue2->SetQueueEnabledForTest(false);
+ selector.DisableQueue(task_queue.get());
+ selector.DisableQueue(task_queue2.get());
+
+ selector.SetQueuePriority(task_queue2.get(), TaskQueue::kControlPriority);
+
+ TaskQueueImpl::Task task1(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ base::TimeTicks(), 0);
+ TaskQueueImpl::Task task2(TaskQueue::PostedTask(test_closure_, FROM_HERE),
+ base::TimeTicks(), 1);
+ task1.set_enqueue_order(0);
+ task2.set_enqueue_order(1);
+ task_queue->immediate_work_queue()->Push(std::move(task1));
+ task_queue2->immediate_work_queue()->Push(std::move(task2));
+
+ WorkQueue* chosen_work_queue;
+ EXPECT_FALSE(selector.SelectWorkQueueToService(&chosen_work_queue));
+ testing::Mock::VerifyAndClearExpectations(&mock_observer);
+
+ EXPECT_CALL(mock_observer, OnTaskQueueEnabled(_)).Times(2);
+
+ task_queue->SetQueueEnabledForTest(true);
+ selector.EnableQueue(task_queue.get());
+
+ selector.RemoveQueue(task_queue.get());
+ task_queue->UnregisterTaskQueue();
+ EXPECT_FALSE(selector.SelectWorkQueueToService(&chosen_work_queue));
+
+ task_queue2->SetQueueEnabledForTest(true);
+ selector.EnableQueue(task_queue2.get());
+ selector.RemoveQueue(task_queue2.get());
+ task_queue2->UnregisterTaskQueue();
+}
+
+struct ChooseOldestWithPriorityTestParam {
+ int delayed_task_enqueue_order;
+ int immediate_task_enqueue_order;
+ int immediate_starvation_count;
+ const char* expected_work_queue_name;
+ bool expected_did_starve_immediate_queue;
+};
+
+static const ChooseOldestWithPriorityTestParam
+ kChooseOldestWithPriorityTestCases[] = {
+ {1, 2, 0, "delayed", true}, {1, 2, 1, "delayed", true},
+ {1, 2, 2, "delayed", true}, {1, 2, 3, "immediate", false},
+ {1, 2, 4, "immediate", false}, {2, 1, 4, "immediate", false},
+ {2, 1, 4, "immediate", false},
+};
+
+class ChooseOldestWithPriorityTest
+ : public TaskQueueSelectorTest,
+ public testing::WithParamInterface<ChooseOldestWithPriorityTestParam> {};
+
+TEST_P(ChooseOldestWithPriorityTest, RoundRobinTest) {
+ task_queues_[0]->immediate_work_queue()->Push(TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), base::TimeTicks(),
+ GetParam().immediate_task_enqueue_order,
+ GetParam().immediate_task_enqueue_order));
+
+ task_queues_[0]->delayed_work_queue()->Push(TaskQueueImpl::Task(
+ TaskQueue::PostedTask(test_closure_, FROM_HERE), base::TimeTicks(),
+ GetParam().delayed_task_enqueue_order,
+ GetParam().delayed_task_enqueue_order));
+
+ selector_.SetImmediateStarvationCountForTest(
+ GetParam().immediate_starvation_count);
+
+ WorkQueue* chosen_work_queue = nullptr;
+ bool chose_delayed_over_immediate = false;
+ EXPECT_TRUE(prioritizing_selector()->ChooseOldestWithPriority(
+ TaskQueue::kNormalPriority, &chose_delayed_over_immediate,
+ &chosen_work_queue));
+ EXPECT_EQ(chosen_work_queue->task_queue(), task_queues_[0].get());
+ EXPECT_STREQ(chosen_work_queue->name(), GetParam().expected_work_queue_name);
+ EXPECT_EQ(chose_delayed_over_immediate,
+ GetParam().expected_did_starve_immediate_queue);
+}
+
+INSTANTIATE_TEST_CASE_P(ChooseOldestWithPriorityTest,
+ ChooseOldestWithPriorityTest,
+ testing::ValuesIn(kChooseOldestWithPriorityTestCases));
+
+} // namespace task_queue_selector_unittest
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/task_time_observer.h b/chromium/third_party/blink/renderer/platform/scheduler/base/task_time_observer.h
new file mode 100644
index 00000000000..cde2e569708
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/task_time_observer.h
@@ -0,0 +1,36 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_TIME_OBSERVER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_TIME_OBSERVER_H_
+
+#include "base/time/time.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+
+namespace blink {
+namespace scheduler {
+
+// TaskTimeObserver provides an API for observing completion of renderer tasks.
+class PLATFORM_EXPORT TaskTimeObserver {
+ public:
+ TaskTimeObserver() = default;
+ virtual ~TaskTimeObserver() = default;
+
+ // Callback to be called when task is about to start.
+  // |start_time| - time in seconds when task started to run.
+ virtual void WillProcessTask(double start_time) = 0;
+
+ // Callback to be called when task is completed.
+ // |start_time| - time in seconds when task started to run,
+ // |end_time| - time in seconds when task was completed.
+ virtual void DidProcessTask(double start_time, double end_time) = 0;
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TaskTimeObserver);
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TASK_TIME_OBSERVER_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/test_count_uses_time_source.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/test_count_uses_time_source.cc
new file mode 100644
index 00000000000..9b6e17ad504
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/test_count_uses_time_source.cc
@@ -0,0 +1,22 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/test_count_uses_time_source.h"
+#include "testing/gtest/include/gtest/gtest.h"
+
+namespace blink {
+namespace scheduler {
+
+TestCountUsesTimeSource::TestCountUsesTimeSource() : now_calls_count_(0) {}
+
+TestCountUsesTimeSource::~TestCountUsesTimeSource() = default;
+
+base::TimeTicks TestCountUsesTimeSource::NowTicks() const {
+ now_calls_count_++;
+ // Don't return 0, as it triggers some assertions.
+ return base::TimeTicks() + base::TimeDelta::FromSeconds(1);
+}
+
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/test_count_uses_time_source.h b/chromium/third_party/blink/renderer/platform/scheduler/base/test_count_uses_time_source.h
new file mode 100644
index 00000000000..c953ab16c55
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/test_count_uses_time_source.h
@@ -0,0 +1,31 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TEST_COUNT_USES_TIME_SOURCE_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TEST_COUNT_USES_TIME_SOURCE_H_
+
+#include "base/macros.h"
+#include "base/time/tick_clock.h"
+
+namespace blink {
+namespace scheduler {
+
+class TestCountUsesTimeSource : public base::TickClock {
+ public:
+ explicit TestCountUsesTimeSource();
+ ~TestCountUsesTimeSource() override;
+
+ base::TimeTicks NowTicks() const override;
+ int now_calls_count() const { return now_calls_count_; }
+
+ private:
+ DISALLOW_COPY_AND_ASSIGN(TestCountUsesTimeSource);
+
+ mutable int now_calls_count_;
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TEST_COUNT_USES_TIME_SOURCE_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/test_task_time_observer.h b/chromium/third_party/blink/renderer/platform/scheduler/base/test_task_time_observer.h
new file mode 100644
index 00000000000..62a4db3b58f
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/test_task_time_observer.h
@@ -0,0 +1,23 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TEST_TASK_TIME_OBSERVER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TEST_TASK_TIME_OBSERVER_H_
+
+#include "base/time/time.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_time_observer.h"
+
+namespace blink {
+namespace scheduler {
+
+class TestTaskTimeObserver : public TaskTimeObserver {
+ public:
+ void WillProcessTask(double start_time) override {}
+ void DidProcessTask(double start_time, double end_time) override {}
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TEST_TASK_TIME_OBSERVER_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller.h b/chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller.h
new file mode 100644
index 00000000000..cfae477eb76
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller.h
@@ -0,0 +1,94 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_THREAD_CONTROLLER_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_THREAD_CONTROLLER_H_
+
+#include "base/location.h"
+#include "base/run_loop.h"
+#include "base/single_thread_task_runner.h"
+#include "base/time/time.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+
+namespace base {
+class TickClock;
+struct PendingTask;
+};
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+class SequencedTaskSource;
+
+// Interface for TaskQueueManager to schedule work to be run.
+class PLATFORM_EXPORT ThreadController {
+ public:
+ virtual ~ThreadController() = default;
+
+ // Set the number of tasks executed in a single invocation of DoWork.
+ // Increasing the batch size can reduce the overhead of yielding back to the
+ // main message loop. The batch size is 1 by default.
+ virtual void SetWorkBatchSize(int work_batch_size) = 0;
+
+ // Notifies that |pending_task| was enqueued. Needed for tracing purposes.
+ virtual void DidQueueTask(const base::PendingTask& pending_task) = 0;
+
+ // Notify the controller that its associated sequence has immediate work
+ // to run. Shortly after this is called, the thread associated with this
+ // controller will run a task returned by sequence->TakeTask(). Can be called
+ // from any sequence.
+ //
+ // TODO(altimin): Change this to "the thread associated with this
+ // controller will run tasks returned by sequence->TakeTask() until it
+ // returns null or sequence->DidRunTask() returns false" once the
+ // code is changed to work that way.
+ virtual void ScheduleWork() = 0;
+
+ // Notify the controller that its associated sequence will have
+ // delayed work to run at |run_time|. The thread associated with this
+ // controller will run a task returned by sequence->TakeTask() at that time.
+ // This call cancels any previously scheduled delayed work. Will be called
+ // from the main sequence.
+ //
+ // TODO(altimin): Change this to "the thread associated with this
+ // controller will run tasks returned by sequence->TakeTask() until
+ // it returns null or sequence->DidRunTask() returns false" once the
+ // code is changed to work that way.
+ virtual void ScheduleDelayedWork(base::TimeTicks now,
+ base::TimeTicks run_time) = 0;
+
+ // Notify thread controller that sequence no longer has delayed work at
+ // |run_time| and previously scheduled callbacks should be cancelled.
+ virtual void CancelDelayedWork(base::TimeTicks run_time) = 0;
+
+ // Sets the sequenced task source from which to take tasks after
+ // a Schedule*Work() call is made.
+ // Must be called before the first call to Schedule*Work().
+ virtual void SetSequencedTaskSource(SequencedTaskSource*) = 0;
+
+ // TODO(altimin): Get rid of the methods below.
+ // These methods exist due to current integration of TaskQueueManager
+ // with MessageLoop.
+
+ virtual bool RunsTasksInCurrentSequence() = 0;
+
+ virtual const base::TickClock* GetClock() = 0;
+
+ virtual void SetDefaultTaskRunner(
+ scoped_refptr<base::SingleThreadTaskRunner>) = 0;
+
+ virtual void RestoreDefaultTaskRunner() = 0;
+
+ virtual void AddNestingObserver(base::RunLoop::NestingObserver* observer) = 0;
+
+ virtual void RemoveNestingObserver(
+ base::RunLoop::NestingObserver* observer) = 0;
+};
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_THREAD_CONTROLLER_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller_impl.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller_impl.cc
new file mode 100644
index 00000000000..196cd5c5312
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller_impl.cc
@@ -0,0 +1,255 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/thread_controller_impl.h"
+
+#include "base/bind.h"
+#include "base/memory/ptr_util.h"
+#include "base/message_loop/message_loop.h"
+#include "base/run_loop.h"
+#include "base/time/tick_clock.h"
+#include "base/trace_event/trace_event.h"
+#include "third_party/blink/renderer/platform/scheduler/base/lazy_now.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+ThreadControllerImpl::ThreadControllerImpl(
+ base::MessageLoop* message_loop,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ const base::TickClock* time_source)
+ : message_loop_(message_loop),
+ task_runner_(task_runner),
+ message_loop_task_runner_(message_loop ? message_loop->task_runner()
+ : nullptr),
+ time_source_(time_source),
+ weak_factory_(this) {
+ immediate_do_work_closure_ = base::BindRepeating(
+ &ThreadControllerImpl::DoWork, weak_factory_.GetWeakPtr(),
+ SequencedTaskSource::WorkType::kImmediate);
+ delayed_do_work_closure_ = base::BindRepeating(
+ &ThreadControllerImpl::DoWork, weak_factory_.GetWeakPtr(),
+ SequencedTaskSource::WorkType::kDelayed);
+}
+
+ThreadControllerImpl::~ThreadControllerImpl() = default;
+
+std::unique_ptr<ThreadControllerImpl> ThreadControllerImpl::Create(
+ base::MessageLoop* message_loop,
+ const base::TickClock* time_source) {
+ return base::WrapUnique(new ThreadControllerImpl(
+ message_loop, message_loop->task_runner(), time_source));
+}
+
+void ThreadControllerImpl::SetSequencedTaskSource(
+ SequencedTaskSource* sequence) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(sequence);
+ DCHECK(!sequence_);
+ sequence_ = sequence;
+}
+
+void ThreadControllerImpl::ScheduleWork() {
+ DCHECK(sequence_);
+ base::AutoLock lock(any_sequence_lock_);
+ // Don't post a DoWork if there's an immediate DoWork in flight or if we're
+ // inside a top level DoWork. We can rely on a continuation being posted as
+ // needed.
+ if (any_sequence().immediate_do_work_posted ||
+ (any_sequence().do_work_running_count > any_sequence().nesting_depth)) {
+ return;
+ }
+ any_sequence().immediate_do_work_posted = true;
+
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+ "ThreadControllerImpl::ScheduleWork::PostTask");
+ task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
+}
+
+void ThreadControllerImpl::ScheduleDelayedWork(base::TimeTicks now,
+ base::TimeTicks run_time) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(sequence_);
+
+ // If there's a delayed DoWork scheduled to run sooner, we don't need to do
+ // anything because a delayed continuation will be posted as needed.
+ if (main_sequence_only().next_delayed_do_work <= run_time)
+ return;
+
+ // If DoWork is running then we don't need to do anything because it will post
+ // a continuation as needed. Bailing out here is by far the most common case.
+ if (main_sequence_only().do_work_running_count >
+ main_sequence_only().nesting_depth) {
+ return;
+ }
+
+ // If DoWork is about to run then we also don't need to do anything.
+ {
+ base::AutoLock lock(any_sequence_lock_);
+ if (any_sequence().immediate_do_work_posted)
+ return;
+ }
+
+ base::TimeDelta delay = std::max(base::TimeDelta(), run_time - now);
+ TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+ "ThreadControllerImpl::ScheduleDelayedWork::PostDelayedTask",
+ "delay_ms", delay.InMillisecondsF());
+
+ main_sequence_only().next_delayed_do_work = run_time;
+ cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
+ task_runner_->PostDelayedTask(
+ FROM_HERE, cancelable_delayed_do_work_closure_.callback(), delay);
+}
+
+void ThreadControllerImpl::CancelDelayedWork(base::TimeTicks run_time) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(sequence_);
+ if (main_sequence_only().next_delayed_do_work != run_time)
+ return;
+
+ cancelable_delayed_do_work_closure_.Cancel();
+ main_sequence_only().next_delayed_do_work = base::TimeTicks::Max();
+}
+
+bool ThreadControllerImpl::RunsTasksInCurrentSequence() {
+ return task_runner_->RunsTasksInCurrentSequence();
+}
+
+const base::TickClock* ThreadControllerImpl::GetClock() {
+ return time_source_;
+}
+
+void ThreadControllerImpl::SetDefaultTaskRunner(
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner) {
+ if (!message_loop_)
+ return;
+ message_loop_->SetTaskRunner(task_runner);
+}
+
+void ThreadControllerImpl::RestoreDefaultTaskRunner() {
+ if (!message_loop_)
+ return;
+ message_loop_->SetTaskRunner(message_loop_task_runner_);
+}
+
+void ThreadControllerImpl::DidQueueTask(const base::PendingTask& pending_task) {
+ task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task);
+}
+
+void ThreadControllerImpl::DoWork(SequencedTaskSource::WorkType work_type) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK(sequence_);
+
+ {
+ base::AutoLock lock(any_sequence_lock_);
+ if (work_type == SequencedTaskSource::WorkType::kImmediate)
+ any_sequence().immediate_do_work_posted = false;
+ any_sequence().do_work_running_count++;
+ }
+
+ main_sequence_only().do_work_running_count++;
+
+ base::WeakPtr<ThreadControllerImpl> weak_ptr = weak_factory_.GetWeakPtr();
+ // TODO(scheduler-dev): Consider moving to a time based work batch instead.
+ for (int i = 0; i < main_sequence_only().work_batch_size_; i++) {
+ base::Optional<base::PendingTask> task = sequence_->TakeTask();
+ if (!task)
+ break;
+
+ TRACE_TASK_EXECUTION("ThreadControllerImpl::DoWork", *task);
+ task_annotator_.RunTask("ThreadControllerImpl::DoWork", &*task);
+
+ if (!weak_ptr)
+ return;
+
+ sequence_->DidRunTask();
+
+ // TODO(alexclarke): Find out why this is needed.
+ if (main_sequence_only().nesting_depth > 0)
+ break;
+ }
+
+ main_sequence_only().do_work_running_count--;
+
+ {
+ base::AutoLock lock(any_sequence_lock_);
+ any_sequence().do_work_running_count--;
+ DCHECK_GE(any_sequence().do_work_running_count, 0);
+ LazyNow lazy_now(time_source_);
+ base::TimeDelta delay_till_next_task =
+ sequence_->DelayTillNextTask(&lazy_now);
+ if (delay_till_next_task <= base::TimeDelta()) {
+ // The next task needs to run immediately, post a continuation if needed.
+ if (!any_sequence().immediate_do_work_posted) {
+ any_sequence().immediate_do_work_posted = true;
+ task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
+ }
+ } else if (delay_till_next_task < base::TimeDelta::Max()) {
+ // The next task needs to run after a delay, post a continuation if
+ // needed.
+ base::TimeTicks next_task_at = lazy_now.Now() + delay_till_next_task;
+ if (next_task_at != main_sequence_only().next_delayed_do_work) {
+ main_sequence_only().next_delayed_do_work = next_task_at;
+ cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
+ task_runner_->PostDelayedTask(
+ FROM_HERE, cancelable_delayed_do_work_closure_.callback(),
+ delay_till_next_task);
+ }
+ } else {
+ // There is no next task scheduled.
+ main_sequence_only().next_delayed_do_work = base::TimeTicks::Max();
+ }
+ }
+}
+
+void ThreadControllerImpl::AddNestingObserver(
+ base::RunLoop::NestingObserver* observer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ nesting_observer_ = observer;
+ base::RunLoop::AddNestingObserverOnCurrentThread(this);
+}
+
+void ThreadControllerImpl::RemoveNestingObserver(
+ base::RunLoop::NestingObserver* observer) {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ DCHECK_EQ(observer, nesting_observer_);
+ nesting_observer_ = nullptr;
+ base::RunLoop::RemoveNestingObserverOnCurrentThread(this);
+}
+
+void ThreadControllerImpl::OnBeginNestedRunLoop() {
+ main_sequence_only().nesting_depth++;
+ {
+ base::AutoLock lock(any_sequence_lock_);
+ any_sequence().nesting_depth++;
+ if (!any_sequence().immediate_do_work_posted) {
+ any_sequence().immediate_do_work_posted = true;
+ TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"),
+ "ThreadControllerImpl::OnBeginNestedRunLoop::PostTask");
+ task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
+ }
+ }
+ if (nesting_observer_)
+ nesting_observer_->OnBeginNestedRunLoop();
+}
+
+void ThreadControllerImpl::OnExitNestedRunLoop() {
+ main_sequence_only().nesting_depth--;
+ {
+ base::AutoLock lock(any_sequence_lock_);
+ any_sequence().nesting_depth--;
+ DCHECK_GE(any_sequence().nesting_depth, 0);
+ }
+ if (nesting_observer_)
+ nesting_observer_->OnExitNestedRunLoop();
+}
+
+void ThreadControllerImpl::SetWorkBatchSize(int work_batch_size) {
+ main_sequence_only().work_batch_size_ = work_batch_size;
+}
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller_impl.h b/chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller_impl.h
new file mode 100644
index 00000000000..bff48060c17
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/thread_controller_impl.h
@@ -0,0 +1,132 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_THREAD_CONTROLLER_IMPL_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_THREAD_CONTROLLER_IMPL_H_
+
+#include "third_party/blink/renderer/platform/scheduler/base/thread_controller.h"
+
+#include "base/cancelable_callback.h"
+#include "base/debug/task_annotator.h"
+#include "base/macros.h"
+#include "base/memory/weak_ptr.h"
+#include "base/run_loop.h"
+#include "base/sequence_checker.h"
+#include "base/single_thread_task_runner.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/scheduler/base/sequenced_task_source.h"
+
+namespace base {
+class MessageLoop;
+class TickClock;
+} // namespace base
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+class PLATFORM_EXPORT ThreadControllerImpl
+ : public ThreadController,
+ public base::RunLoop::NestingObserver {
+ public:
+ ~ThreadControllerImpl() override;
+
+ static std::unique_ptr<ThreadControllerImpl> Create(
+ base::MessageLoop* message_loop,
+ const base::TickClock* time_source);
+
+ // ThreadController:
+ void SetWorkBatchSize(int work_batch_size) override;
+ void DidQueueTask(const base::PendingTask& pending_task) override;
+ void ScheduleWork() override;
+ void ScheduleDelayedWork(base::TimeTicks now,
+                           base::TimeTicks run_time) override;
+ void CancelDelayedWork(base::TimeTicks run_time) override;
+ void SetSequencedTaskSource(SequencedTaskSource* sequence) override;
+ bool RunsTasksInCurrentSequence() override;
+ const base::TickClock* GetClock() override;
+ void SetDefaultTaskRunner(
+ scoped_refptr<base::SingleThreadTaskRunner>) override;
+ void RestoreDefaultTaskRunner() override;
+ void AddNestingObserver(base::RunLoop::NestingObserver* observer) override;
+ void RemoveNestingObserver(base::RunLoop::NestingObserver* observer) override;
+
+ // base::RunLoop::NestingObserver:
+ void OnBeginNestedRunLoop() override;
+ void OnExitNestedRunLoop() override;
+
+ protected:
+ ThreadControllerImpl(base::MessageLoop* message_loop,
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner,
+ const base::TickClock* time_source);
+
+ // TODO(altimin): Make these const. Blocked on removing
+ // lazy initialisation support.
+ base::MessageLoop* message_loop_;
+ scoped_refptr<base::SingleThreadTaskRunner> task_runner_;
+
+ base::RunLoop::NestingObserver* nesting_observer_ = nullptr;
+
+ private:
+ void DoWork(SequencedTaskSource::WorkType work_type);
+
+ struct AnySequence {
+ AnySequence() = default;
+
+ int do_work_running_count = 0;
+ int nesting_depth = 0;
+ bool immediate_do_work_posted = false;
+ };
+
+ mutable base::Lock any_sequence_lock_;
+ AnySequence any_sequence_;
+
+ struct AnySequence& any_sequence() {
+ any_sequence_lock_.AssertAcquired();
+ return any_sequence_;
+ }
+ const struct AnySequence& any_sequence() const {
+ any_sequence_lock_.AssertAcquired();
+ return any_sequence_;
+ }
+
+ struct MainSequenceOnly {
+ MainSequenceOnly() = default;
+
+ int do_work_running_count = 0;
+ int nesting_depth = 0;
+ int work_batch_size_ = 1;
+
+ base::TimeTicks next_delayed_do_work = base::TimeTicks::Max();
+ };
+
+ SEQUENCE_CHECKER(sequence_checker_);
+ MainSequenceOnly main_sequence_only_;
+ MainSequenceOnly& main_sequence_only() {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ return main_sequence_only_;
+ }
+ const MainSequenceOnly& main_sequence_only() const {
+ DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
+ return main_sequence_only_;
+ }
+
+ scoped_refptr<base::SingleThreadTaskRunner> message_loop_task_runner_;
+ const base::TickClock* time_source_;
+ base::RepeatingClosure immediate_do_work_closure_;
+ base::RepeatingClosure delayed_do_work_closure_;
+ base::CancelableClosure cancelable_delayed_do_work_closure_;
+ SequencedTaskSource* sequence_ = nullptr; // NOT OWNED
+ base::debug::TaskAnnotator task_annotator_;
+
+ base::WeakPtrFactory<ThreadControllerImpl> weak_factory_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadControllerImpl);
+};
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_THREAD_CONTROLLER_IMPL_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/time_domain.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/time_domain.cc
new file mode 100644
index 00000000000..b835c150451
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/time_domain.cc
@@ -0,0 +1,119 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/time_domain.h"
+
+#include <set>
+
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue.h"
+
+namespace blink {
+namespace scheduler {
+
+TimeDomain::TimeDomain() = default;
+
+TimeDomain::~TimeDomain() {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+}
+
+void TimeDomain::RegisterQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(queue->GetTimeDomain(), this);
+}
+
+void TimeDomain::UnregisterQueue(internal::TaskQueueImpl* queue) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(queue->GetTimeDomain(), this);
+
+ LazyNow lazy_now = CreateLazyNow();
+ ScheduleWakeUpForQueue(queue, base::nullopt, &lazy_now);
+}
+
+void TimeDomain::ScheduleWakeUpForQueue(
+ internal::TaskQueueImpl* queue,
+ base::Optional<internal::TaskQueueImpl::DelayedWakeUp> wake_up,
+ LazyNow* lazy_now) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ DCHECK_EQ(queue->GetTimeDomain(), this);
+ DCHECK(queue->IsQueueEnabled() || !wake_up);
+
+ base::Optional<base::TimeTicks> previous_wake_up;
+ if (!delayed_wake_up_queue_.empty())
+ previous_wake_up = delayed_wake_up_queue_.Min().wake_up.time;
+
+ if (wake_up) {
+ // Insert a new wake-up into the heap.
+ if (queue->heap_handle().IsValid()) {
+ // O(log n)
+ delayed_wake_up_queue_.ChangeKey(queue->heap_handle(),
+ {wake_up.value(), queue});
+ } else {
+ // O(log n)
+ delayed_wake_up_queue_.insert({wake_up.value(), queue});
+ }
+ } else {
+ // Remove a wake-up from heap if present.
+ if (queue->heap_handle().IsValid())
+ delayed_wake_up_queue_.erase(queue->heap_handle());
+ }
+
+ base::Optional<base::TimeTicks> new_wake_up;
+ if (!delayed_wake_up_queue_.empty())
+ new_wake_up = delayed_wake_up_queue_.Min().wake_up.time;
+
+ if (previous_wake_up == new_wake_up)
+ return;
+
+ if (previous_wake_up)
+ CancelWakeUpAt(previous_wake_up.value());
+ if (new_wake_up)
+ RequestWakeUpAt(lazy_now->Now(), new_wake_up.value());
+}
+
+void TimeDomain::WakeUpReadyDelayedQueues(LazyNow* lazy_now) {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+  // Wake up any queues with pending delayed work. Wake-ups are stored in an
+  // IntrusiveHeap ordered by time, so Min() always refers to the earliest
+  // queue to wake up.
+ while (!delayed_wake_up_queue_.empty() &&
+ delayed_wake_up_queue_.Min().wake_up.time <= lazy_now->Now()) {
+ internal::TaskQueueImpl* queue = delayed_wake_up_queue_.Min().queue;
+ queue->WakeUpForDelayedWork(lazy_now);
+ }
+}
+
+bool TimeDomain::NextScheduledRunTime(base::TimeTicks* out_time) const {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (delayed_wake_up_queue_.empty())
+ return false;
+
+ *out_time = delayed_wake_up_queue_.Min().wake_up.time;
+ return true;
+}
+
+bool TimeDomain::NextScheduledTaskQueue(
+ internal::TaskQueueImpl** out_task_queue) const {
+ DCHECK(main_thread_checker_.CalledOnValidThread());
+ if (delayed_wake_up_queue_.empty())
+ return false;
+
+ *out_task_queue = delayed_wake_up_queue_.Min().queue;
+ return true;
+}
+
+void TimeDomain::AsValueInto(base::trace_event::TracedValue* state) const {
+ state->BeginDictionary();
+ state->SetString("name", GetName());
+ state->SetInteger("registered_delay_count", delayed_wake_up_queue_.size());
+ if (!delayed_wake_up_queue_.empty()) {
+ base::TimeDelta delay = delayed_wake_up_queue_.Min().wake_up.time - Now();
+ state->SetDouble("next_delay_ms", delay.InMillisecondsF());
+ }
+ AsValueIntoInternal(state);
+ state->EndDictionary();
+}
+
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/time_domain.h b/chromium/third_party/blink/renderer/platform/scheduler/base/time_domain.h
new file mode 100644
index 00000000000..5ee0d850ea6
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/time_domain.h
@@ -0,0 +1,142 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TIME_DOMAIN_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TIME_DOMAIN_H_
+
+#include <map>
+
+#include "base/callback.h"
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/time/time.h"
+#include "third_party/blink/renderer/platform/scheduler/base/intrusive_heap.h"
+#include "third_party/blink/renderer/platform/scheduler/base/lazy_now.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+class TaskQueueImpl;
+}  // namespace internal
+class TaskQueueManager;
+
+// The TimeDomain's job is to wake task queues up when their next delayed tasks
+// are due to fire. TaskQueues request a wake up via ScheduleDelayedWork, when
+// the wake up is due the TimeDomain calls TaskQueue::WakeUpForDelayedWork.
+// The TimeDomain communicates with the TaskQueueManager to actually schedule
+// the wake-ups on the underlying base::MessageLoop. Various levels of de-duping
+// are employed to prevent unnecessary posting of TaskQueueManager::DoWork.
+//
+// Note the TimeDomain only knows about the first wake-up per queue, it's the
+// responsibility of TaskQueueImpl to keep the time domain up to date if this
+// changes.
+class PLATFORM_EXPORT TimeDomain {
+ public:
+ TimeDomain();
+ virtual ~TimeDomain();
+
+  // Returns a LazyNow that evaluates this TimeDomain's Now. Can be called from
+ // any thread.
+ // TODO(alexclarke): Make this main thread only.
+ virtual LazyNow CreateLazyNow() const = 0;
+
+ // Evaluate this TimeDomain's Now. Can be called from any thread.
+ virtual base::TimeTicks Now() const = 0;
+
+ // Computes the delay until the next task the TimeDomain is aware of, if any.
+ // Note virtual time domains may return base::TimeDelta() if they have any
+ // delayed tasks they deem eligible to run. Virtual time domains are allowed
+ // to advance their internal clock when this method is called.
+ virtual base::Optional<base::TimeDelta> DelayTillNextTask(
+ LazyNow* lazy_now) = 0;
+
+ // Returns the name of this time domain for tracing.
+ virtual const char* GetName() const = 0;
+
+ // If there is a scheduled delayed task, |out_time| is set to the scheduled
+ // runtime for the next one and it returns true. Returns false otherwise.
+ bool NextScheduledRunTime(base::TimeTicks* out_time) const;
+
+ protected:
+ friend class internal::TaskQueueImpl;
+ friend class TaskQueueManagerImpl;
+
+ void AsValueInto(base::trace_event::TracedValue* state) const;
+
+ // If there is a scheduled delayed task, |out_task_queue| is set to the queue
+ // the next task was posted to and it returns true. Returns false otherwise.
+ bool NextScheduledTaskQueue(internal::TaskQueueImpl** out_task_queue) const;
+
+ void ScheduleWakeUpForQueue(
+ internal::TaskQueueImpl* queue,
+ base::Optional<internal::TaskQueueImpl::DelayedWakeUp> wake_up,
+ LazyNow* lazy_now);
+
+ // Registers the |queue|.
+ void RegisterQueue(internal::TaskQueueImpl* queue);
+
+ // Removes |queue| from all internal data structures.
+ void UnregisterQueue(internal::TaskQueueImpl* queue);
+
+ // Called by the TaskQueueManager when the TimeDomain is registered.
+ virtual void OnRegisterWithTaskQueueManager(
+ TaskQueueManagerImpl* task_queue_manager) = 0;
+
+ // The implementation will schedule task processing to run at time |run_time|
+ // within the TimeDomain's time line. Only called from the main thread.
+ // NOTE this is only called by ScheduleDelayedWork if the scheduled runtime
+  // is sooner than any previously scheduled work or if there is no other
+ // scheduled work.
+ virtual void RequestWakeUpAt(base::TimeTicks now,
+ base::TimeTicks run_time) = 0;
+
+ // The implementation will cancel a wake up previously requested by
+ // RequestWakeUpAt. It's expected this will be a NOP for most virtual time
+ // domains.
+ virtual void CancelWakeUpAt(base::TimeTicks run_time) = 0;
+
+ // For implementation specific tracing.
+ virtual void AsValueIntoInternal(
+ base::trace_event::TracedValue* state) const = 0;
+
+ // Call TaskQueueImpl::UpdateDelayedWorkQueue for each queue where the delay
+ // has elapsed.
+ void WakeUpReadyDelayedQueues(LazyNow* lazy_now);
+
+ size_t NumberOfScheduledWakeUps() const {
+ return delayed_wake_up_queue_.size();
+ }
+
+ private:
+ struct ScheduledDelayedWakeUp {
+ internal::TaskQueueImpl::DelayedWakeUp wake_up;
+ internal::TaskQueueImpl* queue;
+
+ bool operator<=(const ScheduledDelayedWakeUp& other) const {
+ return wake_up <= other.wake_up;
+ }
+
+ void SetHeapHandle(HeapHandle handle) {
+ DCHECK(handle.IsValid());
+ queue->set_heap_handle(handle);
+ }
+
+ void ClearHeapHandle() {
+ DCHECK(queue->heap_handle().IsValid());
+ queue->set_heap_handle(HeapHandle());
+ }
+ };
+
+ IntrusiveHeap<ScheduledDelayedWakeUp> delayed_wake_up_queue_;
+
+ base::ThreadChecker main_thread_checker_;
+
+ DISALLOW_COPY_AND_ASSIGN(TimeDomain);
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_TIME_DOMAIN_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/time_domain_unittest.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/time_domain_unittest.cc
new file mode 100644
index 00000000000..a16adba9f08
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/time_domain_unittest.cc
@@ -0,0 +1,348 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/time_domain.h"
+
+#include <memory>
+#include "base/macros.h"
+#include "base/memory/ptr_util.h"
+#include "base/test/simple_test_tick_clock.h"
+#include "components/viz/test/ordered_simple_task_runner.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_manager.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue.h"
+
+using testing::_;
+using testing::AnyNumber;
+using testing::Mock;
+
+namespace blink {
+namespace scheduler {
+
+class TaskQueueImplForTest : public internal::TaskQueueImpl {
+ public:
+ TaskQueueImplForTest(TaskQueueManagerImpl* task_queue_manager,
+ TimeDomain* time_domain,
+ const TaskQueue::Spec& spec)
+ : TaskQueueImpl(task_queue_manager, time_domain, spec) {}
+ ~TaskQueueImplForTest() {}
+
+ using TaskQueueImpl::SetDelayedWakeUpForTesting;
+};
+
+class MockTimeDomain : public TimeDomain {
+ public:
+ MockTimeDomain()
+ : now_(base::TimeTicks() + base::TimeDelta::FromSeconds(1)) {}
+
+ ~MockTimeDomain() override = default;
+
+ using TimeDomain::NextScheduledRunTime;
+ using TimeDomain::NextScheduledTaskQueue;
+ using TimeDomain::UnregisterQueue;
+ using TimeDomain::WakeUpReadyDelayedQueues;
+
+ // TimeSource implementation:
+ LazyNow CreateLazyNow() const override { return LazyNow(now_); }
+ base::TimeTicks Now() const override { return now_; }
+
+ void AsValueIntoInternal(
+ base::trace_event::TracedValue* state) const override {}
+
+ base::Optional<base::TimeDelta> DelayTillNextTask(
+ LazyNow* lazy_now) override {
+ return base::Optional<base::TimeDelta>();
+ }
+ const char* GetName() const override { return "Test"; }
+ void OnRegisterWithTaskQueueManager(
+ TaskQueueManagerImpl* task_queue_manager) override {}
+
+ MOCK_METHOD2(RequestWakeUpAt,
+ void(base::TimeTicks now, base::TimeTicks run_time));
+
+ MOCK_METHOD1(CancelWakeUpAt, void(base::TimeTicks run_time));
+
+ void SetNow(base::TimeTicks now) { now_ = now; }
+
+ private:
+ base::TimeTicks now_;
+
+ DISALLOW_COPY_AND_ASSIGN(MockTimeDomain);
+};
+
+class TimeDomainTest : public testing::Test {
+ public:
+ void SetUp() final {
+ time_domain_ = base::WrapUnique(CreateMockTimeDomain());
+ task_queue_ = std::make_unique<TaskQueueImplForTest>(
+ nullptr, time_domain_.get(), TaskQueue::Spec("test"));
+ }
+
+ void TearDown() final {
+ if (task_queue_)
+ task_queue_->UnregisterTaskQueue();
+ }
+
+ virtual MockTimeDomain* CreateMockTimeDomain() {
+ return new MockTimeDomain();
+ }
+
+ std::unique_ptr<MockTimeDomain> time_domain_;
+ std::unique_ptr<TaskQueueImplForTest> task_queue_;
+};
+
+TEST_F(TimeDomainTest, ScheduleWakeUpForQueue) {
+ base::TimeDelta delay = base::TimeDelta::FromMilliseconds(10);
+ base::TimeTicks delayed_runtime = time_domain_->Now() + delay;
+ EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, delayed_runtime));
+ base::TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay, 0});
+
+ base::TimeTicks next_scheduled_runtime;
+ EXPECT_TRUE(time_domain_->NextScheduledRunTime(&next_scheduled_runtime));
+ EXPECT_EQ(delayed_runtime, next_scheduled_runtime);
+
+ internal::TaskQueueImpl* next_task_queue;
+ EXPECT_TRUE(time_domain_->NextScheduledTaskQueue(&next_task_queue));
+ EXPECT_EQ(task_queue_.get(), next_task_queue);
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), CancelWakeUpAt(_)).Times(AnyNumber());
+}
+
+TEST_F(TimeDomainTest, ScheduleWakeUpForQueueSupersedesPreviousWakeUp) {
+ base::TimeDelta delay1 = base::TimeDelta::FromMilliseconds(10);
+ base::TimeDelta delay2 = base::TimeDelta::FromMilliseconds(100);
+ base::TimeTicks delayed_runtime1 = time_domain_->Now() + delay1;
+ base::TimeTicks delayed_runtime2 = time_domain_->Now() + delay2;
+ EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, delayed_runtime1));
+ base::TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime1, 0});
+
+ base::TimeTicks next_scheduled_runtime;
+ EXPECT_TRUE(time_domain_->NextScheduledRunTime(&next_scheduled_runtime));
+ EXPECT_EQ(delayed_runtime1, next_scheduled_runtime);
+
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ // Now schedule a later wake_up, which should replace the previously
+ // requested one.
+ EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, delayed_runtime2));
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime2, 0});
+
+ EXPECT_TRUE(time_domain_->NextScheduledRunTime(&next_scheduled_runtime));
+ EXPECT_EQ(delayed_runtime2, next_scheduled_runtime);
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), CancelWakeUpAt(_)).Times(AnyNumber());
+}
+
+TEST_F(TimeDomainTest, RequestWakeUpAt_OnlyCalledForEarlierTasks) {
+ std::unique_ptr<TaskQueueImplForTest> task_queue2 =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ std::unique_ptr<TaskQueueImplForTest> task_queue3 =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ std::unique_ptr<TaskQueueImplForTest> task_queue4 =
+ std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ base::TimeDelta delay1 = base::TimeDelta::FromMilliseconds(10);
+ base::TimeDelta delay2 = base::TimeDelta::FromMilliseconds(20);
+ base::TimeDelta delay3 = base::TimeDelta::FromMilliseconds(30);
+ base::TimeDelta delay4 = base::TimeDelta::FromMilliseconds(1);
+
+ // RequestWakeUpAt should always be called if there are no other wake-ups.
+ base::TimeTicks now = time_domain_->Now();
+ LazyNow lazy_now(now);
+ EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, now + delay1));
+ task_queue_->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay1, 0});
+
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ // RequestWakeUpAt should not be called when scheduling later tasks.
+ EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, _)).Times(0);
+ task_queue2->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay2, 0});
+ task_queue3->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay3, 0});
+
+ // RequestWakeUpAt should be called when scheduling earlier tasks.
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+ EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, now + delay4));
+ task_queue4->SetDelayedWakeUpForTesting(
+ internal::TaskQueueImpl::DelayedWakeUp{now + delay4, 0});
+
+ Mock::VerifyAndClearExpectations(time_domain_.get());
+
+ EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, _));
+ EXPECT_CALL(*time_domain_.get(), CancelWakeUpAt(_)).Times(2);
+ task_queue2->UnregisterTaskQueue();
+ task_queue3->UnregisterTaskQueue();
+ task_queue4->UnregisterTaskQueue();
+}
+
+// Unregistering a queue with a pending wake-up must cancel that wake-up and
+// promote the next-earliest queue's wake-up to the time domain.
+TEST_F(TimeDomainTest, UnregisterQueue) {
+  std::unique_ptr<TaskQueueImplForTest> task_queue2_ =
+      std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+                                             TaskQueue::Spec("test"));
+
+  base::TimeTicks now = time_domain_->Now();
+  LazyNow lazy_now(now);
+  // Schedule wake-ups on both queues; only the earliest (10ms) should be
+  // requested from the time domain.
+  base::TimeTicks wake_up1 = now + base::TimeDelta::FromMilliseconds(10);
+  EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, wake_up1)).Times(1);
+  task_queue_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{wake_up1, 0});
+  base::TimeTicks wake_up2 = now + base::TimeDelta::FromMilliseconds(100);
+  task_queue2_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{wake_up2, 0});
+
+  internal::TaskQueueImpl* next_task_queue;
+  EXPECT_TRUE(time_domain_->NextScheduledTaskQueue(&next_task_queue));
+  EXPECT_EQ(task_queue_.get(), next_task_queue);
+
+  testing::Mock::VerifyAndClearExpectations(time_domain_.get());
+
+  // Unregistering the front queue should cancel its wake-up and re-request
+  // one for the remaining queue.
+  EXPECT_CALL(*time_domain_.get(), CancelWakeUpAt(wake_up1)).Times(1);
+  EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, wake_up2)).Times(1);
+
+  time_domain_->UnregisterQueue(task_queue_.get());
+  task_queue_ = std::unique_ptr<TaskQueueImplForTest>();
+  EXPECT_TRUE(time_domain_->NextScheduledTaskQueue(&next_task_queue));
+  EXPECT_EQ(task_queue2_.get(), next_task_queue);
+
+  testing::Mock::VerifyAndClearExpectations(time_domain_.get());
+
+  EXPECT_CALL(*time_domain_.get(), CancelWakeUpAt(wake_up2)).Times(1);
+
+  // With the last queue gone, nothing remains scheduled.
+  time_domain_->UnregisterQueue(task_queue2_.get());
+  EXPECT_FALSE(time_domain_->NextScheduledTaskQueue(&next_task_queue));
+}
+
+// WakeUpReadyDelayedQueues must only consume wake-ups whose run time has been
+// reached; before then the scheduled run time must remain unchanged.
+TEST_F(TimeDomainTest, WakeUpReadyDelayedQueues) {
+  base::TimeDelta delay = base::TimeDelta::FromMilliseconds(50);
+  base::TimeTicks now = time_domain_->Now();
+  LazyNow lazy_now(now);
+  base::TimeTicks delayed_runtime = now + delay;
+  EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, delayed_runtime));
+  task_queue_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime, 0});
+
+  base::TimeTicks next_run_time;
+  ASSERT_TRUE(time_domain_->NextScheduledRunTime(&next_run_time));
+  EXPECT_EQ(delayed_runtime, next_run_time);
+
+  // Too early: the wake-up should still be pending afterwards.
+  time_domain_->WakeUpReadyDelayedQueues(&lazy_now);
+  ASSERT_TRUE(time_domain_->NextScheduledRunTime(&next_run_time));
+  EXPECT_EQ(delayed_runtime, next_run_time);
+
+  // Advance to the run time: the wake-up should now be consumed.
+  time_domain_->SetNow(delayed_runtime);
+  lazy_now = time_domain_->CreateLazyNow();
+  time_domain_->WakeUpReadyDelayedQueues(&lazy_now);
+  ASSERT_FALSE(time_domain_->NextScheduledRunTime(&next_run_time));
+}
+
+// When two queues share an identical wake-up time, ties are broken by the
+// DelayedWakeUp sequence number: the lower number wakes first.
+TEST_F(TimeDomainTest, WakeUpReadyDelayedQueuesWithIdenticalRuntimes) {
+  int sequence_num = 0;
+  base::TimeDelta delay = base::TimeDelta::FromMilliseconds(50);
+  base::TimeTicks now = time_domain_->Now();
+  LazyNow lazy_now(now);
+  base::TimeTicks delayed_runtime = now + delay;
+  EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, delayed_runtime));
+  EXPECT_CALL(*time_domain_.get(), CancelWakeUpAt(delayed_runtime));
+
+  std::unique_ptr<TaskQueueImplForTest> task_queue2 =
+      std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+                                             TaskQueue::Spec("test"));
+
+  // |task_queue2| gets sequence number 1, |task_queue_| gets 2.
+  task_queue2->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime, ++sequence_num});
+  task_queue_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{delayed_runtime, ++sequence_num});
+
+  time_domain_->WakeUpReadyDelayedQueues(&lazy_now);
+
+  // The second task queue should wake up first since it has a lower sequence
+  // number.
+  internal::TaskQueueImpl* next_task_queue;
+  EXPECT_TRUE(time_domain_->NextScheduledTaskQueue(&next_task_queue));
+  EXPECT_EQ(task_queue2.get(), next_task_queue);
+
+  task_queue2->UnregisterTaskQueue();
+}
+
+// Clearing a queue's delayed wake-up must cancel the outstanding wake-up
+// request and leave nothing scheduled.
+TEST_F(TimeDomainTest, CancelDelayedWork) {
+  base::TimeTicks now = time_domain_->Now();
+  LazyNow lazy_now(now);
+  base::TimeTicks run_time = now + base::TimeDelta::FromMilliseconds(20);
+
+  EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, run_time));
+  task_queue_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{run_time, 0});
+
+  internal::TaskQueueImpl* next_task_queue;
+  EXPECT_TRUE(time_domain_->NextScheduledTaskQueue(&next_task_queue));
+  EXPECT_EQ(task_queue_.get(), next_task_queue);
+
+  // Passing nullopt clears the wake-up, which should be cancelled upstream.
+  EXPECT_CALL(*time_domain_.get(), CancelWakeUpAt(run_time));
+  task_queue_->SetDelayedWakeUpForTesting(base::nullopt);
+  EXPECT_FALSE(time_domain_->NextScheduledTaskQueue(&next_task_queue));
+}
+
+// Cancelling the earlier of two queues' wake-ups must cancel the original
+// request and fall back to the later queue's wake-up.
+TEST_F(TimeDomainTest, CancelDelayedWork_TwoQueues) {
+  std::unique_ptr<TaskQueueImplForTest> task_queue2 =
+      std::make_unique<TaskQueueImplForTest>(nullptr, time_domain_.get(),
+                                             TaskQueue::Spec("test"));
+
+  base::TimeTicks now = time_domain_->Now();
+  LazyNow lazy_now(now);
+  base::TimeTicks run_time1 = now + base::TimeDelta::FromMilliseconds(20);
+  base::TimeTicks run_time2 = now + base::TimeDelta::FromMilliseconds(40);
+  EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, run_time1));
+  task_queue_->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{run_time1, 0});
+  Mock::VerifyAndClearExpectations(time_domain_.get());
+
+  // A later wake-up must not trigger a new request while an earlier one is
+  // outstanding.
+  EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, _)).Times(0);
+  task_queue2->SetDelayedWakeUpForTesting(
+      internal::TaskQueueImpl::DelayedWakeUp{run_time2, 0});
+  Mock::VerifyAndClearExpectations(time_domain_.get());
+
+  internal::TaskQueueImpl* next_task_queue;
+  EXPECT_TRUE(time_domain_->NextScheduledTaskQueue(&next_task_queue));
+  EXPECT_EQ(task_queue_.get(), next_task_queue);
+
+  base::TimeTicks next_run_time;
+  ASSERT_TRUE(time_domain_->NextScheduledRunTime(&next_run_time));
+  EXPECT_EQ(run_time1, next_run_time);
+
+  // Cancelling the earlier wake-up promotes the later one.
+  EXPECT_CALL(*time_domain_.get(), CancelWakeUpAt(run_time1));
+  EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, run_time2));
+  task_queue_->SetDelayedWakeUpForTesting(base::nullopt);
+  EXPECT_TRUE(time_domain_->NextScheduledTaskQueue(&next_task_queue));
+  EXPECT_EQ(task_queue2.get(), next_task_queue);
+
+  ASSERT_TRUE(time_domain_->NextScheduledRunTime(&next_run_time));
+  EXPECT_EQ(run_time2, next_run_time);
+
+  Mock::VerifyAndClearExpectations(time_domain_.get());
+  EXPECT_CALL(*time_domain_.get(), RequestWakeUpAt(_, _)).Times(AnyNumber());
+  EXPECT_CALL(*time_domain_.get(), CancelWakeUpAt(_)).Times(AnyNumber());
+
+  // Tidy up.
+  task_queue2->UnregisterTaskQueue();
+}
+
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.cc
new file mode 100644
index 00000000000..55d425b4dde
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.cc
@@ -0,0 +1,69 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.h"
+
+#include "base/bind.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_manager_impl.h"
+
+namespace blink {
+namespace scheduler {
+
+VirtualTimeDomain::VirtualTimeDomain(base::TimeTicks initial_time_ticks)
+    : now_ticks_(initial_time_ticks), task_queue_manager_(nullptr) {}
+
+VirtualTimeDomain::~VirtualTimeDomain() = default;
+
+// Stores the manager pointer; RequestDoWork() must not be called before this.
+void VirtualTimeDomain::OnRegisterWithTaskQueueManager(
+    TaskQueueManagerImpl* task_queue_manager) {
+  task_queue_manager_ = task_queue_manager;
+  DCHECK(task_queue_manager_);
+}
+
+// Returns a LazyNow pinned to the current virtual time. |now_ticks_| is only
+// read/written under |lock_| (see header).
+LazyNow VirtualTimeDomain::CreateLazyNow() const {
+  base::AutoLock lock(lock_);
+  return LazyNow(now_ticks_);
+}
+
+base::TimeTicks VirtualTimeDomain::Now() const {
+  base::AutoLock lock(lock_);
+  return now_ticks_;
+}
+
+void VirtualTimeDomain::RequestWakeUpAt(base::TimeTicks now,
+                                        base::TimeTicks run_time) {
+  // We don't need to do anything here because the caller of AdvanceTo is
+  // responsible for calling TaskQueueManagerImpl::MaybeScheduleImmediateWork if
+  // needed.
+}
+
+void VirtualTimeDomain::CancelWakeUpAt(base::TimeTicks run_time) {
+  // We ignore this because RequestWakeUpAt is a NOP.
+}
+
+// Unconditionally returns nullopt: wake-ups here are driven solely by callers
+// of AdvanceNowTo(), not by the underlying thread's delay mechanism.
+base::Optional<base::TimeDelta> VirtualTimeDomain::DelayTillNextTask(
+    LazyNow* lazy_now) {
+  return base::nullopt;
+}
+
+void VirtualTimeDomain::AsValueIntoInternal(
+    base::trace_event::TracedValue* state) const {}
+
+// Advances the virtual clock. |now| must be monotonic (DCHECKed); the caller
+// is responsible for scheduling any work that becomes ready (see header).
+void VirtualTimeDomain::AdvanceNowTo(base::TimeTicks now) {
+  base::AutoLock lock(lock_);
+  DCHECK_GE(now, now_ticks_);
+  now_ticks_ = now;
+}
+
+// Asks the manager for an immediate DoWork. Only valid after
+// OnRegisterWithTaskQueueManager(), since |task_queue_manager_| is null
+// until then.
+void VirtualTimeDomain::RequestDoWork() {
+  task_queue_manager_->MaybeScheduleImmediateWork(FROM_HERE);
+}
+
+const char* VirtualTimeDomain::GetName() const {
+  return "VirtualTimeDomain";
+}
+
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.h b/chromium/third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.h
new file mode 100644
index 00000000000..c4f37699830
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/virtual_time_domain.h
@@ -0,0 +1,55 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_VIRTUAL_TIME_DOMAIN_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_VIRTUAL_TIME_DOMAIN_H_
+
+#include "base/callback.h"
+#include "base/macros.h"
+#include "base/threading/thread_checker.h"
+#include "third_party/blink/renderer/platform/scheduler/base/time_domain.h"
+
+namespace blink {
+namespace scheduler {
+
+// A TimeDomain driven by an externally-advanced clock: time only moves when
+// AdvanceNowTo() is called, and wake-up requests from the scheduler are
+// ignored (see virtual_time_domain.cc).
+class PLATFORM_EXPORT VirtualTimeDomain : public TimeDomain {
+ public:
+  explicit VirtualTimeDomain(base::TimeTicks initial_time_ticks);
+  ~VirtualTimeDomain() override;
+
+  // TimeDomain implementation:
+  LazyNow CreateLazyNow() const override;
+  base::TimeTicks Now() const override;
+  base::Optional<base::TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override;
+  const char* GetName() const override;
+
+  // Advances this time domain to |now|. NOTE |now| is supposed to be
+  // monotonically increasing. NOTE it's the responsibility of the caller to
+  // call TaskQueueManager::MaybeScheduleImmediateWork if needed.
+  void AdvanceNowTo(base::TimeTicks now);
+
+ protected:
+  void OnRegisterWithTaskQueueManager(
+      TaskQueueManagerImpl* task_queue_manager) override;
+  // Both wake-up methods are no-ops; wake-ups are driven by AdvanceNowTo().
+  void RequestWakeUpAt(base::TimeTicks now, base::TimeTicks run_time) override;
+  void CancelWakeUpAt(base::TimeTicks run_time) override;
+  void AsValueIntoInternal(
+      base::trace_event::TracedValue* state) const override;
+
+  // Asks |task_queue_manager_| to schedule an immediate DoWork. Only valid
+  // after OnRegisterWithTaskQueueManager().
+  void RequestDoWork();
+
+ private:
+  mutable base::Lock lock_;  // Protects |now_ticks_|.
+  base::TimeTicks now_ticks_;
+
+  TaskQueueManagerImpl* task_queue_manager_;  // NOT OWNED
+
+  DISALLOW_COPY_AND_ASSIGN(VirtualTimeDomain);
+};
+
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_VIRTUAL_TIME_DOMAIN_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue.cc
new file mode 100644
index 00000000000..462d5c07eb2
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue.cc
@@ -0,0 +1,234 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue.h"
+
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+WorkQueue::WorkQueue(TaskQueueImpl* task_queue,
+                     const char* name,
+                     QueueType queue_type)
+    : task_queue_(task_queue), name_(name), queue_type_(queue_type) {}
+
+void WorkQueue::AsValueInto(base::TimeTicks now,
+                            base::trace_event::TracedValue* state) const {
+  for (const TaskQueueImpl::Task& task : tasks_) {
+    TaskQueueImpl::TaskAsValueInto(task, now, state);
+  }
+}
+
+WorkQueue::~WorkQueue() {
+  // The queue must have been detached from its WorkQueueSets (via
+  // AssignToWorkQueueSets(nullptr)) before destruction.
+  DCHECK(!work_queue_sets_) << task_queue_->GetName() << " : "
+                            << work_queue_sets_->GetName() << " : " << name_;
+}
+
+const TaskQueueImpl::Task* WorkQueue::GetFrontTask() const {
+  if (tasks_.empty())
+    return nullptr;
+  return &tasks_.front();
+}
+
+const TaskQueueImpl::Task* WorkQueue::GetBackTask() const {
+  if (tasks_.empty())
+    return nullptr;
+  return &tasks_.back();
+}
+
+bool WorkQueue::BlockedByFence() const {
+  // |fence_| == 0 means no fence is installed.
+  if (!fence_)
+    return false;
+
+  // If the queue is empty then any future tasks will have a higher enqueue
+  // order and will be blocked. The queue is also blocked if the head is past
+  // the fence.
+  return tasks_.empty() || tasks_.front().enqueue_order() >= fence_;
+}
+
+bool WorkQueue::GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const {
+  if (tasks_.empty() || BlockedByFence())
+    return false;
+  // Quick sanity check: enqueue orders must be monotonic within the deque.
+  DCHECK_LE(tasks_.front().enqueue_order(), tasks_.back().enqueue_order())
+      << task_queue_->GetName() << " : " << work_queue_sets_->GetName() << " : "
+      << name_;
+  *enqueue_order = tasks_.front().enqueue_order();
+  return true;
+}
+
+void WorkQueue::Push(TaskQueueImpl::Task task) {
+  bool was_empty = tasks_.empty();
+#ifndef NDEBUG
+  DCHECK(task.enqueue_order_set());
+#endif
+
+  // Make sure the |enqueue_order()| is monotonically increasing.
+  DCHECK(was_empty || tasks_.rbegin()->enqueue_order() < task.enqueue_order());
+
+  // Amortized O(1).
+  tasks_.push_back(std::move(task));
+
+  // Only a push onto an empty queue can change what WorkQueueSets sees.
+  if (!was_empty)
+    return;
+
+  // If we hit the fence, pretend to WorkQueueSets that we're empty.
+  if (work_queue_sets_ && !BlockedByFence())
+    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+}
+
+void WorkQueue::PushNonNestableTaskToFront(TaskQueueImpl::Task task) {
+  DCHECK(task.nestable == base::Nestable::kNonNestable);
+
+  bool was_empty = tasks_.empty();
+  bool was_blocked = BlockedByFence();
+#ifndef NDEBUG
+  DCHECK(task.enqueue_order_set());
+#endif
+
+  if (!was_empty) {
+    // Make sure the |enqueue_order()| is monotonically increasing.
+    DCHECK_LE(task.enqueue_order(), tasks_.front().enqueue_order())
+        << task_queue_->GetName() << " : " << work_queue_sets_->GetName()
+        << " : " << name_;
+  }
+
+  // Amortized O(1).
+  tasks_.push_front(std::move(task));
+
+  if (!work_queue_sets_)
+    return;
+
+  // Pretend to WorkQueueSets that nothing has changed if we're blocked.
+  if (BlockedByFence())
+    return;
+
+  // Pushing a task to the front may unblock the fence.
+  if (was_empty || was_blocked) {
+    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+  } else {
+    work_queue_sets_->OnFrontTaskChanged(this);
+  }
+}
+
+void WorkQueue::ReloadEmptyImmediateQueue() {
+  DCHECK(tasks_.empty());
+
+  tasks_ = task_queue_->TakeImmediateIncomingQueue();
+  if (tasks_.empty())
+    return;
+
+  // If we hit the fence, pretend to WorkQueueSets that we're empty.
+  if (work_queue_sets_ && !BlockedByFence())
+    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+}
+
+TaskQueueImpl::Task WorkQueue::TakeTaskFromWorkQueue() {
+  DCHECK(work_queue_sets_);
+  DCHECK(!tasks_.empty());
+
+  TaskQueueImpl::Task pending_task = tasks_.TakeFirst();
+  // NB immediate tasks have a different pipeline to delayed ones.
+  if (queue_type_ == QueueType::kImmediate && tasks_.empty()) {
+    // Short-circuit the queue reload so that OnPopQueue does the right thing.
+    tasks_ = task_queue_->TakeImmediateIncomingQueue();
+  }
+  // OnPopQueue calls GetFrontTaskEnqueueOrder which checks BlockedByFence() so
+  // we don't need to here.
+  work_queue_sets_->OnPopQueue(this);
+  task_queue_->TraceQueueSize();
+  return pending_task;
+}
+
+bool WorkQueue::RemoveAllCanceledTasksFromFront() {
+  DCHECK(work_queue_sets_);
+  bool task_removed = false;
+  // A task is considered cancelled if its callback is null or reports
+  // IsCancelled().
+  while (!tasks_.empty() &&
+         (!tasks_.front().task || tasks_.front().task.IsCancelled())) {
+    tasks_.pop_front();
+    task_removed = true;
+  }
+  if (task_removed) {
+    // NB immediate tasks have a different pipeline to delayed ones.
+    if (queue_type_ == QueueType::kImmediate && tasks_.empty()) {
+      // Short-circuit the queue reload so that OnPopQueue does the right thing.
+      tasks_ = task_queue_->TakeImmediateIncomingQueue();
+    }
+    work_queue_sets_->OnPopQueue(this);
+    task_queue_->TraceQueueSize();
+  }
+  return task_removed;
+}
+
+void WorkQueue::AssignToWorkQueueSets(WorkQueueSets* work_queue_sets) {
+  work_queue_sets_ = work_queue_sets;
+}
+
+void WorkQueue::AssignSetIndex(size_t work_queue_set_index) {
+  work_queue_set_index_ = work_queue_set_index;
+}
+
+bool WorkQueue::InsertFenceImpl(EnqueueOrder fence) {
+  DCHECK_NE(fence, 0u);
+  // Fences may only move forward, except the special value 1 which (per the
+  // DCHECK in InsertFenceSilently) blocks the queue completely.
+  DCHECK(fence >= fence_ || fence == 1u);
+  bool was_blocked_by_fence = BlockedByFence();
+  fence_ = fence;
+  return was_blocked_by_fence;
+}
+
+void WorkQueue::InsertFenceSilently(EnqueueOrder fence) {
+  // Ensure that there is no fence present or a new one blocks queue completely.
+  DCHECK(fence_ == 0u || fence == 1u);
+  InsertFenceImpl(fence);
+}
+
+bool WorkQueue::InsertFence(EnqueueOrder fence) {
+  bool was_blocked_by_fence = InsertFenceImpl(fence);
+
+  // Guard against the queue not yet being assigned to any WorkQueueSets
+  // (consistent with Push() and RemoveFence()). Previously OnQueueBlocked()
+  // below could be called through a null |work_queue_sets_|.
+  if (!work_queue_sets_)
+    return false;
+
+  // Moving the fence forward may unblock some tasks.
+  if (!tasks_.empty() && was_blocked_by_fence && !BlockedByFence()) {
+    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+    return true;
+  }
+  // Fence insertion may have blocked all tasks in this work queue.
+  if (BlockedByFence())
+    work_queue_sets_->OnQueueBlocked(this);
+  return false;
+}
+
+bool WorkQueue::RemoveFence() {
+  bool was_blocked_by_fence = BlockedByFence();
+  fence_ = 0;
+  // If removing the fence unblocked a non-empty queue, make it visible to
+  // WorkQueueSets again.
+  if (work_queue_sets_ && !tasks_.empty() && was_blocked_by_fence) {
+    work_queue_sets_->OnTaskPushedToEmptyQueue(this);
+    return true;
+  }
+  return false;
+}
+
+bool WorkQueue::ShouldRunBefore(const WorkQueue* other_queue) const {
+  DCHECK(!tasks_.empty());
+  DCHECK(!other_queue->tasks_.empty());
+  EnqueueOrder enqueue_order = 0;
+  EnqueueOrder other_enqueue_order = 0;
+  bool have_task = GetFrontTaskEnqueueOrder(&enqueue_order);
+  bool have_other_task =
+      other_queue->GetFrontTaskEnqueueOrder(&other_enqueue_order);
+  DCHECK(have_task);
+  DCHECK(have_other_task);
+  // Older (smaller) enqueue order runs first.
+  return enqueue_order < other_enqueue_order;
+}
+
+void WorkQueue::PopTaskForTesting() {
+  if (tasks_.empty())
+    return;
+  tasks_.pop_front();
+}
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue.h b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue.h
new file mode 100644
index 00000000000..9b3b8010aec
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue.h
@@ -0,0 +1,156 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_WORK_QUEUE_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_WORK_QUEUE_H_
+
+#include <stddef.h>
+
+#include <set>
+
+#include "base/trace_event/trace_event.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "third_party/blink/renderer/platform/scheduler/base/enqueue_order.h"
+#include "third_party/blink/renderer/platform/scheduler/base/intrusive_heap.h"
+#include "third_party/blink/renderer/platform/scheduler/base/sequenced_task_source.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+class WorkQueueSets;
+
+// This class keeps track of immediate and delayed tasks which are due to run
+// now. It interfaces deeply with WorkQueueSets which keeps track of which queue
+// (with a given priority) contains the oldest task.
+//
+// If a fence is inserted, WorkQueue behaves normally up until
+// TakeTaskFromWorkQueue reaches or exceeds the fence. At that point the
+// API subset used by WorkQueueSets pretends the WorkQueue is empty until the
+// fence is removed. This functionality is a primitive intended for use by
+// throttling mechanisms.
+class PLATFORM_EXPORT WorkQueue {
+ public:
+  using QueueType = SequencedTaskSource::WorkType;
+
+  // Note |task_queue| can be null if queue_type is kNonNestable.
+  WorkQueue(TaskQueueImpl* task_queue, const char* name, QueueType queue_type);
+  ~WorkQueue();
+
+  // Associates this work queue with the given work queue sets. This must be
+  // called before any tasks can be inserted into this work queue.
+  void AssignToWorkQueueSets(WorkQueueSets* work_queue_sets);
+
+  // Assigns the current set index.
+  void AssignSetIndex(size_t work_queue_set_index);
+
+  // Writes a trace representation of each queued task into |state|.
+  void AsValueInto(base::TimeTicks now,
+                   base::trace_event::TracedValue* state) const;
+
+  // Returns true if the |tasks_| is empty. This method ignores any fences.
+  bool Empty() const { return tasks_.empty(); }
+
+  // If the |tasks_| isn't empty and a fence hasn't been reached,
+  // |enqueue_order| gets set to the enqueue order of the front task and the
+  // function returns true. Otherwise the function returns false.
+  bool GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const;
+
+  // Returns the first task in this queue or null if the queue is empty. This
+  // method ignores any fences.
+  const TaskQueueImpl::Task* GetFrontTask() const;
+
+  // Returns the last task in this queue or null if the queue is empty. This
+  // method ignores any fences.
+  const TaskQueueImpl::Task* GetBackTask() const;
+
+  // Pushes the task onto the |tasks_| and if a fence hasn't been reached
+  // it informs the WorkQueueSets if the head changed.
+  void Push(TaskQueueImpl::Task task);
+
+  // Pushes the task onto the front of the |tasks_| and if it's before any
+  // fence it informs the WorkQueueSets the head changed. Use with caution: this
+  // API can easily lead to task starvation if misused.
+  void PushNonNestableTaskToFront(TaskQueueImpl::Task task);
+
+  // Reloads the empty |tasks_| with
+  // |task_queue_->TakeImmediateIncomingQueue| and if a fence hasn't been
+  // reached it informs the WorkQueueSets if the head changed.
+  void ReloadEmptyImmediateQueue();
+
+  // Number of queued tasks, ignoring any fences.
+  size_t Size() const { return tasks_.size(); }
+
+  // Pulls a task off the |tasks_| and informs the WorkQueueSets. If the
+  // task removed had an enqueue order >= the current fence then WorkQueue
+  // pretends to be empty as far as the WorkQueueSets is concerned.
+  TaskQueueImpl::Task TakeTaskFromWorkQueue();
+
+  // Removes all canceled tasks from the head of the list. Returns true if any
+  // tasks were removed.
+  bool RemoveAllCanceledTasksFromFront();
+
+  const char* name() const { return name_; }
+
+  TaskQueueImpl* task_queue() const { return task_queue_; }
+
+  WorkQueueSets* work_queue_sets() const { return work_queue_sets_; }
+
+  size_t work_queue_set_index() const { return work_queue_set_index_; }
+
+  // Handle into the WorkQueueSets' intrusive heap; invalid when this queue is
+  // not (visibly) in any heap.
+  HeapHandle heap_handle() const { return heap_handle_; }
+
+  void set_heap_handle(HeapHandle handle) { heap_handle_ = handle; }
+
+  QueueType queue_type() const { return queue_type_; }
+
+  // Returns true if the front task in this queue has an older enqueue order
+  // than the front task of |other_queue|. Both queues are assumed to be
+  // non-empty. This method ignores any fences.
+  bool ShouldRunBefore(const WorkQueue* other_queue) const;
+
+  // Submit a fence. When TakeTaskFromWorkQueue encounters a task whose
+  // enqueue_order is >= |fence| then the WorkQueue will start pretending to be
+  // empty.
+  // Inserting a fence may supersede a previous one and unblock some tasks.
+  // Returns true if any tasks were unblocked, returns false otherwise.
+  bool InsertFence(EnqueueOrder fence);
+
+  // Submit a fence without triggering a WorkQueueSets notification.
+  // Caller must ensure that WorkQueueSets are properly updated.
+  // This method should not be called when a fence is already present.
+  void InsertFenceSilently(EnqueueOrder fence);
+
+  // Removes any fences that were added and if WorkQueue was pretending to be
+  // empty, then the real value is reported to WorkQueueSets. Returns true if
+  // any tasks were unblocked.
+  bool RemoveFence();
+
+  // Returns true if any tasks are blocked by the fence. Returns true if the
+  // queue is empty and fence has been set (i.e. future tasks would be blocked).
+  // Otherwise returns false.
+  bool BlockedByFence() const;
+
+  // Test support function. This should not be used in production code.
+  void PopTaskForTesting();
+
+ private:
+  bool InsertFenceImpl(EnqueueOrder fence);
+
+  TaskQueueImpl::TaskDeque tasks_;
+  WorkQueueSets* work_queue_sets_ = nullptr;  // NOT OWNED.
+  TaskQueueImpl* const task_queue_;           // NOT OWNED.
+  size_t work_queue_set_index_ = 0;
+  HeapHandle heap_handle_;
+  const char* const name_;
+  EnqueueOrder fence_ = 0;  // 0 means no fence installed.
+  const QueueType queue_type_;
+
+  DISALLOW_COPY_AND_ASSIGN(WorkQueue);
+};
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_WORK_QUEUE_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets.cc
new file mode 100644
index 00000000000..04f64aaa021
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets.cc
@@ -0,0 +1,172 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h"
+
+#include "base/logging.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+WorkQueueSets::WorkQueueSets(size_t num_sets, const char* name)
+    : work_queue_heaps_(num_sets), name_(name) {}
+
+WorkQueueSets::~WorkQueueSets() = default;
+
+// Adds |work_queue| to set |set_index|, inserting it into the heap only if it
+// has a visible (non-fence-blocked) front task.
+void WorkQueueSets::AddQueue(WorkQueue* work_queue, size_t set_index) {
+  DCHECK(!work_queue->work_queue_sets());
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  EnqueueOrder enqueue_order;
+  bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+  work_queue->AssignToWorkQueueSets(this);
+  work_queue->AssignSetIndex(set_index);
+  if (!has_enqueue_order)
+    return;
+  work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
+}
+
+// Detaches |work_queue| and, if it is currently in a heap (valid heap handle),
+// removes it from that heap.
+void WorkQueueSets::RemoveQueue(WorkQueue* work_queue) {
+  DCHECK_EQ(this, work_queue->work_queue_sets());
+  work_queue->AssignToWorkQueueSets(nullptr);
+  HeapHandle heap_handle = work_queue->heap_handle();
+  if (!heap_handle.IsValid())
+    return;
+  size_t set_index = work_queue->work_queue_set_index();
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  work_queue_heaps_[set_index].erase(heap_handle);
+}
+
+// Moves |work_queue| from its current set to |set_index|, re-inserting into
+// the new heap only if it has a visible front task.
+void WorkQueueSets::ChangeSetIndex(WorkQueue* work_queue, size_t set_index) {
+  DCHECK_EQ(this, work_queue->work_queue_sets());
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  EnqueueOrder enqueue_order;
+  bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+  size_t old_set = work_queue->work_queue_set_index();
+  DCHECK_LT(old_set, work_queue_heaps_.size());
+  DCHECK_NE(old_set, set_index);
+  work_queue->AssignSetIndex(set_index);
+  if (!has_enqueue_order)
+    return;
+  work_queue_heaps_[old_set].erase(work_queue->heap_handle());
+  work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
+}
+
+// Re-keys |work_queue| in its heap after its front task changed. The queue
+// must still have a visible front task (DCHECKed).
+void WorkQueueSets::OnFrontTaskChanged(WorkQueue* work_queue) {
+  EnqueueOrder enqueue_order;
+  bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+  DCHECK(has_enqueue_order);
+  size_t set = work_queue->work_queue_set_index();
+  work_queue_heaps_[set].ChangeKey(work_queue->heap_handle(),
+                                   {enqueue_order, work_queue});
+}
+
+void WorkQueueSets::OnTaskPushedToEmptyQueue(WorkQueue* work_queue) {
+  // NOTE if this function changes, we need to keep |WorkQueueSets::AddQueue| in
+  // sync.
+  DCHECK_EQ(this, work_queue->work_queue_sets());
+  EnqueueOrder enqueue_order;
+  bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+  DCHECK(has_enqueue_order);
+  size_t set_index = work_queue->work_queue_set_index();
+  DCHECK_LT(set_index, work_queue_heaps_.size()) << " set_index = "
+                                                 << set_index;
+  // |work_queue| should not be in work_queue_heaps_[set_index].
+  DCHECK(!work_queue->heap_handle().IsValid());
+  work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
+}
+
+void WorkQueueSets::OnPopQueue(WorkQueue* work_queue) {
+  // Assume that |work_queue| contains the lowest enqueue_order.
+  size_t set_index = work_queue->work_queue_set_index();
+  DCHECK_EQ(this, work_queue->work_queue_sets());
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  DCHECK(!work_queue_heaps_[set_index].empty()) << " set_index = " << set_index;
+  DCHECK_EQ(work_queue_heaps_[set_index].Min().value, work_queue)
+      << " set_index = " << set_index;
+  DCHECK(work_queue->heap_handle().IsValid());
+  EnqueueOrder enqueue_order;
+  if (work_queue->GetFrontTaskEnqueueOrder(&enqueue_order)) {
+    // O(log n)
+    work_queue_heaps_[set_index].ReplaceMin({enqueue_order, work_queue});
+  } else {
+    // The queue is now empty (or fence-blocked): drop it from the heap.
+    // O(log n)
+    work_queue_heaps_[set_index].Pop();
+    DCHECK(work_queue_heaps_[set_index].empty() ||
+           work_queue_heaps_[set_index].Min().value != work_queue);
+  }
+}
+
+// Hides a fence-blocked queue from its heap, if it was visible.
+void WorkQueueSets::OnQueueBlocked(WorkQueue* work_queue) {
+  DCHECK_EQ(this, work_queue->work_queue_sets());
+  HeapHandle heap_handle = work_queue->heap_handle();
+  if (!heap_handle.IsValid())
+    return;
+  size_t set_index = work_queue->work_queue_set_index();
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  work_queue_heaps_[set_index].erase(heap_handle);
+}
+
+bool WorkQueueSets::GetOldestQueueInSet(size_t set_index,
+                                        WorkQueue** out_work_queue) const {
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  if (work_queue_heaps_[set_index].empty())
+    return false;
+  *out_work_queue = work_queue_heaps_[set_index].Min().value;
+  DCHECK_EQ(set_index, (*out_work_queue)->work_queue_set_index());
+  DCHECK((*out_work_queue)->heap_handle().IsValid());
+  return true;
+}
+
+bool WorkQueueSets::GetOldestQueueAndEnqueueOrderInSet(
+    size_t set_index,
+    WorkQueue** out_work_queue,
+    EnqueueOrder* out_enqueue_order) const {
+  DCHECK_LT(set_index, work_queue_heaps_.size());
+  if (work_queue_heaps_[set_index].empty())
+    return false;
+  const OldestTaskEnqueueOrder& oldest = work_queue_heaps_[set_index].Min();
+  *out_work_queue = oldest.value;
+  *out_enqueue_order = oldest.key;
+  // Sanity check: the cached heap key must match the queue's real front task.
+  EnqueueOrder enqueue_order;
+  DCHECK(oldest.value->GetFrontTaskEnqueueOrder(&enqueue_order) &&
+         oldest.key == enqueue_order);
+  return true;
+}
+
+bool WorkQueueSets::IsSetEmpty(size_t set_index) const {
+  DCHECK_LT(set_index, work_queue_heaps_.size()) << " set_index = "
+                                                 << set_index;
+  return work_queue_heaps_[set_index].empty();
+}
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+// Test-only O(num sets * num queues) scan verifying heap membership and key
+// consistency for |work_queue|.
+bool WorkQueueSets::ContainsWorkQueueForTest(
+    const WorkQueue* work_queue) const {
+  EnqueueOrder enqueue_order;
+  bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
+
+  for (const IntrusiveHeap<OldestTaskEnqueueOrder>& heap : work_queue_heaps_) {
+    for (const OldestTaskEnqueueOrder& heap_value_pair : heap) {
+      if (heap_value_pair.value == work_queue) {
+        DCHECK(has_enqueue_order);
+        DCHECK_EQ(heap_value_pair.key, enqueue_order);
+        DCHECK_EQ(this, work_queue->work_queue_sets());
+        return true;
+      }
+    }
+  }
+
+  // A queue assigned to this set but absent from every heap must be empty or
+  // fence-blocked.
+  if (work_queue->work_queue_sets() == this) {
+    DCHECK(!has_enqueue_order);
+    return true;
+  }
+
+  return false;
+}
+#endif
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h
new file mode 100644
index 00000000000..aa51cacf170
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h
@@ -0,0 +1,104 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_WORK_QUEUE_SETS_H_
+#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_WORK_QUEUE_SETS_H_
+
+#include <stddef.h>
+
+#include <map>
+#include <vector>
+
+#include "base/logging.h"
+#include "base/macros.h"
+#include "base/trace_event/trace_event_argument.h"
+#include "third_party/blink/renderer/platform/platform_export.h"
+#include "third_party/blink/renderer/platform/scheduler/base/intrusive_heap.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+
+// There is a WorkQueueSet for each scheduler priority and each WorkQueueSet
+// uses an IntrusiveHeap of (enqueue order, WorkQueue*) pairs to keep track of
+// which queue in the set has the oldest task (i.e. the one that should be run
+// next if the TaskQueueSelector chooses to run a task at a given priority).
+// This works because the heap is a min-heap keyed on the enqueue order of each
+// queue's front task, so the oldest queue is always at the root.
+class PLATFORM_EXPORT WorkQueueSets {
+ public:
+ WorkQueueSets(size_t num_sets, const char* name);
+ ~WorkQueueSets();
+
+ // O(log num queues)
+ void AddQueue(WorkQueue* queue, size_t set_index);
+
+ // O(log num queues)
+ void RemoveQueue(WorkQueue* work_queue);
+
+ // O(log num queues)
+ void ChangeSetIndex(WorkQueue* queue, size_t set_index);
+
+ // O(log num queues)
+ void OnFrontTaskChanged(WorkQueue* queue);
+
+ // O(log num queues)
+ void OnTaskPushedToEmptyQueue(WorkQueue* work_queue);
+
+ // If empty it's O(1) amortized, otherwise it's O(log num queues)
+ // Assumes |work_queue| contains the lowest enqueue order in the set.
+ void OnPopQueue(WorkQueue* work_queue);
+
+ // O(log num queues)
+ void OnQueueBlocked(WorkQueue* work_queue);
+
+ // O(1)
+ bool GetOldestQueueInSet(size_t set_index, WorkQueue** out_work_queue) const;
+
+ // O(1)
+ bool GetOldestQueueAndEnqueueOrderInSet(
+ size_t set_index,
+ WorkQueue** out_work_queue,
+ EnqueueOrder* out_enqueue_order) const;
+
+ // O(1)
+ bool IsSetEmpty(size_t set_index) const;
+
+#if DCHECK_IS_ON() || !defined(NDEBUG)
+ // Note this iterates over everything in |work_queue_heaps_|.
+ // It's intended for use with DCHECKs and for testing.
+ bool ContainsWorkQueueForTest(const WorkQueue* queue) const;
+#endif
+
+ const char* GetName() const { return name_; }
+
+ private:
+ struct OldestTaskEnqueueOrder {
+ EnqueueOrder key;
+ WorkQueue* value;
+
+ bool operator<=(const OldestTaskEnqueueOrder& other) const {
+ return key <= other.key;
+ }
+
+ void SetHeapHandle(HeapHandle handle) { value->set_heap_handle(handle); }
+
+ void ClearHeapHandle() { value->set_heap_handle(HeapHandle()); }
+ };
+
+ // For each set |work_queue_heaps_| has a queue of WorkQueue ordered by the
+ // oldest task in each WorkQueue.
+ std::vector<IntrusiveHeap<OldestTaskEnqueueOrder>> work_queue_heaps_;
+ const char* const name_;
+
+ DISALLOW_COPY_AND_ASSIGN(WorkQueueSets);
+};
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
+
+#endif // THIRD_PARTY_BLINK_RENDERER_PLATFORM_SCHEDULER_BASE_WORK_QUEUE_SETS_H_
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets_unittest.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets_unittest.cc
new file mode 100644
index 00000000000..fe4794da7b9
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_sets_unittest.cc
@@ -0,0 +1,329 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h"
+
+#include <stddef.h>
+
+#include "base/memory/ptr_util.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue.h"
+
+namespace blink {
+namespace scheduler {
+class TimeDomain;
+
+namespace internal {
+
+class WorkQueueSetsTest : public testing::Test {
+ public:
+ void SetUp() override {
+ work_queue_sets_.reset(new WorkQueueSets(kNumSets, "test"));
+ }
+
+ void TearDown() override {
+ for (std::unique_ptr<WorkQueue>& work_queue : work_queues_) {
+ if (work_queue->work_queue_sets())
+ work_queue_sets_->RemoveQueue(work_queue.get());
+ }
+ }
+
+ protected:
+ enum {
+ kNumSets = 5 // An arbitrary choice.
+ };
+
+ WorkQueue* NewTaskQueue(const char* queue_name) {
+ WorkQueue* queue =
+ new WorkQueue(nullptr, "test", WorkQueue::QueueType::kImmediate);
+ work_queues_.push_back(base::WrapUnique(queue));
+ work_queue_sets_->AddQueue(queue, TaskQueue::kControlPriority);
+ return queue;
+ }
+
+ TaskQueueImpl::Task FakeTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(base::BindOnce([] {}), FROM_HERE),
+ base::TimeTicks(), 0);
+ fake_task.set_enqueue_order(enqueue_order);
+ return fake_task;
+ }
+
+ TaskQueueImpl::Task FakeNonNestableTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(base::BindOnce([] {}), FROM_HERE),
+ base::TimeTicks(), 0);
+ fake_task.set_enqueue_order(enqueue_order);
+ fake_task.nestable = base::Nestable::kNonNestable;
+ return fake_task;
+ }
+
+ std::vector<std::unique_ptr<WorkQueue>> work_queues_;
+ std::unique_ptr<WorkQueueSets> work_queue_sets_;
+};
+
+TEST_F(WorkQueueSetsTest, ChangeSetIndex) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ size_t set = TaskQueue::kNormalPriority;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ EXPECT_EQ(set, work_queue->work_queue_set_index());
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_QueueEmpty) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ size_t set = TaskQueue::kNormalPriority;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_FALSE(
+ work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+}
+
+TEST_F(WorkQueueSetsTest, OnTaskPushedToEmptyQueue) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ size_t set = TaskQueue::kNormalPriority;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_FALSE(
+ work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+
+ // Calls OnTaskPushedToEmptyQueue.
+ work_queue->Push(FakeTaskWithEnqueueOrder(10));
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(work_queue, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_SingleTaskInSet) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue->Push(FakeTaskWithEnqueueOrder(10));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(work_queue, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueAndEnqueueOrderInSet) {
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue->Push(FakeTaskWithEnqueueOrder(10));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+
+ WorkQueue* selected_work_queue;
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueAndEnqueueOrderInSet(
+ set, &selected_work_queue, &enqueue_order));
+ EXPECT_EQ(work_queue, selected_work_queue);
+ EXPECT_EQ(10u, enqueue_order);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_MultipleAgesInSet) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue2");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 2;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, OnPopQueue) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(1));
+ queue2->Push(FakeTaskWithEnqueueOrder(3));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 3;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+
+ queue2->PopTaskForTesting();
+ work_queue_sets_->OnPopQueue(queue2);
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, OnPopQueue_QueueBecomesEmpty) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 4;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+
+ queue3->PopTaskForTesting();
+ work_queue_sets_->OnPopQueue(queue3);
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest,
+ GetOldestQueueInSet_MultipleAgesInSetIntegerRollover) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(0x7ffffff1));
+ queue2->Push(FakeTaskWithEnqueueOrder(0x7ffffff0));
+ queue3->Push(FakeTaskWithEnqueueOrder(-0x7ffffff1));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, GetOldestQueueInSet_MultipleAgesInSet_RemoveQueue) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 1;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+ work_queue_sets_->RemoveQueue(queue3);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, ChangeSetIndex_Complex) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ WorkQueue* queue4 = NewTaskQueue("queue4");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ queue4->Push(FakeTaskWithEnqueueOrder(3));
+ size_t set1 = 1;
+ size_t set2 = 2;
+ work_queue_sets_->ChangeSetIndex(queue1, set1);
+ work_queue_sets_->ChangeSetIndex(queue2, set1);
+ work_queue_sets_->ChangeSetIndex(queue3, set2);
+ work_queue_sets_->ChangeSetIndex(queue4, set2);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set1, &selected_work_queue));
+ EXPECT_EQ(queue2, selected_work_queue);
+
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set2, &selected_work_queue));
+ EXPECT_EQ(queue4, selected_work_queue);
+
+ work_queue_sets_->ChangeSetIndex(queue4, set1);
+
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set1, &selected_work_queue));
+ EXPECT_EQ(queue4, selected_work_queue);
+
+ EXPECT_TRUE(
+ work_queue_sets_->GetOldestQueueInSet(set2, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+}
+
+TEST_F(WorkQueueSetsTest, IsSetEmpty_NoWork) {
+ size_t set = 2;
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+}
+
+TEST_F(WorkQueueSetsTest, IsSetEmpty_Work) {
+ size_t set = 2;
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+
+ WorkQueue* work_queue = NewTaskQueue("queue");
+ work_queue->Push(FakeTaskWithEnqueueOrder(1));
+ work_queue_sets_->ChangeSetIndex(work_queue, set);
+ EXPECT_FALSE(work_queue_sets_->IsSetEmpty(set));
+
+ work_queue->PopTaskForTesting();
+ work_queue_sets_->OnPopQueue(work_queue);
+ EXPECT_TRUE(work_queue_sets_->IsSetEmpty(set));
+}
+
+TEST_F(WorkQueueSetsTest, BlockQueuesByFence) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(7));
+ queue1->Push(FakeTaskWithEnqueueOrder(8));
+ queue2->Push(FakeTaskWithEnqueueOrder(9));
+
+ size_t set = TaskQueue::kControlPriority;
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(selected_work_queue, queue1);
+
+ queue1->InsertFence(1);
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(selected_work_queue, queue2);
+}
+
+TEST_F(WorkQueueSetsTest, PushNonNestableTaskToFront) {
+ WorkQueue* queue1 = NewTaskQueue("queue1");
+ WorkQueue* queue2 = NewTaskQueue("queue2");
+ WorkQueue* queue3 = NewTaskQueue("queue3");
+ queue1->Push(FakeTaskWithEnqueueOrder(6));
+ queue2->Push(FakeTaskWithEnqueueOrder(5));
+ queue3->Push(FakeTaskWithEnqueueOrder(4));
+ size_t set = 4;
+ work_queue_sets_->ChangeSetIndex(queue1, set);
+ work_queue_sets_->ChangeSetIndex(queue2, set);
+ work_queue_sets_->ChangeSetIndex(queue3, set);
+
+ WorkQueue* selected_work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue3, selected_work_queue);
+
+ queue1->PushNonNestableTaskToFront(FakeNonNestableTaskWithEnqueueOrder(2));
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(set, &selected_work_queue));
+ EXPECT_EQ(queue1, selected_work_queue);
+}
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink
diff --git a/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_unittest.cc b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_unittest.cc
new file mode 100644
index 00000000000..91624b2a65b
--- /dev/null
+++ b/chromium/third_party/blink/renderer/platform/scheduler/base/work_queue_unittest.cc
@@ -0,0 +1,474 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue.h"
+
+#include <stddef.h>
+#include <memory>
+
+#include "base/bind.h"
+#include "testing/gmock/include/gmock/gmock.h"
+#include "third_party/blink/renderer/platform/scheduler/base/real_time_domain.h"
+#include "third_party/blink/renderer/platform/scheduler/base/task_queue_impl.h"
+#include "third_party/blink/renderer/platform/scheduler/base/work_queue_sets.h"
+
+namespace blink {
+namespace scheduler {
+namespace internal {
+namespace {
+void NopTask() {}
+
+struct Cancelable {
+ Cancelable() : weak_ptr_factory(this) {}
+
+ void NopTask() {}
+
+ base::WeakPtrFactory<Cancelable> weak_ptr_factory;
+};
+} // namespace
+
+class WorkQueueTest : public testing::Test {
+ public:
+ void SetUp() override {
+ time_domain_.reset(new RealTimeDomain());
+ task_queue_ = std::make_unique<TaskQueueImpl>(nullptr, time_domain_.get(),
+ TaskQueue::Spec("test"));
+
+ work_queue_.reset(new WorkQueue(task_queue_.get(), "test",
+ WorkQueue::QueueType::kImmediate));
+ work_queue_sets_.reset(new WorkQueueSets(1, "test"));
+ work_queue_sets_->AddQueue(work_queue_.get(), 0);
+ }
+
+ void TearDown() override { work_queue_sets_->RemoveQueue(work_queue_.get()); }
+
+ protected:
+ TaskQueueImpl::Task FakeCancelableTaskWithEnqueueOrder(
+ int enqueue_order,
+ base::WeakPtr<Cancelable> weak_ptr) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(base::BindOnce(&Cancelable::NopTask, weak_ptr),
+ FROM_HERE),
+ base::TimeTicks(), 0);
+ fake_task.set_enqueue_order(enqueue_order);
+ return fake_task;
+ }
+
+ TaskQueueImpl::Task FakeTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(base::BindOnce(&NopTask), FROM_HERE),
+ base::TimeTicks(), 0);
+ fake_task.set_enqueue_order(enqueue_order);
+ return fake_task;
+ }
+
+ TaskQueueImpl::Task FakeNonNestableTaskWithEnqueueOrder(int enqueue_order) {
+ TaskQueueImpl::Task fake_task(
+ TaskQueue::PostedTask(base::BindOnce(&NopTask), FROM_HERE),
+ base::TimeTicks(), 0);
+ fake_task.set_enqueue_order(enqueue_order);
+ fake_task.nestable = base::Nestable::kNonNestable;
+ return fake_task;
+ }
+
+ std::unique_ptr<RealTimeDomain> time_domain_;
+ std::unique_ptr<TaskQueueImpl> task_queue_;
+ std::unique_ptr<WorkQueue> work_queue_;
+ std::unique_ptr<WorkQueueSets> work_queue_sets_;
+ std::unique_ptr<TaskQueueImpl::TaskDeque> incoming_queue_;
+};
+
+TEST_F(WorkQueueTest, Empty) {
+ EXPECT_TRUE(work_queue_->Empty());
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->Empty());
+}
+
+TEST_F(WorkQueueTest, Empty_IgnoresFences) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ work_queue_->InsertFence(1);
+ EXPECT_FALSE(work_queue_->Empty());
+}
+
+TEST_F(WorkQueueTest, GetFrontTaskEnqueueOrderQueueEmpty) {
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, GetFrontTaskEnqueueOrder) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, enqueue_order);
+}
+
+TEST_F(WorkQueueTest, GetFrontTaskQueueEmpty) {
+ EXPECT_EQ(nullptr, work_queue_->GetFrontTask());
+}
+
+TEST_F(WorkQueueTest, GetFrontTask) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ ASSERT_NE(nullptr, work_queue_->GetFrontTask());
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, GetBackTask_Empty) {
+ EXPECT_EQ(nullptr, work_queue_->GetBackTask());
+}
+
+TEST_F(WorkQueueTest, GetBackTask) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ ASSERT_NE(nullptr, work_queue_->GetBackTask());
+ EXPECT_EQ(4ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, Push) {
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_EQ(work_queue_.get(), work_queue);
+}
+
+TEST_F(WorkQueueTest, PushAfterFenceHit) {
+ work_queue_->InsertFence(1);
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+}
+
+TEST_F(WorkQueueTest, PushNonNestableTaskToFront) {
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(3));
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_EQ(work_queue_.get(), work_queue);
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(2));
+
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+ EXPECT_EQ(3ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, PushNonNestableTaskToFrontAfterFenceHit) {
+ work_queue_->InsertFence(1);
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(2));
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+}
+
+TEST_F(WorkQueueTest, PushNonNestableTaskToFrontBeforeFenceHit) {
+ work_queue_->InsertFence(3);
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+
+ work_queue_->PushNonNestableTaskToFront(
+ FakeNonNestableTaskWithEnqueueOrder(2));
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+}
+
+TEST_F(WorkQueueTest, ReloadEmptyImmediateQueue) {
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(2));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(3));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(4));
+
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_TRUE(work_queue_->Empty());
+ work_queue_->ReloadEmptyImmediateQueue();
+
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ ASSERT_NE(nullptr, work_queue_->GetFrontTask());
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+
+ ASSERT_NE(nullptr, work_queue_->GetBackTask());
+ EXPECT_EQ(4ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, ReloadEmptyImmediateQueueAfterFenceHit) {
+ work_queue_->InsertFence(1);
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(2));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(3));
+ task_queue_->PushImmediateIncomingTaskForTest(FakeTaskWithEnqueueOrder(4));
+
+ WorkQueue* work_queue;
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_TRUE(work_queue_->Empty());
+ work_queue_->ReloadEmptyImmediateQueue();
+
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ ASSERT_NE(nullptr, work_queue_->GetFrontTask());
+ EXPECT_EQ(2ull, work_queue_->GetFrontTask()->enqueue_order());
+
+ ASSERT_NE(nullptr, work_queue_->GetBackTask());
+ EXPECT_EQ(4ull, work_queue_->GetBackTask()->enqueue_order());
+}
+
+TEST_F(WorkQueueTest, TakeTaskFromWorkQueue) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ WorkQueue* work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_EQ(3ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_EQ(4ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_TRUE(work_queue_->Empty());
+}
+
+TEST_F(WorkQueueTest, TakeTaskFromWorkQueue_HitFence) {
+ work_queue_->InsertFence(3);
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ WorkQueue* work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, InsertFenceBeforeEnqueueing) {
+ EXPECT_FALSE(work_queue_->InsertFence(1));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, InsertFenceAfterEnqueueingNonBlocking) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ EXPECT_FALSE(work_queue_->InsertFence(5));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+}
+
+TEST_F(WorkQueueTest, InsertFenceAfterEnqueueing) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+
+ // NB in reality a fence will always be greater than any currently enqueued
+ // tasks.
+ EXPECT_FALSE(work_queue_->InsertFence(1));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, InsertNewFence) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+
+ EXPECT_FALSE(work_queue_->InsertFence(3));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ // Note until TakeTaskFromWorkQueue() is called we don't hit the fence.
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, enqueue_order);
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ // Inserting the new fence should temporarily unblock the queue until the new
+ // one is hit.
+ EXPECT_TRUE(work_queue_->InsertFence(6));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(4ull, enqueue_order);
+ EXPECT_EQ(4ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, PushWithNonEmptyQueueDoesNotHitFence) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, RemoveFence) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+ work_queue_->InsertFence(3);
+
+ WorkQueue* work_queue;
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+
+ EXPECT_EQ(2ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_FALSE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->Empty());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EXPECT_TRUE(work_queue_->RemoveFence());
+ EXPECT_EQ(4ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_sets_->GetOldestQueueInSet(0, &work_queue));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, RemoveFenceButNoFence) {
+ EXPECT_FALSE(work_queue_->RemoveFence());
+}
+
+TEST_F(WorkQueueTest, RemoveFenceNothingUnblocked) {
+ EXPECT_FALSE(work_queue_->InsertFence(1));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EXPECT_FALSE(work_queue_->RemoveFence());
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFence) {
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+ EXPECT_FALSE(work_queue_->InsertFence(1));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFencePopBecomesEmpty) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(2));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(1ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFencePop) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(2));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(1ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, InitiallyEmptyBlockedByFenceNewFenceUnblocks) {
+ EXPECT_FALSE(work_queue_->InsertFence(1));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ EXPECT_TRUE(work_queue_->InsertFence(3));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, BlockedByFenceNewFenceUnblocks) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(1));
+ EXPECT_FALSE(work_queue_->InsertFence(2));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_EQ(1ull, work_queue_->TakeTaskFromWorkQueue().enqueue_order());
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EXPECT_TRUE(work_queue_->InsertFence(4));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+}
+
+TEST_F(WorkQueueTest, InsertFenceAfterEnqueuing) {
+ work_queue_->Push(FakeTaskWithEnqueueOrder(2));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(3));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(4));
+ EXPECT_FALSE(work_queue_->BlockedByFence());
+
+ EXPECT_FALSE(work_queue_->InsertFence(1));
+ EXPECT_TRUE(work_queue_->BlockedByFence());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_FALSE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+}
+
+TEST_F(WorkQueueTest, RemoveAllCanceledTasksFromFront) {
+ {
+ Cancelable cancelable;
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 2, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 3, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 4, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+ }
+ EXPECT_TRUE(work_queue_->RemoveAllCanceledTasksFromFront());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(5ull, enqueue_order);
+}
+
+TEST_F(WorkQueueTest, RemoveAllCanceledTasksFromFrontTasksNotCanceled) {
+ {
+ Cancelable cancelable;
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 2, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 3, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeCancelableTaskWithEnqueueOrder(
+ 4, cancelable.weak_ptr_factory.GetWeakPtr()));
+ work_queue_->Push(FakeTaskWithEnqueueOrder(5));
+ EXPECT_FALSE(work_queue_->RemoveAllCanceledTasksFromFront());
+
+ EnqueueOrder enqueue_order;
+ EXPECT_TRUE(work_queue_->GetFrontTaskEnqueueOrder(&enqueue_order));
+ EXPECT_EQ(2ull, enqueue_order);
+ }
+}
+
+} // namespace internal
+} // namespace scheduler
+} // namespace blink