Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

View file

@ -0,0 +1,6 @@
altimin@chromium.org
carlscab@google.com
skyostil@chromium.org
# TEAM: scheduler-dev@chromium.org
# Component: Internals>SequenceManager

View file

@ -0,0 +1,34 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/associated_thread_id.h"
namespace base {
namespace sequence_manager {
namespace internal {
AssociatedThreadId::AssociatedThreadId() = default;
AssociatedThreadId::~AssociatedThreadId() = default;
void AssociatedThreadId::BindToCurrentThread() {
// TODO(altimin): Remove this after MessageLoopImpl is gone and
// initialisation is simplified.
auto current_thread_id = PlatformThread::CurrentId();
auto prev_thread_id =
thread_id_.exchange(current_thread_id, std::memory_order_release);
ANALYZER_ALLOW_UNUSED(prev_thread_id);
DCHECK(prev_thread_id == current_thread_id ||
prev_thread_id == kInvalidThreadId);
// Rebind the thread and sequence checkers to the current thread/sequence.
DETACH_FROM_THREAD(thread_checker);
DCHECK_CALLED_ON_VALID_THREAD(thread_checker);
DETACH_FROM_SEQUENCE(sequence_checker);
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker);
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,113 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_ASSOCIATED_THREAD_ID_H_
#define BASE_TASK_SEQUENCE_MANAGER_ASSOCIATED_THREAD_ID_H_
#include <atomic>
#include <memory>
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/optional.h"
#include "base/sequence_checker.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_checker.h"
namespace base {
namespace sequence_manager {
namespace internal {
// TODO(eseckler): Make this owned by SequenceManager once the TaskQueue
// refactor has happened (https://crbug.com/865411).
//
// This class is thread-safe, but see the notes about memory ordering
// guarantees on the various methods.
class BASE_EXPORT AssociatedThreadId
: public base::RefCountedThreadSafe<AssociatedThreadId> {
public:
AssociatedThreadId();
// TODO(eseckler): Replace thread_checker with sequence_checker everywhere.
THREAD_CHECKER(thread_checker);
SEQUENCE_CHECKER(sequence_checker);
static scoped_refptr<AssociatedThreadId> CreateUnbound() {
return MakeRefCounted<AssociatedThreadId>();
}
static scoped_refptr<AssociatedThreadId> CreateBound() {
auto associated_thread = MakeRefCounted<AssociatedThreadId>();
associated_thread->BindToCurrentThread();
return associated_thread;
}
// Rebind the associated thread to the current thread. This allows creating
// the SequenceManager and TaskQueues on a different thread/sequence than the
// one it will manage.
//
// Can only be called once.
void BindToCurrentThread();
// Returns the id of the thread bound to this object via a previous call to
// BindToCurrentThread(), or nullopt if no thread has been bound yet.
//
// This method guarantees a happens-before ordering with
// BindToCurrentThread(); that is, all memory writes that happened-before the
// call to BindToCurrentThread() will become visible side-effects in the
// current thread.
//
// Attention: The result might be stale by the time this method returns.
Optional<PlatformThreadId> GetBoundThreadId() const {
auto thread_id = thread_id_.load(std::memory_order_acquire);
if (thread_id == kInvalidThreadId) {
return nullopt;
} else {
return thread_id;
}
}
// Checks whether this object has already been bound to a thread.
//
// This method guarantees a happens-before ordering with
// BindToCurrentThread(); that is, all memory writes that happened-before the
// call to BindToCurrentThread() will become visible side-effects in the
// current thread.
//
// Attention: The result might be stale by the time this method returns.
bool IsBound() const {
return thread_id_.load(std::memory_order_acquire) != kInvalidThreadId;
}
// Checks whether this object is bound to the current thread. Returns false if
// this object is not bound to any thread.
//
// Note that this method provides no memory ordering guarantees but those are
// not really needed. If this method returns true we are on the same thread
// that called BindToCurrentThread(). If the method returns false this object
// could be unbound, so there is no possible ordering.
//
// Attention: The result might be stale by the time this method returns.
bool IsBoundToCurrentThread() const {
return thread_id_.load(std::memory_order_relaxed) ==
PlatformThread::CurrentId();
}
// TODO(eseckler): Add a method that checks that we are either bound already
// or on the thread which created us and use it in any_thread() accessors.
private:
friend class base::RefCountedThreadSafe<AssociatedThreadId>;
~AssociatedThreadId();
// All access to this member can be std::memory_order_relaxed as this class
// provides no ordering guarantees.
std::atomic<PlatformThreadId> thread_id_{kInvalidThreadId};
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_ASSOCIATED_THREAD_ID_H_
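
The header above documents an acquire/release contract between BindToCurrentThread() and its readers. The following is a minimal standalone sketch of the same pattern using only the C++ standard library; the class and method names are illustrative stand-ins, not Chromium's API.

#include <atomic>
#include <cassert>
#include <optional>
#include <thread>

// Illustrative stand-in for AssociatedThreadId: publishes the bound thread's
// id with release semantics; readers load it with acquire semantics, so writes
// made before binding are visible to any reader that observes the id.
class BoundThreadId {
 public:
  void BindToCurrentThread() {
    auto prev =
        id_.exchange(std::this_thread::get_id(), std::memory_order_release);
    assert(prev == std::thread::id{} || prev == std::this_thread::get_id());
    (void)prev;
  }
  std::optional<std::thread::id> GetBoundThreadId() const {
    auto id = id_.load(std::memory_order_acquire);
    if (id == std::thread::id{})  // A default-constructed id means "unbound".
      return std::nullopt;
    return id;
  }
  bool IsBoundToCurrentThread() const {
    // Relaxed is enough: a true result already implies we are on the binding
    // thread, so no further ordering is needed.
    return id_.load(std::memory_order_relaxed) == std::this_thread::get_id();
  }
 private:
  std::atomic<std::thread::id> id_{std::thread::id{}};
};

int main() {
  BoundThreadId id;
  std::thread t([&] { id.BindToCurrentThread(); });
  t.join();
  assert(id.GetBoundThreadId().has_value());
  assert(!id.IsBoundToCurrentThread());  // main() is not the bound thread.
}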

View file

@ -0,0 +1,212 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/atomic_flag_set.h"
#include <utility>
#include "base/bits.h"
#include "base/callback.h"
#include "base/logging.h"
namespace base {
namespace sequence_manager {
namespace internal {
AtomicFlagSet::AtomicFlagSet(
scoped_refptr<AssociatedThreadId> associated_thread)
: associated_thread_(std::move(associated_thread)) {}
AtomicFlagSet::~AtomicFlagSet() {
DCHECK(!alloc_list_head_);
DCHECK(!partially_free_list_head_);
}
AtomicFlagSet::AtomicFlag::AtomicFlag() = default;
AtomicFlagSet::AtomicFlag::~AtomicFlag() {
ReleaseAtomicFlag();
}
AtomicFlagSet::AtomicFlag::AtomicFlag(AtomicFlagSet* outer,
Group* element,
size_t flag_bit)
: outer_(outer), group_(element), flag_bit_(flag_bit) {}
AtomicFlagSet::AtomicFlag::AtomicFlag(AtomicFlag&& other)
: outer_(other.outer_), group_(other.group_), flag_bit_(other.flag_bit_) {
other.outer_ = nullptr;
other.group_ = nullptr;
}
void AtomicFlagSet::AtomicFlag::SetActive(bool active) {
DCHECK(group_);
if (active) {
// Release semantics are required to ensure that all memory accesses made on
// this thread happen-before any others done on the thread running the
// active callbacks.
group_->flags.fetch_or(flag_bit_, std::memory_order_release);
} else {
// No operation is being performed based on the bit *not* being set (i.e.
// state of other memory is irrelevant); hence no memory order is required
// when unsetting it.
group_->flags.fetch_and(~flag_bit_, std::memory_order_relaxed);
}
}
void AtomicFlagSet::AtomicFlag::ReleaseAtomicFlag() {
if (!group_)
return;
DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
SetActive(false);
// If |group_| was full then add it on the partially free list.
if (group_->IsFull())
outer_->AddToPartiallyFreeList(group_);
int index = Group::IndexOfFirstFlagSet(flag_bit_);
DCHECK(!group_->flag_callbacks[index].is_null());
group_->flag_callbacks[index] = RepeatingClosure();
group_->allocated_flags &= ~flag_bit_;
// If |group_| has become empty delete it.
if (group_->IsEmpty()) {
outer_->RemoveFromPartiallyFreeList(group_);
outer_->RemoveFromAllocList(group_);
}
outer_ = nullptr;
group_ = nullptr;
}
AtomicFlagSet::AtomicFlag AtomicFlagSet::AddFlag(RepeatingClosure callback) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
// Allocate a new Group if needed.
if (!partially_free_list_head_) {
AddToAllocList(std::make_unique<Group>());
AddToPartiallyFreeList(alloc_list_head_.get());
}
DCHECK(partially_free_list_head_);
Group* group = partially_free_list_head_;
size_t first_unoccupied_index =
static_cast<size_t>(group->FindFirstUnallocatedFlag());
DCHECK(!group->flag_callbacks[first_unoccupied_index]);
group->flag_callbacks[first_unoccupied_index] = std::move(callback);
size_t flag_bit = size_t{1} << first_unoccupied_index;
group->allocated_flags |= flag_bit;
if (group->IsFull())
RemoveFromPartiallyFreeList(group);
return AtomicFlag(this, group, flag_bit);
}
void AtomicFlagSet::RunActiveCallbacks() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
for (Group* iter = alloc_list_head_.get(); iter; iter = iter->next.get()) {
// Acquire semantics are required to guarantee that all memory side-effects
// made by other threads that were allowed to perform operations are
// synchronized with this thread before it returns from this method.
size_t active_flags = std::atomic_exchange_explicit(
&iter->flags, size_t{0}, std::memory_order_acquire);
// This is O(number of bits set).
while (active_flags) {
int index = Group::IndexOfFirstFlagSet(active_flags);
// Clear the flag.
active_flags ^= size_t{1} << index;
iter->flag_callbacks[index].Run();
}
}
}
AtomicFlagSet::Group::Group() = default;
AtomicFlagSet::Group::~Group() {
DCHECK_EQ(allocated_flags, 0u);
DCHECK(!partially_free_list_prev);
DCHECK(!partially_free_list_next);
}
bool AtomicFlagSet::Group::IsFull() const {
return (~allocated_flags) == 0u;
}
bool AtomicFlagSet::Group::IsEmpty() const {
return allocated_flags == 0u;
}
int AtomicFlagSet::Group::FindFirstUnallocatedFlag() const {
size_t unallocated_flags = ~allocated_flags;
DCHECK_NE(unallocated_flags, 0u);
int index = IndexOfFirstFlagSet(unallocated_flags);
DCHECK_LT(index, kNumFlags);
return index;
}
// static
int AtomicFlagSet::Group::IndexOfFirstFlagSet(size_t flag) {
DCHECK_NE(flag, 0u);
return bits::CountTrailingZeroBits(flag);
}
void AtomicFlagSet::AddToAllocList(std::unique_ptr<Group> group) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (alloc_list_head_)
alloc_list_head_->prev = group.get();
group->next = std::move(alloc_list_head_);
alloc_list_head_ = std::move(group);
}
void AtomicFlagSet::RemoveFromAllocList(Group* group) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (group->next)
group->next->prev = group->prev;
if (group->prev) {
group->prev->next = std::move(group->next);
} else {
alloc_list_head_ = std::move(group->next);
}
}
void AtomicFlagSet::AddToPartiallyFreeList(Group* group) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_NE(partially_free_list_head_, group);
DCHECK(!group->partially_free_list_prev);
DCHECK(!group->partially_free_list_next);
if (partially_free_list_head_)
partially_free_list_head_->partially_free_list_prev = group;
group->partially_free_list_next = partially_free_list_head_;
partially_free_list_head_ = group;
}
void AtomicFlagSet::RemoveFromPartiallyFreeList(Group* group) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK(partially_free_list_head_);
// Check |group| is in the list.
DCHECK(partially_free_list_head_ == group || group->partially_free_list_prev);
if (group->partially_free_list_next) {
group->partially_free_list_next->partially_free_list_prev =
group->partially_free_list_prev;
}
if (group->partially_free_list_prev) {
group->partially_free_list_prev->partially_free_list_next =
group->partially_free_list_next;
} else {
partially_free_list_head_ = group->partially_free_list_next;
}
group->partially_free_list_prev = nullptr;
group->partially_free_list_next = nullptr;
}
} // namespace internal
} // namespace sequence_manager
} // namespace base
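
RunActiveCallbacks() above combines an acquire exchange of the whole bitmask with iteration over the set bits via trailing-zero counting. Below is a standalone sketch of that inner loop, assuming only the standard library; the helper name mirrors bits::CountTrailingZeroBits but is a hand-rolled illustrative stand-in.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <functional>

// Portable, illustrative stand-in for bits::CountTrailingZeroBits().
static int CountTrailingZeroBits(size_t x) {
  int index = 0;
  while ((x & 1) == 0) {
    x >>= 1;
    ++index;
  }
  return index;
}

int main() {
  constexpr int kNumFlags = sizeof(size_t) * 8;
  std::function<void()> callbacks[kNumFlags];
  callbacks[3] = [] { std::puts("flag 3 ran"); };
  callbacks[7] = [] { std::puts("flag 7 ran"); };

  std::atomic<size_t> flags{0};
  // "SetActive(true)" from any thread: publish the bit with release semantics.
  flags.fetch_or(size_t{1} << 3, std::memory_order_release);
  flags.fetch_or(size_t{1} << 7, std::memory_order_release);

  // "RunActiveCallbacks()" on the owning thread: grab and clear every active
  // bit in one acquire exchange, then walk the set bits.
  size_t active = flags.exchange(size_t{0}, std::memory_order_acquire);
  while (active) {                          // O(number of bits set).
    int index = CountTrailingZeroBits(active);
    active ^= size_t{1} << index;           // Clear the bit we are about to run.
    callbacks[index]();
  }
}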

View file

@ -0,0 +1,142 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_
#define BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_
#include <atomic>
#include <memory>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/task/sequence_manager/associated_thread_id.h"
namespace base {
namespace sequence_manager {
namespace internal {
// This class maintains a set of AtomicFlags which can be activated or
// deactivated at any time by any thread. When a flag is created, a callback is
// specified, and the RunActiveCallbacks method can be invoked to fire callbacks
// for all active flags. Creating, releasing, or destroying an AtomicFlag must be
// done on the associated thread, as must calling RunActiveCallbacks. This
// class is thread-affine.
class BASE_EXPORT AtomicFlagSet {
protected:
struct Group;
public:
explicit AtomicFlagSet(scoped_refptr<AssociatedThreadId> associated_thread);
// AtomicFlags need to be released (or deleted) before this can be deleted.
~AtomicFlagSet();
// This class is thread-affine; in addition, SetActive can be called
// concurrently from any thread.
class BASE_EXPORT AtomicFlag {
public:
AtomicFlag();
// Automatically releases the AtomicFlag.
~AtomicFlag();
AtomicFlag(const AtomicFlag&) = delete;
AtomicFlag(AtomicFlag&& other);
// Can be called on any thread. Marks whether the flag is active or not,
// which controls whether RunActiveCallbacks() will fire the associated
// callback or not. In the absence of external synchronization, the value
// set by this call might not immediately be visible to a thread calling
// RunActiveCallbacks(); the only guarantee is that a value set by this will
// eventually be visible to other threads due to cache coherency. Release /
// acquire semantics are used on the underlying atomic operations so if
// RunActiveCallbacks sees the value set by a call to SetActive(), it will
// also see the memory changes that happened prior to that SetActive() call.
void SetActive(bool active);
// Releases the flag. Must be called on the associated thread. SetActive
// can't be called after this.
void ReleaseAtomicFlag();
private:
friend AtomicFlagSet;
AtomicFlag(AtomicFlagSet* outer, Group* element, size_t flag_bit);
AtomicFlagSet* outer_ = nullptr;
Group* group_ = nullptr; // Null when AtomicFlag is invalid.
size_t flag_bit_ = 0; // This is 1 << index of this flag within the group.
};
// Adds a new flag to the set. The |callback| will be fired by
// RunActiveCallbacks if the flag is active. Must be called on the associated
// thread.
AtomicFlag AddFlag(RepeatingClosure callback);
// Runs the registered callback for all flags marked as active and atomically
// resets all flags to inactive. Must be called on the associated thread.
void RunActiveCallbacks() const;
protected:
Group* GetAllocListForTesting() const { return alloc_list_head_.get(); }
Group* GetPartiallyFreeListForTesting() const {
return partially_free_list_head_;
}
// Wraps a single std::atomic<size_t> which is shared by a number of
// AtomicFlag's with one bit per flag.
struct BASE_EXPORT Group {
Group();
~Group();
static constexpr int kNumFlags = sizeof(size_t) * 8;
std::atomic<size_t> flags = {0};
size_t allocated_flags = 0;
RepeatingClosure flag_callbacks[kNumFlags];
Group* prev = nullptr;
std::unique_ptr<Group> next;
Group* partially_free_list_prev = nullptr;
Group* partially_free_list_next = nullptr;
bool IsFull() const;
bool IsEmpty() const;
// Returns the index of the first unallocated flag. Must not be called when
// all flags are set.
int FindFirstUnallocatedFlag() const;
// Computes the index into |flag_callbacks| based on the number of trailing
// zero bits in |flag|.
static int IndexOfFirstFlagSet(size_t flag);
private:
DISALLOW_COPY_AND_ASSIGN(Group);
};
private:
void AddToAllocList(std::unique_ptr<Group> element);
// This deletes |element|.
void RemoveFromAllocList(Group* element);
void AddToPartiallyFreeList(Group* element);
// This does not delete |element|.
void RemoveFromPartiallyFreeList(Group* element);
scoped_refptr<AssociatedThreadId> associated_thread_;
std::unique_ptr<Group> alloc_list_head_;
Group* partially_free_list_head_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(AtomicFlagSet);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_
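
Group::FindFirstUnallocatedFlag() works by inverting the allocation bitmask and taking the index of its lowest set bit. A small standalone sketch of that slot-allocation trick follows, under the assumption of a plain size_t bitmask and an illustrative trailing-zeros helper.

#include <cassert>
#include <cstddef>

// Index of the lowest set bit; |x| must be non-zero. Illustrative stand-in
// for the bit-counting utility used by the real code.
static int IndexOfFirstFlagSet(size_t x) {
  int index = 0;
  while ((x & 1) == 0) {
    x >>= 1;
    ++index;
  }
  return index;
}

int main() {
  size_t allocated = 0;
  // Allocate the first three slots: each time, the first unallocated slot is
  // the lowest set bit of the inverted mask.
  for (int expected = 0; expected < 3; ++expected) {
    int slot = IndexOfFirstFlagSet(~allocated);
    assert(slot == expected);
    allocated |= size_t{1} << slot;
  }
  // Free slot 1; it becomes the first unallocated slot again.
  allocated &= ~(size_t{1} << 1);
  assert(IndexOfFirstFlagSet(~allocated) == 1);
}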

View file

@ -0,0 +1,61 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_
#define BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_
#include <stdint.h>
#include <limits>
namespace base {
namespace sequence_manager {
namespace internal {
class EnqueueOrderGenerator;
}
// 64-bit number which is used to order tasks.
// SequenceManager assumes this number will never overflow.
class EnqueueOrder {
public:
EnqueueOrder() : value_(kNone) {}
~EnqueueOrder() = default;
static EnqueueOrder none() { return EnqueueOrder(kNone); }
static EnqueueOrder blocking_fence() { return EnqueueOrder(kBlockingFence); }
// Returns an EnqueueOrder that compares greater than any other EnqueueOrder.
static EnqueueOrder max() {
return EnqueueOrder(std::numeric_limits<uint64_t>::max());
}
// It's okay to use EnqueueOrder in boolean expressions, keeping in mind
// that some non-zero values have a special meaning.
operator uint64_t() const { return value_; }
static EnqueueOrder FromIntForTesting(uint64_t value) {
return EnqueueOrder(value);
}
private:
// EnqueueOrderGenerator is the only class allowed to create an EnqueueOrder
// with a non-default constructor.
friend class internal::EnqueueOrderGenerator;
explicit EnqueueOrder(uint64_t value) : value_(value) {}
enum SpecialValues : uint64_t {
kNone = 0,
kBlockingFence = 1,
kFirst = 2,
};
uint64_t value_;
};
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_
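
EnqueueOrder is a thin wrapper over uint64_t whose lowest values are reserved sentinels, and ordering falls out of the implicit conversion. A minimal standalone sketch of the same strong-typedef-with-sentinels idea is below; the type and values are illustrative, not the Chromium class itself.

#include <cassert>
#include <cstdint>
#include <limits>

class Order {
 public:
  Order() : value_(kNone) {}
  static Order none() { return Order(kNone); }
  static Order blocking_fence() { return Order(kBlockingFence); }
  static Order max() { return Order(std::numeric_limits<uint64_t>::max()); }
  // Comparisons and boolean tests work through this conversion; remember that
  // some non-zero values (the blocking fence) are sentinels, not real tasks.
  operator uint64_t() const { return value_; }
 private:
  explicit Order(uint64_t value) : value_(value) {}
  enum : uint64_t { kNone = 0, kBlockingFence = 1, kFirst = 2 };
  uint64_t value_;
  friend class OrderGenerator;  // Only the generator mints real orders.
};

int main() {
  assert(!Order::none());                          // kNone is falsy.
  assert(Order::blocking_fence());                 // Non-zero sentinel is truthy.
  assert(Order::blocking_fence() < Order::max());  // Fences sort before real work.
}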

View file

@ -0,0 +1,18 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/enqueue_order_generator.h"
namespace base {
namespace sequence_manager {
namespace internal {
EnqueueOrderGenerator::EnqueueOrderGenerator()
: counter_(EnqueueOrder::kFirst) {}
EnqueueOrderGenerator::~EnqueueOrderGenerator() = default;
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,43 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_GENERATOR_H_
#define BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_GENERATOR_H_
#include <stdint.h>
#include <atomic>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/task/sequence_manager/enqueue_order.h"
namespace base {
namespace sequence_manager {
namespace internal {
// EnqueueOrder can't be created from a raw number in non-test code.
// EnqueueOrderGenerator is used to create it with a strictly monotonic guarantee.
class BASE_EXPORT EnqueueOrderGenerator {
public:
EnqueueOrderGenerator();
~EnqueueOrderGenerator();
// Can be called from any thread.
EnqueueOrder GenerateNext() {
return EnqueueOrder(std::atomic_fetch_add_explicit(
&counter_, uint64_t(1), std::memory_order_relaxed));
}
private:
std::atomic<uint64_t> counter_;
DISALLOW_COPY_AND_ASSIGN(EnqueueOrderGenerator);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_GENERATOR_H_
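
GenerateNext() above hands out unique, increasing values from any thread with a single relaxed fetch_add; no stronger ordering is needed because callers only rely on the counter's own modification order. A standalone sketch with illustrative names, showing that even under contention every generated value is distinct:

#include <atomic>
#include <cassert>
#include <cstdint>
#include <set>
#include <thread>
#include <vector>

class OrderGenerator {
 public:
  // Can be called from any thread; relaxed ordering is enough because each
  // fetch_add atomically reserves a distinct value of the counter.
  uint64_t GenerateNext() {
    return counter_.fetch_add(1, std::memory_order_relaxed);
  }
 private:
  std::atomic<uint64_t> counter_{2};  // 0 and 1 are reserved sentinels above.
};

int main() {
  OrderGenerator generator;
  std::vector<uint64_t> per_thread[4];
  std::vector<std::thread> threads;
  for (int t = 0; t < 4; ++t) {
    threads.emplace_back([&, t] {
      for (int i = 0; i < 1000; ++i)
        per_thread[t].push_back(generator.GenerateNext());
    });
  }
  for (auto& thread : threads)
    thread.join();
  std::set<uint64_t> all;
  for (const auto& values : per_thread)
    all.insert(values.begin(), values.end());
  assert(all.size() == 4000u);  // No duplicates across threads.
}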

View file

@ -0,0 +1,380 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
#define BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
#include <algorithm>
#include <cmath>
#include <memory>
#include <vector>
#include "base/debug/alias.h"
#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/time/time.h"
namespace base {
namespace sequence_manager {
namespace internal {
// A LazilyDeallocatedDeque specialized for the SequenceManager's usage
// patterns. The queue generally grows while tasks are added and then removed
// until empty and the cycle repeats.
//
// The main difference between sequence_manager::LazilyDeallocatedDeque and
// others is memory management. For performance (memory allocation isn't free)
// we don't automatically reclaim memory when the queue becomes empty.
// Instead we rely on the surrounding code periodically calling
// MaybeShrinkQueue, ideally when the queue is empty.
//
// We keep track of the maximum recent queue size and rate limit
// MaybeShrinkQueue to avoid unnecessary churn.
//
// NB: this queue isn't thread-safe by itself.
template <typename T, TimeTicks (*now_source)() = TimeTicks::Now>
class LazilyDeallocatedDeque {
public:
enum {
// Minimum allocation for a ring. Note a ring of size 4 will only hold up to
// 3 elements.
kMinimumRingSize = 4,
// Maximum "wasted" capacity allowed when considering if we should resize
// the backing store.
kReclaimThreshold = 16,
// Used to rate limit how frequently MaybeShrinkQueue actually shrinks the
// queue.
kMinimumShrinkIntervalInSeconds = 5
};
LazilyDeallocatedDeque() {}
~LazilyDeallocatedDeque() { clear(); }
bool empty() const { return size_ == 0; }
size_t max_size() const { return max_size_; }
size_t size() const { return size_; }
size_t capacity() const {
size_t capacity = 0;
for (const Ring* iter = head_.get(); iter; iter = iter->next_.get()) {
capacity += iter->capacity();
}
return capacity;
}
void clear() {
while (head_) {
head_ = std::move(head_->next_);
}
tail_ = nullptr;
size_ = 0;
}
// Assumed to be an uncommon operation.
void push_front(T t) {
if (!head_) {
DCHECK(!tail_);
head_ = std::make_unique<Ring>(kMinimumRingSize);
tail_ = head_.get();
}
// Grow if needed, by the minimum amount.
if (!head_->CanPush()) {
// TODO(alexclarke): Remove once we've understood the OOMs.
size_t size = size_;
base::debug::Alias(&size);
std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(kMinimumRingSize);
new_ring->next_ = std::move(head_);
head_ = std::move(new_ring);
}
head_->push_front(std::move(t));
max_size_ = std::max(max_size_, ++size_);
}
// Assumed to be a common operation.
void push_back(T t) {
if (!head_) {
DCHECK(!tail_);
head_ = std::make_unique<Ring>(kMinimumRingSize);
tail_ = head_.get();
}
// Grow if needed.
if (!tail_->CanPush()) {
// TODO(alexclarke): Remove once we've understood the OOMs.
size_t size = size_;
base::debug::Alias(&size);
// Doubling the size is a common strategy, but one which can be wasteful,
// so we use a (somewhat) slower growth curve.
tail_->next_ = std::make_unique<Ring>(2 + tail_->capacity() +
(tail_->capacity() / 2));
tail_ = tail_->next_.get();
}
tail_->push_back(std::move(t));
max_size_ = std::max(max_size_, ++size_);
}
T& front() {
DCHECK(head_);
return head_->front();
}
const T& front() const {
DCHECK(head_);
return head_->front();
}
T& back() {
DCHECK(tail_);
return tail_->back();
}
const T& back() const {
DCHECK(tail_);
return tail_->back();
}
void pop_front() {
DCHECK(head_);
DCHECK(!head_->empty());
DCHECK(tail_);
DCHECK_GT(size_, 0u);
head_->pop_front();
// If the ring has become empty and we have several rings, then remove the
// head one (which we expect to have lower capacity than the remaining
// ones).
if (head_->empty() && head_->next_) {
head_ = std::move(head_->next_);
}
--size_;
}
void swap(LazilyDeallocatedDeque& other) {
std::swap(head_, other.head_);
std::swap(tail_, other.tail_);
std::swap(size_, other.size_);
std::swap(max_size_, other.max_size_);
std::swap(next_resize_time_, other.next_resize_time_);
}
void MaybeShrinkQueue() {
if (!tail_)
return;
DCHECK_GE(max_size_, size_);
// Rate limit how often we shrink the queue because it's somewhat expensive.
TimeTicks current_time = now_source();
if (current_time < next_resize_time_)
return;
// Due to the way the Ring works we need 1 more slot than is used.
size_t new_capacity = max_size_ + 1;
if (new_capacity < kMinimumRingSize)
new_capacity = kMinimumRingSize;
// Reset |max_size_| so that unless usage has spiked up we will consider
// reclaiming it next time.
max_size_ = size_;
// Only realloc if the current capacity is sufficiently greater than the
// observed maximum size for the previous period.
if (new_capacity + kReclaimThreshold >= capacity())
return;
SetCapacity(new_capacity);
next_resize_time_ =
current_time + TimeDelta::FromSeconds(kMinimumShrinkIntervalInSeconds);
}
void SetCapacity(size_t new_capacity) {
std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(new_capacity);
DCHECK_GE(new_capacity, size_ + 1);
// Preserve the |size_| which counts down to zero in the while loop.
size_t real_size = size_;
while (!empty()) {
DCHECK(new_ring->CanPush());
new_ring->push_back(std::move(head_->front()));
pop_front();
}
size_ = real_size;
DCHECK_EQ(head_.get(), tail_);
head_ = std::move(new_ring);
tail_ = head_.get();
}
private:
FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushFront);
FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushBack);
FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingCanPush);
FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushPopPushPop);
struct Ring {
explicit Ring(size_t capacity)
: capacity_(capacity),
front_index_(0),
back_index_(0),
data_(reinterpret_cast<T*>(new char[sizeof(T) * capacity])),
next_(nullptr) {
DCHECK_GE(capacity_, kMinimumRingSize);
}
~Ring() {
while (!empty()) {
pop_front();
}
delete[] reinterpret_cast<char*>(data_);
}
bool empty() const { return back_index_ == front_index_; }
size_t capacity() const { return capacity_; }
bool CanPush() const {
return front_index_ != CircularIncrement(back_index_);
}
void push_front(T&& t) {
// Mustn't appear to become empty.
DCHECK_NE(CircularDecrement(front_index_), back_index_);
new (&data_[front_index_]) T(std::move(t));
front_index_ = CircularDecrement(front_index_);
}
void push_back(T&& t) {
back_index_ = CircularIncrement(back_index_);
DCHECK(!empty()); // Mustn't appear to become empty.
new (&data_[back_index_]) T(std::move(t));
}
bool CanPop() const { return front_index_ != back_index_; }
void pop_front() {
DCHECK(!empty());
front_index_ = CircularIncrement(front_index_);
data_[front_index_].~T();
}
T& front() {
DCHECK(!empty());
return data_[CircularIncrement(front_index_)];
}
const T& front() const {
DCHECK(!empty());
return data_[CircularIncrement(front_index_)];
}
T& back() {
DCHECK(!empty());
return data_[back_index_];
}
const T& back() const {
DCHECK(!empty());
return data_[back_index_];
}
size_t CircularDecrement(size_t index) const {
if (index == 0)
return capacity_ - 1;
return index - 1;
}
size_t CircularIncrement(size_t index) const {
DCHECK_LT(index, capacity_);
++index;
if (index == capacity_)
return 0;
return index;
}
size_t capacity_;
size_t front_index_;
size_t back_index_;
T* data_;
std::unique_ptr<Ring> next_;
DISALLOW_COPY_AND_ASSIGN(Ring);
};
public:
class Iterator {
public:
using value_type = T;
using pointer = const T*;
using reference = const T&;
const T& operator->() const { return ring_->data_[index_]; }
const T& operator*() const { return ring_->data_[index_]; }
Iterator& operator++() {
if (index_ == ring_->back_index_) {
ring_ = ring_->next_.get();
index_ = ring_ ? ring_->CircularIncrement(ring_->front_index_) : 0;
} else {
index_ = ring_->CircularIncrement(index_);
}
return *this;
}
operator bool() const { return !!ring_; }
private:
explicit Iterator(const Ring* ring) {
if (!ring || ring->empty()) {
ring_ = nullptr;
index_ = 0;
return;
}
ring_ = ring;
index_ = ring_->CircularIncrement(ring->front_index_);
}
const Ring* ring_;
size_t index_;
friend class LazilyDeallocatedDeque;
};
Iterator begin() const { return Iterator(head_.get()); }
Iterator end() const { return Iterator(nullptr); }
private:
// We maintain a list of Ring buffers, to enable us to grow without copying,
// but most of the time we aim to have only one active Ring.
std::unique_ptr<Ring> head_;
Ring* tail_ = nullptr;
size_t size_ = 0;
size_t max_size_ = 0;
TimeTicks next_resize_time_;
DISALLOW_COPY_AND_ASSIGN(LazilyDeallocatedDeque);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
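
The Ring above distinguishes "empty" from "full" by always leaving one slot unused, which is why a kMinimumRingSize of 4 holds at most 3 elements. A stripped-down standalone ring (std::vector storage, illustrative names, ints instead of tasks) demonstrating that invariant:

#include <cassert>
#include <cstddef>
#include <vector>

// Minimal fixed-capacity ring: front_ points one slot before the first
// element, back_ points at the last element, and one slot always stays free
// so that "empty" (front_ == back_) never collides with "full".
class Ring {
 public:
  explicit Ring(size_t capacity) : data_(capacity) {}
  bool empty() const { return front_ == back_; }
  bool CanPush() const { return front_ != Increment(back_); }
  void push_back(int value) {
    back_ = Increment(back_);
    data_[back_] = value;
  }
  int pop_front() {
    front_ = Increment(front_);
    return data_[front_];
  }
 private:
  size_t Increment(size_t index) const { return (index + 1) % data_.size(); }
  std::vector<int> data_;
  size_t front_ = 0;
  size_t back_ = 0;
};

int main() {
  Ring ring(4);                   // Capacity 4...
  for (int i = 0; i < 3; ++i) {
    assert(ring.CanPush());
    ring.push_back(i);
  }
  assert(!ring.CanPush());        // ...holds only 3 elements.
  assert(ring.pop_front() == 0);  // FIFO order is preserved.
  assert(ring.CanPush());         // Popping frees a slot again.
}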

View file

@ -0,0 +1,36 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/lazy_now.h"
#include "base/time/tick_clock.h"
namespace base {
namespace sequence_manager {
LazyNow::LazyNow(TimeTicks now) : tick_clock_(nullptr), now_(now) {}
LazyNow::LazyNow(const TickClock* tick_clock)
: tick_clock_(tick_clock), now_() {
DCHECK(tick_clock);
}
LazyNow::LazyNow(LazyNow&& move_from) noexcept
: tick_clock_(move_from.tick_clock_), now_(move_from.now_) {
move_from.tick_clock_ = nullptr;
move_from.now_ = nullopt;
}
TimeTicks LazyNow::Now() {
// It looks tempting to avoid using Optional and to rely on is_null() instead,
// but in some test environments the clock intentionally starts from zero.
if (!now_) {
DCHECK(tick_clock_); // It can fire only on use after std::move.
now_ = tick_clock_->NowTicks();
}
return *now_;
}
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,43 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZY_NOW_H_
#define BASE_TASK_SEQUENCE_MANAGER_LAZY_NOW_H_
#include "base/base_export.h"
#include "base/optional.h"
#include "base/time/time.h"
namespace base {
class TickClock;
namespace sequence_manager {
// Now() is somewhat expensive, so it makes sense not to call Now() unless we
// really need to, and to avoid subsequent calls once it has already been called.
// LazyNow objects are expected to be short-lived so that they represent an
// accurate time.
class BASE_EXPORT LazyNow {
public:
explicit LazyNow(TimeTicks now);
explicit LazyNow(const TickClock* tick_clock);
LazyNow(LazyNow&& move_from) noexcept;
// Result will not be updated on any subsequent calls.
TimeTicks Now();
bool has_value() const { return !!now_; }
private:
const TickClock* tick_clock_; // Not owned.
Optional<TimeTicks> now_;
DISALLOW_COPY_AND_ASSIGN(LazyNow);
};
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_LAZY_NOW_H_
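
LazyNow memoizes the first clock read so that a burst of "what time is it" queries costs a single call. A minimal standalone equivalent built on std::chrono::steady_clock, as an illustrative sketch rather than the Chromium class:

#include <cassert>
#include <chrono>
#include <optional>

// Caches the first Now() result; later calls reuse it, so all callers within
// one burst observe the same, internally consistent timestamp.
class LazyNow {
 public:
  std::chrono::steady_clock::time_point Now() {
    if (!now_)
      now_ = std::chrono::steady_clock::now();  // The only clock read we do.
    return *now_;
  }
  bool has_value() const { return now_.has_value(); }
 private:
  std::optional<std::chrono::steady_clock::time_point> now_;
};

int main() {
  LazyNow lazy_now;
  assert(!lazy_now.has_value());
  auto first = lazy_now.Now();
  auto second = lazy_now.Now();  // No second clock read; identical result.
  assert(first == second);
}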

View file

@ -0,0 +1,58 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/real_time_domain.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
namespace base {
namespace sequence_manager {
namespace internal {
RealTimeDomain::RealTimeDomain() {}
RealTimeDomain::~RealTimeDomain() = default;
void RealTimeDomain::OnRegisterWithSequenceManager(
SequenceManagerImpl* sequence_manager) {
TimeDomain::OnRegisterWithSequenceManager(sequence_manager);
tick_clock_ = sequence_manager->GetTickClock();
}
LazyNow RealTimeDomain::CreateLazyNow() const {
return LazyNow(tick_clock_);
}
TimeTicks RealTimeDomain::Now() const {
return tick_clock_->NowTicks();
}
Optional<TimeDelta> RealTimeDomain::DelayTillNextTask(LazyNow* lazy_now) {
Optional<TimeTicks> next_run_time = NextScheduledRunTime();
if (!next_run_time)
return nullopt;
TimeTicks now = lazy_now->Now();
if (now >= next_run_time) {
// Overdue work needs to be run immediately.
return TimeDelta();
}
TimeDelta delay = *next_run_time - now;
TRACE_EVENT1("sequence_manager", "RealTimeDomain::DelayTillNextTask",
"delay_ms", delay.InMillisecondsF());
return delay;
}
bool RealTimeDomain::MaybeFastForwardToNextTask(bool quit_when_idle_requested) {
return false;
}
const char* RealTimeDomain::GetName() const {
return "RealTimeDomain";
}
} // namespace internal
} // namespace sequence_manager
} // namespace base
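
DelayTillNextTask() above returns nullopt when nothing is scheduled, a zero delay for overdue work, and the remaining time otherwise. A small standalone sketch of that clamping logic over std::chrono, with illustrative names and a free function instead of the TimeDomain interface:

#include <cassert>
#include <chrono>
#include <optional>

using Clock = std::chrono::steady_clock;

// nullopt: nothing scheduled. Zero: the next run time has already passed and
// the work should run immediately. Otherwise: time remaining until it is due.
std::optional<Clock::duration> DelayTillNextTask(
    std::optional<Clock::time_point> next_run_time, Clock::time_point now) {
  if (!next_run_time)
    return std::nullopt;
  if (now >= *next_run_time)
    return Clock::duration::zero();
  return *next_run_time - now;
}

int main() {
  const auto now = Clock::now();
  const auto five_seconds =
      std::chrono::duration_cast<Clock::duration>(std::chrono::seconds(5));
  assert(!DelayTillNextTask(std::nullopt, now).has_value());
  assert(*DelayTillNextTask(now - five_seconds, now) == Clock::duration::zero());
  assert(*DelayTillNextTask(now + five_seconds, now) == five_seconds);
}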

View file

@ -0,0 +1,42 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_
#define BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_
#include "base/base_export.h"
#include "base/macros.h"
#include "base/task/sequence_manager/time_domain.h"
namespace base {
namespace sequence_manager {
namespace internal {
class BASE_EXPORT RealTimeDomain : public TimeDomain {
public:
RealTimeDomain();
~RealTimeDomain() override;
// TimeDomain implementation:
LazyNow CreateLazyNow() const override;
TimeTicks Now() const override;
Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override;
bool MaybeFastForwardToNextTask(bool quit_when_idle_requested) override;
protected:
void OnRegisterWithSequenceManager(
SequenceManagerImpl* sequence_manager) override;
const char* GetName() const override;
private:
const TickClock* tick_clock_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(RealTimeDomain);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_

View file

@ -0,0 +1,115 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/sequence_manager.h"
namespace base {
namespace sequence_manager {
NativeWorkHandle::~NativeWorkHandle() = default;
SequenceManager::MetricRecordingSettings::MetricRecordingSettings(
double task_thread_time_sampling_rate)
: task_sampling_rate_for_recording_cpu_time(
base::ThreadTicks::IsSupported() ? task_thread_time_sampling_rate
: 0) {}
SequenceManager::Settings::Settings() = default;
SequenceManager::Settings::Settings(Settings&& move_from) noexcept = default;
SequenceManager::Settings::Builder::Builder() = default;
SequenceManager::Settings::Builder::~Builder() = default;
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetMessagePumpType(
MessagePumpType message_loop_type_val) {
settings_.message_loop_type = message_loop_type_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetRandomisedSamplingEnabled(
bool randomised_sampling_enabled_val) {
settings_.randomised_sampling_enabled = randomised_sampling_enabled_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetTickClock(const TickClock* clock_val) {
settings_.clock = clock_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetAddQueueTimeToTasks(
bool add_queue_time_to_tasks_val) {
settings_.add_queue_time_to_tasks = add_queue_time_to_tasks_val;
return *this;
}
#if DCHECK_IS_ON()
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetTaskLogging(
TaskLogging task_execution_logging_val) {
settings_.task_execution_logging = task_execution_logging_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetLogPostTask(bool log_post_task_val) {
settings_.log_post_task = log_post_task_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetLogTaskDelayExpiry(
bool log_task_delay_expiry_val) {
settings_.log_task_delay_expiry = log_task_delay_expiry_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetLogRunloopQuitAndQuitWhenIdle(
bool log_runloop_quit_and_quit_when_idle_val) {
settings_.log_runloop_quit_and_quit_when_idle =
log_runloop_quit_and_quit_when_idle_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetPerPriorityCrossThreadTaskDelay(
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_cross_thread_task_delay_val) {
settings_.per_priority_cross_thread_task_delay =
per_priority_cross_thread_task_delay_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetPerPrioritySameThreadTaskDelay(
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_same_thread_task_delay_val) {
settings_.per_priority_same_thread_task_delay =
per_priority_same_thread_task_delay_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetRandomTaskSelectionSeed(
int random_task_selection_seed_val) {
settings_.random_task_selection_seed = random_task_selection_seed_val;
return *this;
}
#endif // DCHECK_IS_ON()
SequenceManager::Settings SequenceManager::Settings::Builder::Build() {
return std::move(settings_);
}
} // namespace sequence_manager
} // namespace base
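
The Builder above mutates a move-only Settings value and releases it with Build(); each setter returns *this so calls can chain. A minimal standalone sketch of that pattern with illustrative fields, not the real Settings struct:

#include <cassert>
#include <string>
#include <utility>

// Move-only settings object: copying is disabled so ownership of the built
// configuration is handed over exactly once.
struct Settings {
  Settings() = default;
  Settings(Settings&&) noexcept = default;
  Settings& operator=(Settings&&) noexcept = default;
  Settings(const Settings&) = delete;
  Settings& operator=(const Settings&) = delete;

  std::string name = "default";
  bool add_queue_time_to_tasks = false;
  int work_batch_size = 1;

  class Builder;
};

class Settings::Builder {
 public:
  Builder& SetName(std::string name) {
    settings_.name = std::move(name);
    return *this;  // Returning *this lets callers chain setters.
  }
  Builder& SetAddQueueTimeToTasks(bool value) {
    settings_.add_queue_time_to_tasks = value;
    return *this;
  }
  Builder& SetWorkBatchSize(int value) {
    settings_.work_batch_size = value;
    return *this;
  }
  Settings Build() { return std::move(settings_); }  // Relinquish the value.

 private:
  Settings settings_;
};

int main() {
  Settings settings = Settings::Builder()
                          .SetName("renderer")
                          .SetAddQueueTimeToTasks(true)
                          .SetWorkBatchSize(4)
                          .Build();
  assert(settings.add_queue_time_to_tasks);
  assert(settings.work_batch_size == 4);
}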

View file

@ -0,0 +1,339 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_
#define BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_
#include <memory>
#include <utility>
#include "base/macros.h"
#include "base/message_loop/message_pump_type.h"
#include "base/message_loop/timer_slack.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/task_time_observer.h"
#include "base/time/default_tick_clock.h"
namespace base {
class MessagePump;
class TaskObserver;
namespace sequence_manager {
class TimeDomain;
// Represents outstanding work for the sequence underlying a SequenceManager (e.g.,
// a native system task for drawing the UI). As long as this handle is alive,
// the work is considered to be pending.
class NativeWorkHandle {
public:
virtual ~NativeWorkHandle();
NativeWorkHandle(const NativeWorkHandle&) = delete;
protected:
NativeWorkHandle() = default;
};
// SequenceManager manages TaskQueues which have different properties
// (e.g. priority, common task type) multiplexing all posted tasks into
// a single backing sequence (currently bound to a single thread, which is
// referred to as the *main thread* in the comments below). The SequenceManager
// implementation can be used in various ways to apply scheduling logic.
class BASE_EXPORT SequenceManager {
public:
class Observer {
public:
virtual ~Observer() = default;
// Called back on the main thread.
virtual void OnBeginNestedRunLoop() = 0;
virtual void OnExitNestedRunLoop() = 0;
};
struct MetricRecordingSettings {
// This parameter will be updated for consistency on creation (setting
// value to 0 when ThreadTicks are not supported).
MetricRecordingSettings(double task_sampling_rate_for_recording_cpu_time);
// The proportion of tasks for which CPU time will be sampled, or 0 if this
// is not enabled.
// Since randomised sampling requires the use of Rand(), it is enabled only
// on platforms which support it.
// If it is 1, CPU time is measured for each task, so integral metrics
// (as opposed to per-task metrics) can be recorded.
double task_sampling_rate_for_recording_cpu_time = 0;
bool records_cpu_time_for_some_tasks() const {
return task_sampling_rate_for_recording_cpu_time > 0.0;
}
bool records_cpu_time_for_all_tasks() const {
return task_sampling_rate_for_recording_cpu_time == 1.0;
}
};
// Settings defining the desired SequenceManager behaviour: the type of the
// MessageLoop and whether randomised sampling should be enabled.
struct BASE_EXPORT Settings {
class Builder;
Settings();
// In the future MessagePump (which is move-only) will also be a setting,
// so we are making Settings move-only in preparation.
Settings(Settings&& move_from) noexcept;
MessagePumpType message_loop_type = MessagePumpType::DEFAULT;
bool randomised_sampling_enabled = false;
const TickClock* clock = DefaultTickClock::GetInstance();
// If true, add the timestamp the task got queued to the task.
bool add_queue_time_to_tasks = false;
#if DCHECK_IS_ON()
// TODO(alexclarke): Consider adding command line flags to control these.
enum class TaskLogging {
kNone,
kEnabled,
kEnabledWithBacktrace,
// Logs high priority tasks and the lower priority tasks they skipped
// past. Useful for debugging test failures caused by scheduler policy
// changes.
kReorderedOnly,
};
TaskLogging task_execution_logging = TaskLogging::kNone;
// If true PostTask will emit a debug log.
bool log_post_task = false;
// If true debug logs will be emitted when a delayed task becomes eligible
// to run.
bool log_task_delay_expiry = false;
// If true usages of the RunLoop API will be logged.
bool log_runloop_quit_and_quit_when_idle = false;
// Scheduler policy induced raciness is an area of concern. This lets us
// apply an extra delay per priority for cross thread posting.
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_cross_thread_task_delay;
// Like the above but for same thread posting.
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_same_thread_task_delay;
// If not zero this seeds a PRNG used by the task selection logic to choose
// a random TaskQueue for a given priority rather than the TaskQueue with
// the oldest EnqueueOrder.
int random_task_selection_seed = 0;
#endif // DCHECK_IS_ON()
DISALLOW_COPY_AND_ASSIGN(Settings);
};
virtual ~SequenceManager() = default;
// Binds the SequenceManager and its TaskQueues to the current thread. Should
// only be called once. Note that CreateSequenceManagerOnCurrentThread()
// performs this initialization automatically.
virtual void BindToCurrentThread() = 0;
// Returns the task runner the current task was posted on. Returns null if no
// task is currently running. Must be called on the bound thread.
virtual scoped_refptr<SequencedTaskRunner> GetTaskRunnerForCurrentTask() = 0;
// Finishes the initialization for a SequenceManager created via
// CreateUnboundSequenceManager(). Must not be called in any other
// circumstances. The ownership of the pump is transferred to SequenceManager.
virtual void BindToMessagePump(std::unique_ptr<MessagePump> message_pump) = 0;
// Must be called on the main thread.
// Can be called only once, before creating TaskQueues.
// Observer must outlive the SequenceManager.
virtual void SetObserver(Observer* observer) = 0;
// Must be called on the main thread.
virtual void AddTaskTimeObserver(TaskTimeObserver* task_time_observer) = 0;
virtual void RemoveTaskTimeObserver(TaskTimeObserver* task_time_observer) = 0;
// Registers a TimeDomain with SequenceManager.
// TaskQueues must only be created with a registered TimeDomain.
// Conversely, any TimeDomain must remain registered until no
// TaskQueues (using that TimeDomain) remain.
virtual void RegisterTimeDomain(TimeDomain* time_domain) = 0;
virtual void UnregisterTimeDomain(TimeDomain* time_domain) = 0;
virtual TimeDomain* GetRealTimeDomain() const = 0;
virtual const TickClock* GetTickClock() const = 0;
virtual TimeTicks NowTicks() const = 0;
// Sets the SingleThreadTaskRunner that will be returned by
// ThreadTaskRunnerHandle::Get on the main thread.
virtual void SetDefaultTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) = 0;
// Removes all canceled delayed tasks, and considers resizing to fit all
// internal queues.
virtual void ReclaimMemory() = 0;
// Returns true if no tasks were executed in TaskQueues that monitor
// quiescence since the last call to this method.
virtual bool GetAndClearSystemIsQuiescentBit() = 0;
// Set the number of tasks executed in a single SequenceManager invocation.
// Increasing this number reduces the overhead of the task dispatching
// logic at the cost of potentially worse latency. The default is 1.
virtual void SetWorkBatchSize(int work_batch_size) = 0;
// Requests desired timer precision from the OS.
// Has no effect on some platforms.
virtual void SetTimerSlack(TimerSlack timer_slack) = 0;
// Enables crash keys that can be set in the scope of a task which help
// to identify the culprit if upcoming work results in a crash.
// Key names must be thread-specific to avoid races and corrupted crash dumps.
virtual void EnableCrashKeys(const char* async_stack_crash_key) = 0;
// Returns the metric recording configuration for the current SequenceManager.
virtual const MetricRecordingSettings& GetMetricRecordingSettings() const = 0;
// Creates a task queue with the given type, |spec| and args.
// Must be called on the main thread.
// TODO(scheduler-dev): SequenceManager should not create TaskQueues.
template <typename TaskQueueType, typename... Args>
scoped_refptr<TaskQueueType> CreateTaskQueueWithType(
const TaskQueue::Spec& spec,
Args&&... args) {
return WrapRefCounted(new TaskQueueType(CreateTaskQueueImpl(spec), spec,
std::forward<Args>(args)...));
}
// Creates a vanilla TaskQueue rather than a user type derived from it. This
// should be used if you don't wish to sub class TaskQueue.
// Must be called on the main thread.
virtual scoped_refptr<TaskQueue> CreateTaskQueue(
const TaskQueue::Spec& spec) = 0;
// Returns true iff this SequenceManager has no immediate work to do. I.e.
// there are no pending non-delayed tasks or delayed tasks that are due to
// run. This method ignores any pending delayed tasks that might have become
// eligible to run since the last task was executed. This is important because
// if it did, tests would become flaky depending on the exact timing of this
// call. This is moderately expensive.
virtual bool IsIdleForTesting() = 0;
// The total number of posted tasks that haven't executed yet.
virtual size_t GetPendingTaskCountForTesting() const = 0;
// Returns a JSON string which describes all pending tasks.
virtual std::string DescribeAllPendingTasks() const = 0;
// Indicates that the underlying sequence (e.g., the message pump) has pending
// work at priority |priority|. If the priority of the work in this
// SequenceManager is lower, it will yield to let the native work run. The
// native work is assumed to remain pending while the returned handle is
// valid.
//
// Must be called on the main thread, and the returned handle must also be
// deleted on the main thread.
virtual std::unique_ptr<NativeWorkHandle> OnNativeWorkPending(
TaskQueue::QueuePriority priority) = 0;
// Adds an observer which reports task execution. Can only be called on the
// same thread that |this| is running on.
virtual void AddTaskObserver(TaskObserver* task_observer) = 0;
// Removes an observer which reports task execution. Can only be called on the
// same thread that |this| is running on.
virtual void RemoveTaskObserver(TaskObserver* task_observer) = 0;
protected:
virtual std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
const TaskQueue::Spec& spec) = 0;
};
class BASE_EXPORT SequenceManager::Settings::Builder {
public:
Builder();
~Builder();
// Sets the MessagePumpType which is used to create a MessagePump.
Builder& SetMessagePumpType(MessagePumpType message_loop_type);
Builder& SetRandomisedSamplingEnabled(bool randomised_sampling_enabled);
// Sets the TickClock the SequenceManager uses to obtain Now.
Builder& SetTickClock(const TickClock* clock);
// Whether or not queueing timestamp will be added to tasks.
Builder& SetAddQueueTimeToTasks(bool add_queue_time_to_tasks);
#if DCHECK_IS_ON()
// Controls task execution logging.
Builder& SetTaskLogging(TaskLogging task_execution_logging);
// Whether or not PostTask will emit a debug log.
Builder& SetLogPostTask(bool log_post_task);
// Whether or not debug logs will be emitted when a delayed task becomes
// eligible to run.
Builder& SetLogTaskDelayExpiry(bool log_task_delay_expiry);
// Whether or not usages of the RunLoop API will be logged.
Builder& SetLogRunloopQuitAndQuitWhenIdle(
bool log_runloop_quit_and_quit_when_idle);
// Scheduler policy induced raciness is an area of concern. This lets us
// apply an extra delay per priority for cross thread posting.
Builder& SetPerPriorityCrossThreadTaskDelay(
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_cross_thread_task_delay);
// Scheduler policy induced raciness is an area of concern. This lets us
// apply an extra delay per priority for same thread posting.
Builder& SetPerPrioritySameThreadTaskDelay(
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_same_thread_task_delay);
// If not zero this seeds a PRNG used by the task selection logic to choose a
// random TaskQueue for a given priority rather than the TaskQueue with the
// oldest EnqueueOrder.
Builder& SetRandomTaskSelectionSeed(int random_task_selection_seed);
#endif // DCHECK_IS_ON()
Settings Build();
private:
Settings settings_;
};
// Create SequenceManager using MessageLoop on the current thread.
// Implementation is located in sequence_manager_impl.cc.
// TODO(scheduler-dev): Remove after every thread has a SequenceManager.
BASE_EXPORT std::unique_ptr<SequenceManager>
CreateSequenceManagerOnCurrentThread(SequenceManager::Settings settings);
// Create a SequenceManager using the given MessagePump on the current thread.
// MessagePump instances can be created with
// MessagePump::CreateMessagePumpForType().
BASE_EXPORT std::unique_ptr<SequenceManager>
CreateSequenceManagerOnCurrentThreadWithPump(
std::unique_ptr<MessagePump> message_pump,
SequenceManager::Settings settings = SequenceManager::Settings());
// Create an unbound SequenceManager (typically for a future thread or because
// additional setup is required before binding). The SequenceManager can be
// initialized on the current thread and then needs to be bound and initialized
// on the target thread by calling one of the Bind*() methods.
BASE_EXPORT std::unique_ptr<SequenceManager> CreateUnboundSequenceManager(
SequenceManager::Settings settings = SequenceManager::Settings());
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_
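
MetricRecordingSettings drives per-task sampling: with rate 1 every task's CPU time is measured, with rate 0 none is, and in between a uniform draw decides per task. A standalone sketch of that decision; the function name is illustrative, and the real implementation draws from SequenceManagerImpl's own RNG rather than a local one.

#include <cassert>
#include <random>

// Decide, per task, whether to record its thread/CPU time given a sampling
// rate in [0, 1]. Rate 1.0 records every task; rate 0.0 records none.
bool ShouldSampleCpuTime(double sampling_rate, std::mt19937_64& rng) {
  if (sampling_rate <= 0.0)
    return false;
  if (sampling_rate >= 1.0)
    return true;
  std::uniform_real_distribution<double> uniform(0.0, 1.0);
  return uniform(rng) < sampling_rate;
}

int main() {
  std::mt19937_64 rng(42);
  int sampled = 0;
  const int kTasks = 100000;
  for (int i = 0; i < kTasks; ++i)
    sampled += ShouldSampleCpuTime(0.01, rng) ? 1 : 0;
  // Roughly 1% of tasks get their CPU time recorded (allow generous slack).
  assert(sampled > kTasks / 200 && sampled < kTasks / 50);
  assert(ShouldSampleCpuTime(1.0, rng));   // Every task.
  assert(!ShouldSampleCpuTime(0.0, rng));  // No task.
}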

File diff suppressed because it is too large.

View file

@ -0,0 +1,437 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_
#define BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_
#include <list>
#include <map>
#include <memory>
#include <random>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>
#include "base/atomic_sequence_num.h"
#include "base/cancelable_callback.h"
#include "base/containers/circular_deque.h"
#include "base/debug/crash_logging.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop_current.h"
#include "base/message_loop/message_pump_type.h"
#include "base/pending_task.h"
#include "base/run_loop.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/lock.h"
#include "base/task/common/task_annotator.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/enqueue_order.h"
#include "base/task/sequence_manager/enqueue_order_generator.h"
#include "base/task/sequence_manager/sequence_manager.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/task_queue_selector.h"
#include "base/task/sequence_manager/thread_controller.h"
#include "base/threading/thread_checker.h"
#include "base/time/default_tick_clock.h"
#include "build/build_config.h"
namespace base {
namespace trace_event {
class ConvertableToTraceFormat;
} // namespace trace_event
namespace sequence_manager {
class SequenceManagerForTest;
class TaskQueue;
class TaskTimeObserver;
class TimeDomain;
namespace internal {
class RealTimeDomain;
class TaskQueueImpl;
class ThreadControllerImpl;
// The task queue manager provides N task queues and a selector interface for
// choosing which task queue to service next. Each task queue consists of two
// sub queues:
//
// 1. Incoming task queue. Tasks that are posted get immediately appended here.
// When a task is appended into an empty incoming queue, the task manager
// work function (DoWork()) is scheduled to run on the main task runner.
//
// 2. Work queue. If a work queue is empty when DoWork() is entered, tasks from
// the incoming task queue (if any) are moved here. The work queues are
// registered with the selector as input to the scheduling decision.
//
class BASE_EXPORT SequenceManagerImpl
: public SequenceManager,
public internal::SequencedTaskSource,
public internal::TaskQueueSelector::Observer,
public RunLoop::NestingObserver {
public:
using Observer = SequenceManager::Observer;
~SequenceManagerImpl() override;
// Assume direct control over current thread and create a SequenceManager.
// This function should be called only once per thread.
// This function assumes that a MessageLoop is initialized for
// the current thread.
static std::unique_ptr<SequenceManagerImpl> CreateOnCurrentThread(
SequenceManager::Settings settings = SequenceManager::Settings());
// Create an unbound SequenceManager (typically for a future thread). The
// SequenceManager can be initialized on the current thread and then needs to
// be bound and initialized on the target thread by calling one of the Bind*()
// methods.
static std::unique_ptr<SequenceManagerImpl> CreateUnbound(
SequenceManager::Settings settings);
// SequenceManager implementation:
void BindToCurrentThread() override;
scoped_refptr<SequencedTaskRunner> GetTaskRunnerForCurrentTask() override;
void BindToMessagePump(std::unique_ptr<MessagePump> message_pump) override;
void SetObserver(Observer* observer) override;
void AddTaskTimeObserver(TaskTimeObserver* task_time_observer) override;
void RemoveTaskTimeObserver(TaskTimeObserver* task_time_observer) override;
void RegisterTimeDomain(TimeDomain* time_domain) override;
void UnregisterTimeDomain(TimeDomain* time_domain) override;
TimeDomain* GetRealTimeDomain() const override;
const TickClock* GetTickClock() const override;
TimeTicks NowTicks() const override;
void SetDefaultTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) override;
void ReclaimMemory() override;
bool GetAndClearSystemIsQuiescentBit() override;
void SetWorkBatchSize(int work_batch_size) override;
void SetTimerSlack(TimerSlack timer_slack) override;
void EnableCrashKeys(const char* async_stack_crash_key) override;
const MetricRecordingSettings& GetMetricRecordingSettings() const override;
size_t GetPendingTaskCountForTesting() const override;
scoped_refptr<TaskQueue> CreateTaskQueue(
const TaskQueue::Spec& spec) override;
std::string DescribeAllPendingTasks() const override;
std::unique_ptr<NativeWorkHandle> OnNativeWorkPending(
TaskQueue::QueuePriority priority) override;
void AddTaskObserver(TaskObserver* task_observer) override;
void RemoveTaskObserver(TaskObserver* task_observer) override;
// SequencedTaskSource implementation:
Task* SelectNextTask() override;
void DidRunTask() override;
TimeDelta DelayTillNextTask(LazyNow* lazy_now) const override;
bool HasPendingHighResolutionTasks() override;
bool OnSystemIdle() override;
void AddDestructionObserver(
MessageLoopCurrent::DestructionObserver* destruction_observer);
void RemoveDestructionObserver(
MessageLoopCurrent::DestructionObserver* destruction_observer);
// TODO(alexclarke): Remove this as part of https://crbug.com/825327.
void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner);
// TODO(alexclarke): Remove this as part of https://crbug.com/825327.
scoped_refptr<SingleThreadTaskRunner> GetTaskRunner();
bool IsBoundToCurrentThread() const;
MessagePump* GetMessagePump() const;
bool IsType(MessagePumpType type) const;
void SetAddQueueTimeToTasks(bool enable);
void SetTaskExecutionAllowed(bool allowed);
bool IsTaskExecutionAllowed() const;
#if defined(OS_IOS)
void AttachToMessagePump();
#endif
bool IsIdleForTesting() override;
void BindToCurrentThread(std::unique_ptr<MessagePump> pump);
void DeletePendingTasks();
bool HasTasks();
MessagePumpType GetType() const;
// Requests that a task to process work is scheduled.
void ScheduleWork();
// Requests that a delayed task to process work is posted on the main task
// runner. These delayed tasks are de-duplicated. Must be called on the thread
// this class was created on.
// Schedules next wake-up at the given time, cancels any previous requests.
// Use TimeTicks::Max() to cancel a wake-up.
// Must be called from a TimeDomain only.
void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time);
// Returns the currently executing TaskQueue if any. Must be called on the
// thread this class was created on.
internal::TaskQueueImpl* currently_executing_task_queue() const;
// Unregisters a TaskQueue previously created by |NewTaskQueue()|.
// No tasks will run on this queue after this call.
void UnregisterTaskQueueImpl(
std::unique_ptr<internal::TaskQueueImpl> task_queue);
// Schedule a call to UnregisterTaskQueueImpl as soon as it's safe to do so.
void ShutdownTaskQueueGracefully(
std::unique_ptr<internal::TaskQueueImpl> task_queue);
const scoped_refptr<AssociatedThreadId>& associated_thread() const {
return associated_thread_;
}
const Settings& settings() const { return settings_; }
WeakPtr<SequenceManagerImpl> GetWeakPtr();
// How frequently to perform housekeeping tasks (sweeping canceled tasks etc).
static constexpr TimeDelta kReclaimMemoryInterval =
TimeDelta::FromSeconds(30);
protected:
static std::unique_ptr<ThreadControllerImpl>
CreateThreadControllerImplForCurrentThread(const TickClock* clock);
// Creates a SequenceManager where |controller| controls the thread
// on which the tasks are eventually run.
SequenceManagerImpl(std::unique_ptr<internal::ThreadController> controller,
SequenceManager::Settings settings = Settings());
friend class internal::TaskQueueImpl;
friend class ::base::sequence_manager::SequenceManagerForTest;
private:
class NativeWorkHandleImpl;
// Returns the SequenceManager running the current thread.
// It must only be used on the thread it was obtained on.
// For the moment, only to be used by MessageLoopCurrent.
static SequenceManagerImpl* GetCurrent();
friend class ::base::MessageLoopCurrent;
enum class ProcessTaskResult {
kDeferred,
kExecuted,
kSequenceManagerDeleted,
};
// SequenceManager maintains a queue of non-nestable tasks since they're
// uncommon and allocating an extra deque per TaskQueue would waste memory.
using NonNestableTaskDeque =
circular_deque<internal::TaskQueueImpl::DeferredNonNestableTask>;
// We have to track reentrancy because we support nested run loops but the
// selector interface is unaware of those. This struct keeps track of all
// task-related state needed to make pairs of SelectNextTask() / DidRunTask()
// calls work.
struct ExecutingTask {
ExecutingTask(Task&& task,
internal::TaskQueueImpl* task_queue,
TaskQueue::TaskTiming task_timing)
: pending_task(std::move(task)),
task_queue(task_queue),
task_queue_name(task_queue->GetName()),
task_timing(task_timing),
priority(task_queue->GetQueuePriority()),
task_type(pending_task.task_type) {}
Task pending_task;
internal::TaskQueueImpl* task_queue = nullptr;
// Save task_queue_name as the task queue can be deleted within the task.
const char* task_queue_name;
TaskQueue::TaskTiming task_timing;
// Save priority as it might change after running a task.
TaskQueue::QueuePriority priority;
// Save task metadata to use after running a task, as |pending_task|
// won't be available then.
int task_type;
};
struct MainThreadOnly {
explicit MainThreadOnly(
const scoped_refptr<AssociatedThreadId>& associated_thread,
const SequenceManager::Settings& settings);
~MainThreadOnly();
int nesting_depth = 0;
NonNestableTaskDeque non_nestable_task_queue;
// TODO(altimin): Switch to instruction pointer crash key when it's
// available.
debug::CrashKeyString* file_name_crash_key = nullptr;
debug::CrashKeyString* function_name_crash_key = nullptr;
debug::CrashKeyString* async_stack_crash_key = nullptr;
std::array<char, static_cast<size_t>(debug::CrashKeySize::Size64)>
async_stack_buffer = {};
std::mt19937_64 random_generator;
std::uniform_real_distribution<double> uniform_distribution;
internal::TaskQueueSelector selector;
ObserverList<TaskObserver>::Unchecked task_observers;
ObserverList<TaskTimeObserver>::Unchecked task_time_observers;
std::set<TimeDomain*> time_domains;
std::unique_ptr<internal::RealTimeDomain> real_time_domain;
// If true, MaybeReclaimMemory() will attempt to reclaim memory.
bool memory_reclaim_scheduled = false;
// Used to ensure we don't perform expensive housekeeping too frequently.
TimeTicks next_time_to_reclaim_memory;
// List of task queues managed by this SequenceManager.
// - active_queues contains queues that are still running tasks.
//   Most often they are owned by the relevant TaskQueues, but
//   queues_to_gracefully_shutdown are included here too.
// - queues_to_gracefully_shutdown contains queues which should be deleted
//   when they become empty.
// - queues_to_delete contains soon-to-be-deleted queues, because some
//   internal scheduling code does not expect queues to be pulled out
//   from underneath it.
std::set<internal::TaskQueueImpl*> active_queues;
std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
queues_to_gracefully_shutdown;
std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
queues_to_delete;
bool task_was_run_on_quiescence_monitored_queue = false;
bool nesting_observer_registered_ = false;
// Due to nested runloops more than one task can be executing concurrently.
std::vector<ExecutingTask> task_execution_stack;
Observer* observer = nullptr; // NOT OWNED
ObserverList<MessageLoopCurrent::DestructionObserver>::Unchecked
destruction_observers;
// By default native work is not prioritized at all.
std::multiset<TaskQueue::QueuePriority> pending_native_work{
TaskQueue::kBestEffortPriority};
};
void CompleteInitializationOnBoundThread();
// TaskQueueSelector::Observer:
void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) override;
// RunLoop::NestingObserver:
void OnBeginNestedRunLoop() override;
void OnExitNestedRunLoop() override;
// Called by the task queue to inform this SequenceManager of a task that's
// about to be queued. This SequenceManager may use this opportunity to add
// metadata to |pending_task| before it is moved into the queue.
void WillQueueTask(Task* pending_task, const char* task_queue_name);
// Enqueues delayed tasks with run_time <= Now() onto the work queues and
// reloads any empty work queues.
void MoveReadyDelayedTasksToWorkQueues(LazyNow* lazy_now);
void NotifyWillProcessTask(ExecutingTask* task, LazyNow* time_before_task);
void NotifyDidProcessTask(ExecutingTask* task, LazyNow* time_after_task);
EnqueueOrder GetNextSequenceNumber();
bool GetAddQueueTimeToTasks();
std::unique_ptr<trace_event::ConvertableToTraceFormat>
AsValueWithSelectorResult(internal::WorkQueue* selected_work_queue,
bool force_verbose) const;
void AsValueWithSelectorResultInto(trace_event::TracedValue*,
internal::WorkQueue* selected_work_queue,
bool force_verbose) const;
// Used in construction of TaskQueueImpl to obtain an AtomicFlag which it can
// use to request reload by ReloadEmptyWorkQueues. The lifetime of
// TaskQueueImpl is managed by this class and the handle will be released by
// TaskQueueImpl::UnregisterTaskQueue which is always called before the
// queue's destruction.
AtomicFlagSet::AtomicFlag GetFlagToRequestReloadForEmptyQueue(
TaskQueueImpl* task_queue);
// Calls |TakeImmediateIncomingQueueTasks| on all queues with their reload
// flag set in |empty_queues_to_reload_|.
void ReloadEmptyWorkQueues() const;
std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
const TaskQueue::Spec& spec) override;
// Periodically reclaims memory by sweeping away canceled tasks and shrinking
// buffers.
void MaybeReclaimMemory();
// Deletes queues marked for deletion and empty queues marked for shutdown.
void CleanUpQueues();
void RemoveAllCanceledTasksFromFrontOfWorkQueues();
TaskQueue::TaskTiming::TimeRecordingPolicy ShouldRecordTaskTiming(
const internal::TaskQueueImpl* task_queue);
bool ShouldRecordCPUTimeForTask();
void RecordCrashKeys(const PendingTask&);
// Helper to terminate all scoped trace events to allow starting new ones
// in SelectNextTask().
Task* SelectNextTaskImpl();
// Check if a task of priority |priority| should run given the pending set of
// native work.
bool ShouldRunTaskOfPriority(TaskQueue::QueuePriority priority) const;
// Returns the delay until the next pending delayed task, ignoring any
// immediate work.
TimeDelta GetDelayTillNextDelayedTask(LazyNow* lazy_now) const;
#if DCHECK_IS_ON()
void LogTaskDebugInfo(const internal::WorkQueue* work_queue) const;
#endif
// Determines if wall time or thread time should be recorded for the next
// task.
TaskQueue::TaskTiming InitializeTaskTiming(
internal::TaskQueueImpl* task_queue);
scoped_refptr<AssociatedThreadId> associated_thread_;
EnqueueOrderGenerator enqueue_order_generator_;
const std::unique_ptr<internal::ThreadController> controller_;
const Settings settings_;
const MetricRecordingSettings metric_recording_settings_;
// Whether to add the queue time to tasks.
base::subtle::Atomic32 add_queue_time_to_tasks_;
AtomicFlagSet empty_queues_to_reload_;
// A check to bail out early during memory corruption.
// https://crbug.com/757940
bool Validate();
volatile int32_t memory_corruption_sentinel_;
MainThreadOnly main_thread_only_;
MainThreadOnly& main_thread_only() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
const MainThreadOnly& main_thread_only() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
WeakPtrFactory<SequenceManagerImpl> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(SequenceManagerImpl);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_
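// Usage sketch (illustrative, not part of this header): the CreateUnbound() /
// BindToMessagePump() flow declared above, followed by creating a queue and
// posting a task. MessagePump::Create() and the exact #includes are assumed
// from //base; embedders typically use the public SequenceManager factory
// functions rather than SequenceManagerImpl directly.
#include "base/bind.h"
#include "base/message_loop/message_pump.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"

void SequenceManagerUsageSketch() {
  using base::sequence_manager::SequenceManager;
  using base::sequence_manager::TaskQueue;
  using base::sequence_manager::internal::SequenceManagerImpl;

  // Create the manager unbound; this can happen before the target thread
  // is ready to run tasks.
  std::unique_ptr<SequenceManagerImpl> manager =
      SequenceManagerImpl::CreateUnbound(SequenceManager::Settings());

  // Bind it on the thread that will run the tasks by giving it a pump.
  manager->BindToMessagePump(
      base::MessagePump::Create(base::MessagePumpType::DEFAULT));

  // Queues are created through the manager; the returned reference must be
  // retained to keep the queue from shutting down gracefully.
  scoped_refptr<TaskQueue> queue =
      manager->CreateTaskQueue(TaskQueue::Spec("example_queue"));
  queue->task_runner()->PostTask(FROM_HERE, base::BindOnce([] {}));
}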

View file

@@ -0,0 +1,49 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_SEQUENCED_TASK_SOURCE_H_
#define BASE_TASK_SEQUENCE_MANAGER_SEQUENCED_TASK_SOURCE_H_
#include "base/optional.h"
#include "base/pending_task.h"
#include "base/task/sequence_manager/lazy_now.h"
#include "base/task/sequence_manager/tasks.h"
namespace base {
namespace sequence_manager {
namespace internal {
// Interface to pass tasks to ThreadController.
class SequencedTaskSource {
public:
virtual ~SequencedTaskSource() = default;
// Returns the next task to run from this source or nullptr if
// there are no more tasks ready to run. If a task is returned,
// DidRunTask() must be invoked before the next call to SelectNextTask().
virtual Task* SelectNextTask() = 0;
// Notifies this source that the task previously obtained
// from SelectNextTask() has been completed.
virtual void DidRunTask() = 0;
// Returns the delay till the next task or TimeDelta::Max()
// if there are no tasks left.
virtual TimeDelta DelayTillNextTask(LazyNow* lazy_now) const = 0;
// Returns true if there are any pending tasks in the task source which require
// high resolution timing.
virtual bool HasPendingHighResolutionTasks() = 0;
// Called when we have run out of immediate work. If more immediate work
// becomes available as a result of any processing done by this callback,
// return true to schedule a future DoWork.
virtual bool OnSystemIdle() = 0;
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_SEQUENCED_TASK_SOURCE_H_
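// Usage sketch (illustrative, not part of this header): how a driver such as
// ThreadController might drain a SequencedTaskSource. RunTasksOnce() and the
// |clock| parameter are hypothetical; the SelectNextTask() / DidRunTask()
// pairing and DelayTillNextTask() come from the interface above.
#include "base/task/sequence_manager/lazy_now.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/time/tick_clock.h"

namespace base {
namespace sequence_manager {
namespace internal {

// Runs all ready tasks and returns the delay until the next delayed task,
// or TimeDelta::Max() if there is none.
TimeDelta RunTasksOnce(SequencedTaskSource* source, const TickClock* clock) {
  // Every task obtained from SelectNextTask() must be followed by a
  // DidRunTask() call before the next selection.
  while (Task* task = source->SelectNextTask()) {
    std::move(task->task).Run();
    source->DidRunTask();
  }
  // No immediate work left; ask the source when to wake up next.
  LazyNow lazy_now(clock);
  return source->DelayTillNextTask(&lazy_now);
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base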

View file

@@ -0,0 +1,359 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/task_queue.h"
#include <utility>
#include "base/bind.h"
#include "base/memory/ptr_util.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_checker_impl.h"
#include "base/time/time.h"
namespace base {
namespace sequence_manager {
namespace {
class NullTaskRunner final : public SingleThreadTaskRunner {
public:
NullTaskRunner() {}
bool PostDelayedTask(const Location& location,
OnceClosure callback,
TimeDelta delay) override {
return false;
}
bool PostNonNestableDelayedTask(const Location& location,
OnceClosure callback,
TimeDelta delay) override {
return false;
}
bool RunsTasksInCurrentSequence() const override {
return thread_checker_.CalledOnValidThread();
}
private:
// Ref-counted
~NullTaskRunner() override = default;
ThreadCheckerImpl thread_checker_;
};
// TODO(kraynov): Move NullTaskRunner from //base/test to //base.
scoped_refptr<SingleThreadTaskRunner> CreateNullTaskRunner() {
return MakeRefCounted<NullTaskRunner>();
}
} // namespace
TaskQueue::QueueEnabledVoter::QueueEnabledVoter(
scoped_refptr<TaskQueue> task_queue)
: task_queue_(std::move(task_queue)), enabled_(true) {
task_queue_->AddQueueEnabledVoter(enabled_);
}
TaskQueue::QueueEnabledVoter::~QueueEnabledVoter() {
task_queue_->RemoveQueueEnabledVoter(enabled_);
}
void TaskQueue::QueueEnabledVoter::SetVoteToEnable(bool enabled) {
if (enabled == enabled_)
return;
enabled_ = enabled;
task_queue_->OnQueueEnabledVoteChanged(enabled_);
}
void TaskQueue::AddQueueEnabledVoter(bool voter_is_enabled) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
++voter_count_;
if (voter_is_enabled)
++enabled_voter_count_;
}
void TaskQueue::RemoveQueueEnabledVoter(bool voter_is_enabled) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
bool was_enabled = AreAllQueueEnabledVotersEnabled();
if (voter_is_enabled) {
--enabled_voter_count_;
DCHECK_GE(enabled_voter_count_, 0);
}
--voter_count_;
DCHECK_GE(voter_count_, 0);
bool is_enabled = AreAllQueueEnabledVotersEnabled();
if (was_enabled != is_enabled)
impl_->SetQueueEnabled(is_enabled);
}
bool TaskQueue::AreAllQueueEnabledVotersEnabled() const {
return enabled_voter_count_ == voter_count_;
}
void TaskQueue::OnQueueEnabledVoteChanged(bool enabled) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
bool was_enabled = AreAllQueueEnabledVotersEnabled();
if (enabled) {
++enabled_voter_count_;
DCHECK_LE(enabled_voter_count_, voter_count_);
} else {
--enabled_voter_count_;
DCHECK_GE(enabled_voter_count_, 0);
}
bool is_enabled = AreAllQueueEnabledVotersEnabled();
if (was_enabled != is_enabled)
impl_->SetQueueEnabled(is_enabled);
}
TaskQueue::TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
const TaskQueue::Spec& spec)
: impl_(std::move(impl)),
sequence_manager_(impl_ ? impl_->GetSequenceManagerWeakPtr() : nullptr),
associated_thread_((impl_ && impl_->sequence_manager())
? impl_->sequence_manager()->associated_thread()
: MakeRefCounted<internal::AssociatedThreadId>()),
default_task_runner_(impl_ ? impl_->CreateTaskRunner(kTaskTypeNone)
: CreateNullTaskRunner()),
name_(impl_ ? impl_->GetName() : "") {}
TaskQueue::~TaskQueue() {
ShutdownTaskQueueGracefully();
}
void TaskQueue::ShutdownTaskQueueGracefully() {
// scoped_refptr guarantees us that this object isn't used.
if (!impl_)
return;
if (impl_->IsUnregistered())
return;
// If we've not been unregistered then this must occur on the main thread.
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
impl_->SetObserver(nullptr);
impl_->sequence_manager()->ShutdownTaskQueueGracefully(TakeTaskQueueImpl());
}
TaskQueue::TaskTiming::TaskTiming(bool has_wall_time, bool has_thread_time)
: has_wall_time_(has_wall_time), has_thread_time_(has_thread_time) {}
void TaskQueue::TaskTiming::RecordTaskStart(LazyNow* now) {
DCHECK_EQ(State::NotStarted, state_);
state_ = State::Running;
if (has_wall_time())
start_time_ = now->Now();
if (has_thread_time())
start_thread_time_ = base::ThreadTicks::Now();
}
void TaskQueue::TaskTiming::RecordTaskEnd(LazyNow* now) {
DCHECK(state_ == State::Running || state_ == State::Finished);
if (state_ == State::Finished)
return;
state_ = State::Finished;
if (has_wall_time())
end_time_ = now->Now();
if (has_thread_time())
end_thread_time_ = base::ThreadTicks::Now();
}
void TaskQueue::ShutdownTaskQueue() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
if (!sequence_manager_) {
TakeTaskQueueImpl().reset();
return;
}
impl_->SetBlameContext(nullptr);
impl_->SetOnTaskStartedHandler(
internal::TaskQueueImpl::OnTaskStartedHandler());
impl_->SetOnTaskCompletedHandler(
internal::TaskQueueImpl::OnTaskCompletedHandler());
sequence_manager_->UnregisterTaskQueueImpl(TakeTaskQueueImpl());
}
scoped_refptr<SingleThreadTaskRunner> TaskQueue::CreateTaskRunner(
TaskType task_type) {
// We only need to lock if we're not on the main thread.
base::internal::CheckedAutoLockMaybe lock(IsOnMainThread() ? &impl_lock_
: nullptr);
if (!impl_)
return CreateNullTaskRunner();
return impl_->CreateTaskRunner(task_type);
}
std::unique_ptr<TaskQueue::QueueEnabledVoter>
TaskQueue::CreateQueueEnabledVoter() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return nullptr;
return WrapUnique(new QueueEnabledVoter(this));
}
bool TaskQueue::IsQueueEnabled() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return false;
return impl_->IsQueueEnabled();
}
bool TaskQueue::IsEmpty() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return true;
return impl_->IsEmpty();
}
size_t TaskQueue::GetNumberOfPendingTasks() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return 0;
return impl_->GetNumberOfPendingTasks();
}
bool TaskQueue::HasTaskToRunImmediately() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return false;
return impl_->HasTaskToRunImmediately();
}
Optional<TimeTicks> TaskQueue::GetNextScheduledWakeUp() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return nullopt;
return impl_->GetNextScheduledWakeUp();
}
void TaskQueue::SetQueuePriority(TaskQueue::QueuePriority priority) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->SetQueuePriority(priority);
}
TaskQueue::QueuePriority TaskQueue::GetQueuePriority() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return TaskQueue::QueuePriority::kLowPriority;
return impl_->GetQueuePriority();
}
void TaskQueue::AddTaskObserver(TaskObserver* task_observer) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->AddTaskObserver(task_observer);
}
void TaskQueue::RemoveTaskObserver(TaskObserver* task_observer) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->RemoveTaskObserver(task_observer);
}
void TaskQueue::SetTimeDomain(TimeDomain* time_domain) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->SetTimeDomain(time_domain);
}
TimeDomain* TaskQueue::GetTimeDomain() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return nullptr;
return impl_->GetTimeDomain();
}
void TaskQueue::SetBlameContext(trace_event::BlameContext* blame_context) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->SetBlameContext(blame_context);
}
void TaskQueue::InsertFence(InsertFencePosition position) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->InsertFence(position);
}
void TaskQueue::InsertFenceAt(TimeTicks time) {
impl_->InsertFenceAt(time);
}
void TaskQueue::RemoveFence() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->RemoveFence();
}
bool TaskQueue::HasActiveFence() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return false;
return impl_->HasActiveFence();
}
bool TaskQueue::BlockedByFence() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return false;
return impl_->BlockedByFence();
}
EnqueueOrder TaskQueue::GetEnqueueOrderAtWhichWeBecameUnblocked() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return EnqueueOrder();
return impl_->GetEnqueueOrderAtWhichWeBecameUnblocked();
}
const char* TaskQueue::GetName() const {
return name_;
}
void TaskQueue::SetObserver(Observer* observer) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
// Observer is guaranteed to outlive TaskQueue, and the TaskQueueImpl
// lifecycle is controlled by |this|.
impl_->SetObserver(observer);
}
void TaskQueue::SetShouldReportPostedTasksWhenDisabled(bool should_report) {
impl_->SetShouldReportPostedTasksWhenDisabled(should_report);
}
bool TaskQueue::IsOnMainThread() const {
return associated_thread_->IsBoundToCurrentThread();
}
std::unique_ptr<internal::TaskQueueImpl> TaskQueue::TakeTaskQueueImpl() {
base::internal::CheckedAutoLock lock(impl_lock_);
DCHECK(impl_);
return std::move(impl_);
}
} // namespace sequence_manager
} // namespace base
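// Usage sketch (illustrative, not part of this file): the TaskTiming contract
// implemented above. RecordTaskStart() must precede RecordTaskEnd(), and the
// duration accessors are only valid for the dimensions requested in the
// constructor. MeasureWallTime() is a hypothetical helper and additionally
// assumes "base/time/tick_clock.h".
#include "base/time/tick_clock.h"

namespace base {
namespace sequence_manager {

TimeDelta MeasureWallTime(OnceClosure work, const TickClock* clock) {
  // Request wall time only; thread time stays unrecorded.
  TaskQueue::TaskTiming timing(/*has_wall_time=*/true,
                               /*has_thread_time=*/false);
  LazyNow task_start(clock);
  timing.RecordTaskStart(&task_start);
  std::move(work).Run();
  LazyNow task_end(clock);
  timing.RecordTaskEnd(&task_end);
  // Valid because has_wall_time() is true: end_time() - start_time().
  return timing.wall_duration();
}

}  // namespace sequence_manager
}  // namespace base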

View file

@@ -0,0 +1,397 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_
#include <memory>
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/optional.h"
#include "base/single_thread_task_runner.h"
#include "base/task/common/checked_lock.h"
#include "base/task/sequence_manager/lazy_now.h"
#include "base/task/sequence_manager/tasks.h"
#include "base/task/task_observer.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
namespace base {
class TaskObserver;
namespace trace_event {
class BlameContext;
}
namespace sequence_manager {
namespace internal {
class AssociatedThreadId;
class SequenceManagerImpl;
class TaskQueueImpl;
} // namespace internal
class TimeDomain;
// TODO(kraynov): Make TaskQueue actually be an interface for TaskQueueImpl
// and stop using ref-counting, because we're no longer tied to the task runner
// lifecycle and there's no other need for ref-counting either.
// NOTE: When TaskQueue gets automatically deleted on zero ref-count,
// TaskQueueImpl gets gracefully shut down. It means that it doesn't get
// unregistered immediately and might accept some last-minute tasks until
// SequenceManager unregisters it at some point. This is done to ensure that
// the task queue always gets unregistered on the main thread.
class BASE_EXPORT TaskQueue : public RefCountedThreadSafe<TaskQueue> {
public:
class Observer {
public:
virtual ~Observer() = default;
// Notifies the observer that the time at which this queue wants to run
// the next task has changed. |next_wake_up| can be in the past
// (e.g. TimeTicks() can be used to notify about immediate work).
// Can be called on any thread.
// All methods but SetObserver, SetTimeDomain and GetTimeDomain can be
// called on |queue|.
//
// TODO(altimin): Make it Optional<TimeTicks> to tell the
// observer about cancellations.
virtual void OnQueueNextWakeUpChanged(TimeTicks next_wake_up) = 0;
};
// Shuts down the queue. All tasks currently queued will be discarded.
virtual void ShutdownTaskQueue();
// Shuts down the queue when there are no more tasks queued.
void ShutdownTaskQueueGracefully();
// TODO(scheduler-dev): Could we define a more clear list of priorities?
// See https://crbug.com/847858.
enum QueuePriority : uint8_t {
// Queues with control priority will run before any other queue, and will
// explicitly starve other queues. Typically this should only be used for
// private queues which perform control operations.
kControlPriority = 0,
// The selector will prioritize queues in descending priority order: highest
// over very high, high, normal and low; and so on down the list. However, it
// will ensure that none of the lower priority queues can be completely starved
// by higher priority tasks. All of these queues will always take priority
// over, and can starve, the best effort queue.
kHighestPriority = 1,
kVeryHighPriority = 2,
kHighPriority = 3,
// Queues with normal priority are the default.
kNormalPriority = 4,
kLowPriority = 5,
// Queues with best effort priority will only be run if all other queues are
// empty. They can be starved by the other queues.
kBestEffortPriority = 6,
// Must be the last entry.
kQueuePriorityCount = 7,
kFirstQueuePriority = kControlPriority,
};
// Can be called on any thread.
static const char* PriorityToString(QueuePriority priority);
// Options for constructing a TaskQueue.
struct Spec {
explicit Spec(const char* name) : name(name) {}
Spec SetShouldMonitorQuiescence(bool should_monitor) {
should_monitor_quiescence = should_monitor;
return *this;
}
Spec SetShouldNotifyObservers(bool run_observers) {
should_notify_observers = run_observers;
return *this;
}
// Delayed fences require Now() to be sampled when posting immediate tasks,
// which is not free.
Spec SetDelayedFencesAllowed(bool allow_delayed_fences) {
delayed_fence_allowed = allow_delayed_fences;
return *this;
}
Spec SetTimeDomain(TimeDomain* domain) {
time_domain = domain;
return *this;
}
const char* name;
bool should_monitor_quiescence = false;
TimeDomain* time_domain = nullptr;
bool should_notify_observers = true;
bool delayed_fence_allowed = false;
};
// TODO(altimin): Make this private after TaskQueue/TaskQueueImpl refactoring.
TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
const TaskQueue::Spec& spec);
// Information about task execution.
//
// Wall-time related methods (start_time, end_time, wall_duration) can be
// called only when |has_wall_time()| is true.
// Thread-time related methods (start_thread_time, end_thread_time,
// thread_duration) can be called only when |has_thread_time()| is true.
//
// start_* should be called after RecordTaskStart.
// end_* and *_duration should be called after RecordTaskEnd.
class BASE_EXPORT TaskTiming {
public:
enum class State { NotStarted, Running, Finished };
enum class TimeRecordingPolicy { DoRecord, DoNotRecord };
TaskTiming(bool has_wall_time, bool has_thread_time);
bool has_wall_time() const { return has_wall_time_; }
bool has_thread_time() const { return has_thread_time_; }
base::TimeTicks start_time() const {
DCHECK(has_wall_time());
return start_time_;
}
base::TimeTicks end_time() const {
DCHECK(has_wall_time());
return end_time_;
}
base::TimeDelta wall_duration() const {
DCHECK(has_wall_time());
return end_time_ - start_time_;
}
base::ThreadTicks start_thread_time() const {
DCHECK(has_thread_time());
return start_thread_time_;
}
base::ThreadTicks end_thread_time() const {
DCHECK(has_thread_time());
return end_thread_time_;
}
base::TimeDelta thread_duration() const {
DCHECK(has_thread_time());
return end_thread_time_ - start_thread_time_;
}
State state() const { return state_; }
void RecordTaskStart(LazyNow* now);
void RecordTaskEnd(LazyNow* now);
// Protected for tests.
protected:
State state_ = State::NotStarted;
bool has_wall_time_;
bool has_thread_time_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
base::ThreadTicks start_thread_time_;
base::ThreadTicks end_thread_time_;
};
// An interface that lets the owner vote on whether or not the associated
// TaskQueue should be enabled.
class BASE_EXPORT QueueEnabledVoter {
public:
~QueueEnabledVoter();
QueueEnabledVoter(const QueueEnabledVoter&) = delete;
const QueueEnabledVoter& operator=(const QueueEnabledVoter&) = delete;
// Votes to enable or disable the associated TaskQueue. The TaskQueue will
// only be enabled if all the voters agree it should be enabled, or if there
// are no voters.
// NOTE this must be called on the thread the associated TaskQueue was
// created on.
void SetVoteToEnable(bool enabled);
bool IsVotingToEnable() const { return enabled_; }
private:
friend class TaskQueue;
explicit QueueEnabledVoter(scoped_refptr<TaskQueue> task_queue);
scoped_refptr<TaskQueue> const task_queue_;
bool enabled_;
};
// Returns an interface that allows the caller to vote on whether or not this
// TaskQueue is enabled. The TaskQueue will be enabled if there are no voters
// or if all agree it should be enabled.
// NOTE this must be called on the thread this TaskQueue was created by.
std::unique_ptr<QueueEnabledVoter> CreateQueueEnabledVoter();
// NOTE this must be called on the thread this TaskQueue was created by.
bool IsQueueEnabled() const;
// Returns true if the queue is completely empty.
bool IsEmpty() const;
// Returns the number of pending tasks in the queue.
size_t GetNumberOfPendingTasks() const;
// Returns true if the queue has work that's ready to execute now.
// NOTE: this must be called on the thread this TaskQueue was created by.
bool HasTaskToRunImmediately() const;
// Returns requested run time of next scheduled wake-up for a delayed task
// which is not ready to run. If there are no such tasks (immediate tasks
// don't count) or the queue is disabled it returns nullopt.
// NOTE: this must be called on the thread this TaskQueue was created by.
Optional<TimeTicks> GetNextScheduledWakeUp();
// Can be called on any thread.
virtual const char* GetName() const;
// Set the priority of the queue to |priority|. NOTE this must be called on
// the thread this TaskQueue was created by.
void SetQueuePriority(QueuePriority priority);
// Returns the current queue priority.
QueuePriority GetQueuePriority() const;
// These functions can only be called on the same thread that the task queue
// manager executes its tasks on.
void AddTaskObserver(TaskObserver* task_observer);
void RemoveTaskObserver(TaskObserver* task_observer);
// Set the blame context which is entered and left while executing tasks from
// this task queue. |blame_context| must be null or outlive this task queue.
// Must be called on the thread this TaskQueue was created by.
void SetBlameContext(trace_event::BlameContext* blame_context);
// Removes the task queue from the previous TimeDomain and adds it to
// |domain|. This is a moderately expensive operation.
void SetTimeDomain(TimeDomain* domain);
// Returns the queue's current TimeDomain. Can be called from any thread.
TimeDomain* GetTimeDomain() const;
enum class InsertFencePosition {
kNow, // Tasks posted on the queue up to this point may run.
// All further tasks are blocked.
kBeginningOfTime, // No tasks posted on this queue may run.
};
// Inserts a barrier into the task queue which prevents tasks with an enqueue
// order greater than the fence from running until either the fence has been
// removed or a subsequent fence has unblocked some tasks within the queue.
// Note: delayed tasks get their enqueue order set once their delay has
// expired, and non-delayed tasks get their enqueue order set when posted.
//
// Fences come in three flavours:
// - Regular (InsertFence(NOW)) - all tasks posted after this moment
// are blocked.
// - Fully blocking (InsertFence(kBeginningOfTime)) - all tasks including
// already posted are blocked.
// - Delayed (InsertFenceAt(timestamp)) - blocks all tasks posted after given
// point in time (must be in the future).
//
// Only one fence can be scheduled at a time. Inserting a new fence
// will automatically remove the previous one, regardless of fence type.
void InsertFence(InsertFencePosition position);
// Delayed fences are only allowed for queues created with
// SetDelayedFencesAllowed(true) because this feature implies sampling Now()
// (which isn't free) for every PostTask, even those with zero delay.
void InsertFenceAt(TimeTicks time);
// Removes any previously added fence and unblocks execution of any tasks
// blocked by it.
void RemoveFence();
// Returns true if the queue has a fence, but it isn't necessarily blocking
// execution of tasks (this may be the case if the tasks' enqueue order hasn't
// reached the number set for the fence).
bool HasActiveFence();
// Returns true if the queue has a fence which is blocking execution of tasks.
bool BlockedByFence() const;
// Returns an EnqueueOrder generated at the last transition to unblocked. A
// queue is unblocked when it is enabled and no fence prevents the front task
// from running. If the EnqueueOrder of a task is greater than this when it
// starts running, it means that it was never blocked.
EnqueueOrder GetEnqueueOrderAtWhichWeBecameUnblocked() const;
void SetObserver(Observer* observer);
// Controls whether or not the queue will emit trace events when tasks are
// posted to it while disabled. This only applies for the current or next
// period during which the queue is disabled. When the queue is re-enabled,
// this will revert back to the default value of false.
void SetShouldReportPostedTasksWhenDisabled(bool should_report);
// Create a task runner for this TaskQueue which will annotate all
// posted tasks with the given task type.
// May be called on any thread.
// NOTE: Task runners don't hold a reference to the TaskQueue; hence the
// caller is required to retain that reference to prevent automatic graceful
// shutdown. Unique ownership of task queues will fix this issue soon.
scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(TaskType task_type);
// Default task runner which doesn't annotate tasks with a task type.
scoped_refptr<SingleThreadTaskRunner> task_runner() const {
return default_task_runner_;
}
protected:
virtual ~TaskQueue();
internal::TaskQueueImpl* GetTaskQueueImpl() const { return impl_.get(); }
private:
friend class RefCountedThreadSafe<TaskQueue>;
friend class internal::SequenceManagerImpl;
friend class internal::TaskQueueImpl;
void AddQueueEnabledVoter(bool voter_is_enabled);
void RemoveQueueEnabledVoter(bool voter_is_enabled);
bool AreAllQueueEnabledVotersEnabled() const;
void OnQueueEnabledVoteChanged(bool enabled);
bool IsOnMainThread() const;
// TaskQueue has ownership of an underlying implementation, but in certain
// cases (e.g. detached frames) their lifetimes may diverge.
// This method should be used to take away the impl for graceful shutdown.
// TaskQueue will disregard any calls or posted tasks thereafter.
std::unique_ptr<internal::TaskQueueImpl> TakeTaskQueueImpl();
// |impl_| can be written to on the main thread but can be read from
// any thread.
// |impl_lock_| must be acquired when writing to |impl_| or when accessing
// it from a non-main thread. Reading from the main thread does not require
// a lock.
mutable base::internal::CheckedLock impl_lock_{
base::internal::UniversalPredecessor{}};
std::unique_ptr<internal::TaskQueueImpl> impl_;
const WeakPtr<internal::SequenceManagerImpl> sequence_manager_;
scoped_refptr<internal::AssociatedThreadId> associated_thread_;
scoped_refptr<SingleThreadTaskRunner> default_task_runner_;
int enabled_voter_count_ = 0;
int voter_count_ = 0;
const char* name_;
DISALLOW_COPY_AND_ASSIGN(TaskQueue);
};
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_
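// Usage sketch (illustrative, not part of this header): combining the
// QueueEnabledVoter and fence APIs declared above. |queue| is assumed to come
// from SequenceManager::CreateTaskQueue(); the posted closures are
// placeholders.
#include "base/bind.h"

void TaskQueuePauseAndFenceSketch(
    scoped_refptr<base::sequence_manager::TaskQueue> queue) {
  using base::sequence_manager::TaskQueue;

  // The queue only runs tasks while every voter votes "enabled".
  std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
      queue->CreateQueueEnabledVoter();
  voter->SetVoteToEnable(false);  // Queue is now disabled.

  // Tasks can still be posted; they are held until the queue is re-enabled.
  queue->task_runner()->PostTask(FROM_HERE, base::BindOnce([] {}));
  voter->SetVoteToEnable(true);  // Queue may run tasks again.

  // A fence blocks tasks posted after this point until it is removed.
  queue->InsertFence(TaskQueue::InsertFencePosition::kNow);
  queue->task_runner()->PostTask(FROM_HERE, base::BindOnce([] {}));
  queue->RemoveFence();
}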

File diff suppressed because it is too large

View file

@@ -0,0 +1,558 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
#include <stddef.h>
#include <memory>
#include <queue>
#include <set>
#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/pending_task.h"
#include "base/task/common/checked_lock.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/common/operations_controller.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/atomic_flag_set.h"
#include "base/task/sequence_manager/enqueue_order.h"
#include "base/task/sequence_manager/lazily_deallocated_deque.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/task_queue.h"
#include "base/threading/thread_checker.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
namespace base {
namespace sequence_manager {
class LazyNow;
class TimeDomain;
namespace internal {
class SequenceManagerImpl;
class WorkQueue;
class WorkQueueSets;
// TaskQueueImpl has four main queues:
//
// Immediate (non-delayed) tasks:
// |immediate_incoming_queue| - PostTask enqueues tasks here.
// |immediate_work_queue| - SequenceManager takes immediate tasks here.
//
// Delayed tasks:
// |delayed_incoming_queue| - PostDelayedTask enqueues tasks here.
// |delayed_work_queue| - SequenceManager takes delayed tasks here.
//
// The |immediate_incoming_queue| can be accessed from any thread, the other
// queues are main-thread only. To reduce the overhead of locking,
// |immediate_work_queue| is swapped with |immediate_incoming_queue| when
// |immediate_work_queue| becomes empty.
//
// Delayed tasks are initially posted to |delayed_incoming_queue| and a wake-up
// is scheduled with the TimeDomain. When the delay has elapsed, the TimeDomain
// calls MoveReadyDelayedTasksToWorkQueue and the ready delayed tasks are moved
// into the |delayed_work_queue|. Note the EnqueueOrder (used for ordering) for
// a delayed task is not set until it's moved into the |delayed_work_queue|.
//
// TaskQueueImpl uses the WorkQueueSets and the TaskQueueSelector to implement
// prioritization. Task selection is done by the TaskQueueSelector and when a
// queue is selected, it round-robins between the |immediate_work_queue| and
// |delayed_work_queue|. The reason for this is we want to make sure delayed
// tasks (normally the most common type) don't starve out immediate work.
class BASE_EXPORT TaskQueueImpl {
public:
TaskQueueImpl(SequenceManagerImpl* sequence_manager,
TimeDomain* time_domain,
const TaskQueue::Spec& spec);
~TaskQueueImpl();
// Types of queues TaskQueueImpl is maintaining internally.
enum class WorkQueueType { kImmediate, kDelayed };
// Some methods have fast paths when on the main thread.
enum class CurrentThread { kMainThread, kNotMainThread };
// Non-nestable tasks may get deferred, but the queue of deferred tasks is
// maintained on the SequenceManager side, so we need to keep the information
// needed to requeue them.
struct DeferredNonNestableTask {
Task task;
internal::TaskQueueImpl* task_queue;
WorkQueueType work_queue_type;
};
using OnNextWakeUpChangedCallback = RepeatingCallback<void(TimeTicks)>;
using OnTaskReadyHandler = RepeatingCallback<void(const Task&, LazyNow*)>;
using OnTaskStartedHandler =
RepeatingCallback<void(const Task&, const TaskQueue::TaskTiming&)>;
using OnTaskCompletedHandler =
RepeatingCallback<void(const Task&, TaskQueue::TaskTiming*, LazyNow*)>;
// May be called from any thread.
scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(
TaskType task_type) const;
// TaskQueue implementation.
const char* GetName() const;
bool IsQueueEnabled() const;
void SetQueueEnabled(bool enabled);
void SetShouldReportPostedTasksWhenDisabled(bool should_report);
bool IsEmpty() const;
size_t GetNumberOfPendingTasks() const;
bool HasTaskToRunImmediately() const;
Optional<TimeTicks> GetNextScheduledWakeUp();
Optional<DelayedWakeUp> GetNextScheduledWakeUpImpl();
void SetQueuePriority(TaskQueue::QueuePriority priority);
TaskQueue::QueuePriority GetQueuePriority() const;
void AddTaskObserver(TaskObserver* task_observer);
void RemoveTaskObserver(TaskObserver* task_observer);
void SetTimeDomain(TimeDomain* time_domain);
TimeDomain* GetTimeDomain() const;
void SetBlameContext(trace_event::BlameContext* blame_context);
void InsertFence(TaskQueue::InsertFencePosition position);
void InsertFenceAt(TimeTicks time);
void RemoveFence();
bool HasActiveFence();
bool BlockedByFence() const;
EnqueueOrder GetEnqueueOrderAtWhichWeBecameUnblocked() const;
// Implementation of TaskQueue::SetObserver.
void SetObserver(TaskQueue::Observer* observer);
void UnregisterTaskQueue();
// Returns true if a (potentially hypothetical) task with the specified
// |enqueue_order| could run on the queue. Must be called from the main
// thread.
bool CouldTaskRun(EnqueueOrder enqueue_order) const;
// Returns true if a task with |enqueue_order| obtained from this queue was
// ever in the queue while it was disabled, blocked by a fence, or less
// important than kNormalPriority.
bool WasBlockedOrLowPriority(EnqueueOrder enqueue_order) const;
// Must only be called from the thread this task queue was created on.
void ReloadEmptyImmediateWorkQueue();
void AsValueInto(TimeTicks now,
trace_event::TracedValue* state,
bool force_verbose) const;
bool GetQuiescenceMonitored() const { return should_monitor_quiescence_; }
bool GetShouldNotifyObservers() const { return should_notify_observers_; }
void NotifyWillProcessTask(const Task& task,
bool was_blocked_or_low_priority);
void NotifyDidProcessTask(const Task& task);
// Check for available tasks in immediate work queues.
// Used to check if we need to generate notifications about delayed work.
bool HasPendingImmediateWork();
bool HasPendingImmediateWorkLocked()
EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
bool has_pending_high_resolution_tasks() const {
return main_thread_only()
.delayed_incoming_queue.has_pending_high_resolution_tasks();
}
WorkQueue* delayed_work_queue() {
return main_thread_only().delayed_work_queue.get();
}
const WorkQueue* delayed_work_queue() const {
return main_thread_only().delayed_work_queue.get();
}
WorkQueue* immediate_work_queue() {
return main_thread_only().immediate_work_queue.get();
}
const WorkQueue* immediate_work_queue() const {
return main_thread_only().immediate_work_queue.get();
}
// Enqueues any delayed tasks which should be run now on the
// |delayed_work_queue|. Must be called from the main thread.
void MoveReadyDelayedTasksToWorkQueue(LazyNow* lazy_now);
base::internal::HeapHandle heap_handle() const {
return main_thread_only().heap_handle;
}
void set_heap_handle(base::internal::HeapHandle heap_handle) {
main_thread_only().heap_handle = heap_handle;
}
// Pushes |task| onto the front of the specified work queue. Caution must be
// taken with this API because you could easily starve out other work.
// TODO(kraynov): Simplify non-nestable task logic https://crbug.com/845437.
void RequeueDeferredNonNestableTask(DeferredNonNestableTask task);
void PushImmediateIncomingTaskForTest(Task&& task);
// Iterates over |delayed_incoming_queue| removing canceled tasks. In
// addition MaybeShrinkQueue is called on all internal queues.
void ReclaimMemory(TimeTicks now);
// Registers a handler to invoke when a task posted to this TaskQueueImpl is
// ready. For a non-delayed task, this is when the task is posted. For a
// delayed task, this is when the delay expires.
void SetOnTaskReadyHandler(OnTaskReadyHandler handler);
// Allows wrapping TaskQueue to set a handler to subscribe for notifications
// about started and completed tasks.
void SetOnTaskStartedHandler(OnTaskStartedHandler handler);
void OnTaskStarted(const Task& task,
const TaskQueue::TaskTiming& task_timing);
// |task_timing| may be passed in Running state and may not have the end time,
// so that the handler can run an additional task that is counted as a part of
// the main task.
// The handler can call TaskTiming::RecordTaskEnd, which is optional, to
// finalize the task, and use the resulting timing.
void SetOnTaskCompletedHandler(OnTaskCompletedHandler handler);
void OnTaskCompleted(const Task& task,
TaskQueue::TaskTiming* task_timing,
LazyNow* lazy_now);
bool RequiresTaskTiming() const;
WeakPtr<SequenceManagerImpl> GetSequenceManagerWeakPtr();
SequenceManagerImpl* sequence_manager() const { return sequence_manager_; }
// Returns true if this queue is unregistered or the task queue manager has
// been deleted, in which case this queue can be safely deleted on any thread.
bool IsUnregistered() const;
// Delete all tasks within this TaskQueue.
void DeletePendingTasks();
// Whether this task queue owns any tasks. The task queue being disabled
// doesn't affect this.
bool HasTasks() const;
protected:
void SetDelayedWakeUpForTesting(Optional<DelayedWakeUp> wake_up);
private:
friend class WorkQueue;
friend class WorkQueueTest;
// A TaskQueueImpl instance can be destroyed or unregistered before all its
// associated TaskRunner instances are (they are refcounted). Thus we need a
// way to prevent TaskRunner instances from posting further tasks. This class
// guards PostTask calls using an OperationsController.
// This class is ref-counted as both the TaskQueueImpl instance and all
// associated TaskRunner instances share the same GuardedTaskPoster instance.
// When TaskQueueImpl shuts down it calls ShutdownAndWaitForZeroOperations(),
// preventing further PostTask calls being made to the underlying
// TaskQueueImpl.
class GuardedTaskPoster : public RefCountedThreadSafe<GuardedTaskPoster> {
public:
explicit GuardedTaskPoster(TaskQueueImpl* outer);
bool PostTask(PostedTask task);
void StartAcceptingOperations() {
operations_controller_.StartAcceptingOperations();
}
void ShutdownAndWaitForZeroOperations() {
operations_controller_.ShutdownAndWaitForZeroOperations();
}
private:
friend class RefCountedThreadSafe<GuardedTaskPoster>;
~GuardedTaskPoster();
base::internal::OperationsController operations_controller_;
// Pointer might be stale, access guarded by |operations_controller_|
TaskQueueImpl* const outer_;
};
class TaskRunner : public SingleThreadTaskRunner {
public:
explicit TaskRunner(scoped_refptr<GuardedTaskPoster> task_poster,
scoped_refptr<AssociatedThreadId> associated_thread,
TaskType task_type);
bool PostDelayedTask(const Location& location,
OnceClosure callback,
TimeDelta delay) final;
bool PostNonNestableDelayedTask(const Location& location,
OnceClosure callback,
TimeDelta delay) final;
bool RunsTasksInCurrentSequence() const final;
private:
~TaskRunner() final;
bool PostTask(PostedTask task) const;
const scoped_refptr<GuardedTaskPoster> task_poster_;
const scoped_refptr<AssociatedThreadId> associated_thread_;
const TaskType task_type_;
};
// A queue for holding delayed tasks before their delay has expired.
struct DelayedIncomingQueue {
public:
DelayedIncomingQueue();
~DelayedIncomingQueue();
void push(Task&& task);
void pop();
bool empty() const { return queue_.empty(); }
size_t size() const { return queue_.size(); }
const Task& top() const { return queue_.top(); }
void swap(DelayedIncomingQueue* other);
bool has_pending_high_resolution_tasks() const {
return pending_high_res_tasks_;
}
void SweepCancelledTasks();
std::priority_queue<Task> TakeTasks() { return std::move(queue_); }
void AsValueInto(TimeTicks now, trace_event::TracedValue* state) const;
private:
struct PQueue : public std::priority_queue<Task> {
// Expose the container and comparator.
using std::priority_queue<Task>::c;
using std::priority_queue<Task>::comp;
};
PQueue queue_;
// Number of pending tasks in the queue that need high resolution timing.
int pending_high_res_tasks_ = 0;
DISALLOW_COPY_AND_ASSIGN(DelayedIncomingQueue);
};
struct MainThreadOnly {
MainThreadOnly(TaskQueueImpl* task_queue, TimeDomain* time_domain);
~MainThreadOnly();
// Another copy of TimeDomain for lock-free access from the main thread.
// See description inside struct AnyThread for details.
TimeDomain* time_domain;
TaskQueue::Observer* task_queue_observer = nullptr;
std::unique_ptr<WorkQueue> delayed_work_queue;
std::unique_ptr<WorkQueue> immediate_work_queue;
DelayedIncomingQueue delayed_incoming_queue;
ObserverList<TaskObserver>::Unchecked task_observers;
base::internal::HeapHandle heap_handle;
bool is_enabled = true;
trace_event::BlameContext* blame_context = nullptr; // Not owned.
EnqueueOrder current_fence;
Optional<TimeTicks> delayed_fence;
// Snapshots the next sequence number when the queue is unblocked, otherwise
// it contains EnqueueOrder::none(). If the EnqueueOrder of a task just
// popped from this queue is greater than this, it means that the queue was
// never disabled or blocked by a fence while the task was queued.
EnqueueOrder enqueue_order_at_which_we_became_unblocked;
// If the EnqueueOrder of a task just popped from this queue is greater than
// this, it means that the queue was never disabled, blocked by a fence or
// less important than kNormalPriority while the task was queued.
//
// Implementation details:
// 1) When the queue is made less important than kNormalPriority, this is
// set to EnqueueOrder::max(). The EnqueueOrder of any task will compare
// less than this.
// 2) When the queue is made at least as important as kNormalPriority, this
// snapshots the next sequence number. If the queue is blocked, the value
// is irrelevant because no task should be popped. If the queue is not
// blocked, the EnqueueOrder of any already queued task will compare less
// than this.
// 3) When the queue is unblocked while at least as important as
// kNormalPriority, this snapshots the next sequence number. The
// EnqueueOrder of any already queued task will compare less than this.
EnqueueOrder
enqueue_order_at_which_we_became_unblocked_with_normal_priority;
OnTaskReadyHandler on_task_ready_handler;
OnTaskStartedHandler on_task_started_handler;
OnTaskCompletedHandler on_task_completed_handler;
// Last reported wake-up, used only in UpdateDelayedWakeUp to avoid
// excessive calls.
Optional<DelayedWakeUp> scheduled_wake_up;
// If false, queue will be disabled. Used only for tests.
bool is_enabled_for_test = true;
// The time at which the task queue was disabled, if it is currently
// disabled.
Optional<TimeTicks> disabled_time;
// Whether or not the task queue should emit tracing events for tasks
// posted to this queue when it is disabled.
bool should_report_posted_tasks_when_disabled = false;
};
void PostTask(PostedTask task);
void PostImmediateTaskImpl(PostedTask task, CurrentThread current_thread);
void PostDelayedTaskImpl(PostedTask task, CurrentThread current_thread);
// Push the task onto the |delayed_incoming_queue|. Lock-free, main-thread-only
// fast path.
void PushOntoDelayedIncomingQueueFromMainThread(Task pending_task,
TimeTicks now,
bool notify_task_annotator);
// Push the task onto the |delayed_incoming_queue|. Slow path from other
// threads.
void PushOntoDelayedIncomingQueue(Task pending_task);
void ScheduleDelayedWorkTask(Task pending_task);
void MoveReadyImmediateTasksToImmediateWorkQueueLocked()
EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
// LazilyDeallocatedDeque uses TimeTicks to figure out when to resize, so we
// should always use real time here.
using TaskDeque =
LazilyDeallocatedDeque<Task, subtle::TimeTicksNowIgnoringOverride>;
// Extracts all the tasks from the immediate incoming queue and swaps it with
// |queue| which must be empty.
// Can be called from any thread.
void TakeImmediateIncomingQueueTasks(TaskDeque* queue);
void TraceQueueSize() const;
static void QueueAsValueInto(const TaskDeque& queue,
TimeTicks now,
trace_event::TracedValue* state);
static void QueueAsValueInto(const std::priority_queue<Task>& queue,
TimeTicks now,
trace_event::TracedValue* state);
static void TaskAsValueInto(const Task& task,
TimeTicks now,
trace_event::TracedValue* state);
// Schedules delayed work on time domain and calls the observer.
void UpdateDelayedWakeUp(LazyNow* lazy_now);
void UpdateDelayedWakeUpImpl(LazyNow* lazy_now,
Optional<DelayedWakeUp> wake_up);
// Activates a delayed fence if its time has come.
void ActivateDelayedFenceIfNeeded(TimeTicks now);
// Updates state protected by any_thread_lock_.
void UpdateCrossThreadQueueStateLocked()
EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
void MaybeLogPostTask(PostedTask* task);
void MaybeAdjustTaskDelay(PostedTask* task, CurrentThread current_thread);
// Reports the task if it was due to IPC and was posted to a disabled queue.
// This should be called after WillQueueTask has been called for the task.
void MaybeReportIpcTaskQueuedFromMainThread(Task* pending_task,
const char* task_queue_name);
bool ShouldReportIpcTaskQueuedFromAnyThreadLocked(
base::TimeDelta* time_since_disabled)
EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
void MaybeReportIpcTaskQueuedFromAnyThreadLocked(Task* pending_task,
const char* task_queue_name)
EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
void MaybeReportIpcTaskQueuedFromAnyThreadUnlocked(
Task* pending_task,
const char* task_queue_name);
void ReportIpcTaskQueued(Task* pending_task,
const char* task_queue_name,
const base::TimeDelta& time_since_disabled);
// Invoked when the queue becomes enabled and not blocked by a fence.
void OnQueueUnblocked();
const char* name_;
SequenceManagerImpl* const sequence_manager_;
scoped_refptr<AssociatedThreadId> associated_thread_;
const scoped_refptr<GuardedTaskPoster> task_poster_;
mutable base::internal::CheckedLock any_thread_lock_;
struct AnyThread {
// Mirrored from MainThreadOnly. These are only used for tracing.
struct TracingOnly {
TracingOnly();
~TracingOnly();
bool is_enabled = true;
Optional<TimeTicks> disabled_time;
bool should_report_posted_tasks_when_disabled = false;
};
explicit AnyThread(TimeDomain* time_domain);
~AnyThread();
// TimeDomain is maintained in two copies: inside AnyThread and inside
// MainThreadOnly. It can be changed only from the main thread, so
// |any_thread_lock_| must be held when accessing it from other threads.
TimeDomain* time_domain;
TaskQueue::Observer* task_queue_observer = nullptr;
TaskDeque immediate_incoming_queue;
// True if main_thread_only().immediate_work_queue is empty.
bool immediate_work_queue_empty = true;
bool post_immediate_task_should_schedule_work = true;
bool unregistered = false;
OnTaskReadyHandler on_task_ready_handler;
#if DCHECK_IS_ON()
// A cache of |immediate_work_queue->work_queue_set_index()| which is used
// to index into
// SequenceManager::Settings::per_priority_cross_thread_task_delay to apply
// a priority specific delay for debugging purposes.
int queue_set_index = 0;
#endif
TracingOnly tracing_only;
};
AnyThread any_thread_ GUARDED_BY(any_thread_lock_);
MainThreadOnly main_thread_only_;
MainThreadOnly& main_thread_only() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
const MainThreadOnly& main_thread_only() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
// Handle to our entry within the SequenceManager's |empty_queues_to_reload_|
// atomic flag set. Used to signal that this queue needs to be reloaded.
// If you call SetActive(false) you should do so while holding
// |any_thread_lock_|, because there is a danger that a cross-thread PostTask
// might reset it before we make |immediate_work_queue| non-empty.
AtomicFlagSet::AtomicFlag empty_queues_to_reload_handle_;
const bool should_monitor_quiescence_;
const bool should_notify_observers_;
const bool delayed_fence_allowed_;
DISALLOW_COPY_AND_ASSIGN(TaskQueueImpl);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
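// Usage sketch (illustrative, not part of this header): wiring the
// OnTaskStartedHandler / OnTaskCompletedHandler hooks declared above.
// InstrumentQueueSketch() and |impl| are hypothetical; the handler signatures
// match the using-declarations in TaskQueueImpl.
#include "base/bind.h"

namespace base {
namespace sequence_manager {
namespace internal {

void InstrumentQueueSketch(TaskQueueImpl* impl) {
  impl->SetOnTaskStartedHandler(
      BindRepeating([](const Task&, const TaskQueue::TaskTiming&) {
        // Invoked once the task's timing has been started, just before the
        // task body runs.
      }));
  impl->SetOnTaskCompletedHandler(BindRepeating(
      [](const Task&, TaskQueue::TaskTiming* timing, LazyNow* lazy_now) {
        // Optionally finalize the timing before consuming it.
        timing->RecordTaskEnd(lazy_now);
      }));
}

}  // namespace internal
}  // namespace sequence_manager
}  // namespace base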

View file

@@ -0,0 +1,252 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/task_queue_selector.h"
#include <utility>
#include "base/bits.h"
#include "base/logging.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/work_queue.h"
#include "base/threading/thread_checker.h"
#include "base/trace_event/traced_value.h"
namespace base {
namespace sequence_manager {
namespace internal {
TaskQueueSelector::TaskQueueSelector(
scoped_refptr<AssociatedThreadId> associated_thread,
const SequenceManager::Settings& settings)
: associated_thread_(std::move(associated_thread)),
#if DCHECK_IS_ON()
random_task_selection_(settings.random_task_selection_seed != 0),
#endif
delayed_work_queue_sets_("delayed", this, settings),
immediate_work_queue_sets_("immediate", this, settings) {
}
TaskQueueSelector::~TaskQueueSelector() = default;
void TaskQueueSelector::AddQueue(internal::TaskQueueImpl* queue) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK(queue->IsQueueEnabled());
AddQueueImpl(queue, TaskQueue::kNormalPriority);
}
void TaskQueueSelector::RemoveQueue(internal::TaskQueueImpl* queue) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (queue->IsQueueEnabled()) {
RemoveQueueImpl(queue);
}
}
void TaskQueueSelector::EnableQueue(internal::TaskQueueImpl* queue) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK(queue->IsQueueEnabled());
AddQueueImpl(queue, queue->GetQueuePriority());
if (task_queue_selector_observer_)
task_queue_selector_observer_->OnTaskQueueEnabled(queue);
}
void TaskQueueSelector::DisableQueue(internal::TaskQueueImpl* queue) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK(!queue->IsQueueEnabled());
RemoveQueueImpl(queue);
}
void TaskQueueSelector::SetQueuePriority(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority) {
DCHECK_LT(priority, TaskQueue::kQueuePriorityCount);
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (queue->IsQueueEnabled()) {
ChangeSetIndex(queue, priority);
} else {
// A disabled queue is not in any set, so we can't use ChangeSetIndex here
// and have to assign the priority on the queue itself.
queue->delayed_work_queue()->AssignSetIndex(priority);
queue->immediate_work_queue()->AssignSetIndex(priority);
}
DCHECK_EQ(priority, queue->GetQueuePriority());
}
TaskQueue::QueuePriority TaskQueueSelector::NextPriority(
TaskQueue::QueuePriority priority) {
DCHECK(priority < TaskQueue::kQueuePriorityCount);
return static_cast<TaskQueue::QueuePriority>(static_cast<int>(priority) + 1);
}
void TaskQueueSelector::AddQueueImpl(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority) {
#if DCHECK_IS_ON()
DCHECK(!CheckContainsQueueForTest(queue));
#endif
delayed_work_queue_sets_.AddQueue(queue->delayed_work_queue(), priority);
immediate_work_queue_sets_.AddQueue(queue->immediate_work_queue(), priority);
#if DCHECK_IS_ON()
DCHECK(CheckContainsQueueForTest(queue));
#endif
}
void TaskQueueSelector::ChangeSetIndex(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority) {
#if DCHECK_IS_ON()
DCHECK(CheckContainsQueueForTest(queue));
#endif
delayed_work_queue_sets_.ChangeSetIndex(queue->delayed_work_queue(),
priority);
immediate_work_queue_sets_.ChangeSetIndex(queue->immediate_work_queue(),
priority);
#if DCHECK_IS_ON()
DCHECK(CheckContainsQueueForTest(queue));
#endif
}
void TaskQueueSelector::RemoveQueueImpl(internal::TaskQueueImpl* queue) {
#if DCHECK_IS_ON()
DCHECK(CheckContainsQueueForTest(queue));
#endif
delayed_work_queue_sets_.RemoveQueue(queue->delayed_work_queue());
immediate_work_queue_sets_.RemoveQueue(queue->immediate_work_queue());
#if DCHECK_IS_ON()
DCHECK(!CheckContainsQueueForTest(queue));
#endif
}
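// For each priority there is one delayed and one immediate work queue set, so
// the per-priority counts below move between 0 and 2; a priority stays active
// in |active_priority_tracker_| only while its count is non-zero.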
void TaskQueueSelector::WorkQueueSetBecameEmpty(size_t set_index) {
non_empty_set_counts_[set_index]--;
DCHECK_GE(non_empty_set_counts_[set_index], 0);
// There are no delayed or immediate tasks for |set_index| so remove from
// |active_priority_tracker_|.
if (non_empty_set_counts_[set_index] == 0) {
active_priority_tracker_.SetActive(
static_cast<TaskQueue::QueuePriority>(set_index), false);
}
}
void TaskQueueSelector::WorkQueueSetBecameNonEmpty(size_t set_index) {
non_empty_set_counts_[set_index]++;
DCHECK_LE(non_empty_set_counts_[set_index], kMaxNonEmptySetCount);
// There is now a delayed or an immediate task for |set_index|, so add to
// |active_priority_tracker_|.
if (non_empty_set_counts_[set_index] == 1) {
TaskQueue::QueuePriority priority =
static_cast<TaskQueue::QueuePriority>(set_index);
active_priority_tracker_.SetActive(priority, true);
}
}
void TaskQueueSelector::CollectSkippedOverLowerPriorityTasks(
const internal::WorkQueue* selected_work_queue,
std::vector<const Task*>* result) const {
delayed_work_queue_sets_.CollectSkippedOverLowerPriorityTasks(
selected_work_queue, result);
immediate_work_queue_sets_.CollectSkippedOverLowerPriorityTasks(
selected_work_queue, result);
}
#if DCHECK_IS_ON() || !defined(NDEBUG)
bool TaskQueueSelector::CheckContainsQueueForTest(
const internal::TaskQueueImpl* queue) const {
bool contains_delayed_work_queue =
delayed_work_queue_sets_.ContainsWorkQueueForTest(
queue->delayed_work_queue());
bool contains_immediate_work_queue =
immediate_work_queue_sets_.ContainsWorkQueueForTest(
queue->immediate_work_queue());
DCHECK_EQ(contains_delayed_work_queue, contains_immediate_work_queue);
return contains_delayed_work_queue;
}
#endif
WorkQueue* TaskQueueSelector::SelectWorkQueueToService() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!active_priority_tracker_.HasActivePriority())
return nullptr;
// Select the priority from which we will select a task. Usually this is
// the highest priority for which we have work, unless we are starving a lower
// priority.
TaskQueue::QueuePriority priority =
active_priority_tracker_.HighestActivePriority();
WorkQueue* queue =
#if DCHECK_IS_ON()
random_task_selection_ ? ChooseWithPriority<SetOperationRandom>(priority)
:
#endif
ChooseWithPriority<SetOperationOldest>(priority);
// If we have selected a delayed task while having an immediate task of the
// same priority, increase the starvation count.
if (queue->queue_type() == WorkQueue::QueueType::kDelayed &&
!immediate_work_queue_sets_.IsSetEmpty(priority)) {
immediate_starvation_count_++;
} else {
immediate_starvation_count_ = 0;
}
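// Once |immediate_starvation_count_| reaches kMaxDelayedStarvationTasks,
// ChooseWithPriority() consults the immediate work queue set first, so a
// steady stream of delayed tasks cannot starve immediate tasks indefinitely.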
return queue;
}
void TaskQueueSelector::AsValueInto(trace_event::TracedValue* state) const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
state->SetInteger("immediate_starvation_count", immediate_starvation_count_);
}
void TaskQueueSelector::SetTaskQueueSelectorObserver(Observer* observer) {
task_queue_selector_observer_ = observer;
}
Optional<TaskQueue::QueuePriority>
TaskQueueSelector::GetHighestPendingPriority() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!active_priority_tracker_.HasActivePriority())
return nullopt;
return active_priority_tracker_.HighestActivePriority();
}
void TaskQueueSelector::SetImmediateStarvationCountForTest(
size_t immediate_starvation_count) {
immediate_starvation_count_ = immediate_starvation_count;
}
bool TaskQueueSelector::HasTasksWithPriority(
TaskQueue::QueuePriority priority) {
return !delayed_work_queue_sets_.IsSetEmpty(priority) ||
!immediate_work_queue_sets_.IsSetEmpty(priority);
}
TaskQueueSelector::ActivePriorityTracker::ActivePriorityTracker() = default;
void TaskQueueSelector::ActivePriorityTracker::SetActive(
TaskQueue::QueuePriority priority,
bool is_active) {
DCHECK_LT(priority, TaskQueue::QueuePriority::kQueuePriorityCount);
DCHECK_NE(IsActive(priority), is_active);
if (is_active) {
active_priorities_ |= (1u << static_cast<size_t>(priority));
} else {
active_priorities_ &= ~(1u << static_cast<size_t>(priority));
}
}
TaskQueue::QueuePriority
TaskQueueSelector::ActivePriorityTracker::HighestActivePriority() const {
DCHECK_NE(active_priorities_, 0u)
<< "CountTrailingZeroBits(0) has undefined behavior";
return static_cast<TaskQueue::QueuePriority>(
bits::CountTrailingZeroBits(active_priorities_));
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,251 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_
#include <stddef.h>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/pending_task.h"
#include "base/task/sequence_manager/sequence_manager.h"
#include "base/task/sequence_manager/task_queue_selector_logic.h"
#include "base/task/sequence_manager/work_queue_sets.h"
namespace base {
namespace sequence_manager {
namespace internal {
class AssociatedThreadId;
// TaskQueueSelector is used by the SchedulerHelper to enable prioritization
// of particular task queues.
class BASE_EXPORT TaskQueueSelector : public WorkQueueSets::Observer {
public:
TaskQueueSelector(scoped_refptr<AssociatedThreadId> associated_thread,
const SequenceManager::Settings& settings);
~TaskQueueSelector() override;
// Called to register a queue that can be selected. This function is called
// on the main thread.
void AddQueue(internal::TaskQueueImpl* queue);
// The specified queue will no longer be considered for selection. This
// function is called on the main thread.
void RemoveQueue(internal::TaskQueueImpl* queue);
// Make |queue| eligible for selection. This function is called on the main
// thread. Must only be called if |queue| is disabled.
void EnableQueue(internal::TaskQueueImpl* queue);
// Disable selection from |queue|. Must only be called if |queue| is enabled.
void DisableQueue(internal::TaskQueueImpl* queue);
// Called to set the priority of |queue|.
void SetQueuePriority(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority);
// Called to choose the work queue from which the next task should be taken
// and run. Return the queue to service if there is one or null otherwise.
// This function is called on the main thread.
WorkQueue* SelectWorkQueueToService();
// Serialize the selector state for tracing.
void AsValueInto(trace_event::TracedValue* state) const;
class BASE_EXPORT Observer {
public:
virtual ~Observer() = default;
// Called when |queue| transitions from disabled to enabled.
virtual void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) = 0;
};
// Called once to set the Observer. This function is called
// on the main thread. If |observer| is null, then no callbacks will occur.
void SetTaskQueueSelectorObserver(Observer* observer);
// Returns the priority of the most important pending task if one exists.
// O(1).
Optional<TaskQueue::QueuePriority> GetHighestPendingPriority() const;
// WorkQueueSets::Observer implementation:
void WorkQueueSetBecameEmpty(size_t set_index) override;
void WorkQueueSetBecameNonEmpty(size_t set_index) override;
// Populates |result| with tasks with lower priority than the first task from
// |selected_work_queue| which could otherwise run now.
void CollectSkippedOverLowerPriorityTasks(
const internal::WorkQueue* selected_work_queue,
std::vector<const Task*>* result) const;
protected:
WorkQueueSets* delayed_work_queue_sets() { return &delayed_work_queue_sets_; }
WorkQueueSets* immediate_work_queue_sets() {
return &immediate_work_queue_sets_;
}
// This method will force selection of an immediate task if immediate tasks
// are being starved by delayed tasks.
void SetImmediateStarvationCountForTest(size_t immediate_starvation_count);
// Maximum number of delayed tasks which can be run while there's a waiting
// non-delayed task.
static const size_t kMaxDelayedStarvationTasks = 3;
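// See ChooseWithPriority(): once |immediate_starvation_count_| reaches this
// limit, the immediate work queue set is consulted before the delayed one.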
// Tracks which priorities are currently active, meaning there are pending
// runnable tasks with that priority. Because there are only a handful of
// priorities, and because we always run tasks in order from highest to lowest
// priority, we can use a single integer to represent enabled priorities,
// using a bit per priority.
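// Illustrative example: if only kNormalPriority and kLowPriority have runnable
// tasks, exactly those two bits are set and HighestActivePriority() returns
// kNormalPriority, since smaller QueuePriority values denote higher priorities
// and CountTrailingZeroBits() finds the lowest set bit.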
class BASE_EXPORT ActivePriorityTracker {
public:
ActivePriorityTracker();
bool HasActivePriority() const { return active_priorities_ != 0; }
bool IsActive(TaskQueue::QueuePriority priority) const {
return active_priorities_ & (1u << static_cast<size_t>(priority));
}
void SetActive(TaskQueue::QueuePriority priority, bool is_active);
TaskQueue::QueuePriority HighestActivePriority() const;
private:
static_assert(TaskQueue::QueuePriority::kQueuePriorityCount <
sizeof(size_t) * 8,
"The number of priorities must be strictly less than the "
"number of bits of |active_priorities_|!");
size_t active_priorities_ = 0;
};
/*
* SetOperation is used to configure ChooseWithPriority() and must have:
*
* static WorkQueue* GetWithPriority(const WorkQueueSets& sets,
* TaskQueue::QueuePriority priority);
*
* static WorkQueue* GetWithPriorityAndEnqueueOrder(
* const WorkQueueSets& sets,
* TaskQueue::QueuePriority priority,
* EnqueueOrder* enqueue_order);
*/
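// For example, SelectWorkQueueToService() instantiates
// ChooseWithPriority<SetOperationOldest>(priority) in regular builds, and
// ChooseWithPriority<SetOperationRandom>(priority) when random task selection
// is enabled in DCHECK builds.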
// The default SetOperation.
struct SetOperationOldest {
static WorkQueue* GetWithPriority(const WorkQueueSets& sets,
TaskQueue::QueuePriority priority) {
return sets.GetOldestQueueInSet(priority);
}
static WorkQueue* GetWithPriorityAndEnqueueOrder(
const WorkQueueSets& sets,
TaskQueue::QueuePriority priority,
EnqueueOrder* enqueue_order) {
return sets.GetOldestQueueAndEnqueueOrderInSet(priority, enqueue_order);
}
};
#if DCHECK_IS_ON()
struct SetOperationRandom {
static WorkQueue* GetWithPriority(const WorkQueueSets& sets,
TaskQueue::QueuePriority priority) {
return sets.GetRandomQueueInSet(priority);
}
static WorkQueue* GetWithPriorityAndEnqueueOrder(
const WorkQueueSets& sets,
TaskQueue::QueuePriority priority,
EnqueueOrder* enqueue_order) {
return sets.GetRandomQueueAndEnqueueOrderInSet(priority, enqueue_order);
}
};
#endif // DCHECK_IS_ON()
template <typename SetOperation>
WorkQueue* ChooseWithPriority(TaskQueue::QueuePriority priority) const {
// Select an immediate work queue if we are starving immediate tasks.
if (immediate_starvation_count_ >= kMaxDelayedStarvationTasks) {
WorkQueue* queue =
SetOperation::GetWithPriority(immediate_work_queue_sets_, priority);
if (queue)
return queue;
return SetOperation::GetWithPriority(delayed_work_queue_sets_, priority);
}
return ChooseImmediateOrDelayedTaskWithPriority<SetOperation>(priority);
}
private:
void ChangeSetIndex(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority);
void AddQueueImpl(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority);
void RemoveQueueImpl(internal::TaskQueueImpl* queue);
#if DCHECK_IS_ON() || !defined(NDEBUG)
bool CheckContainsQueueForTest(const internal::TaskQueueImpl* queue) const;
#endif
template <typename SetOperation>
WorkQueue* ChooseImmediateOrDelayedTaskWithPriority(
TaskQueue::QueuePriority priority) const {
EnqueueOrder immediate_enqueue_order;
WorkQueue* immediate_queue = SetOperation::GetWithPriorityAndEnqueueOrder(
immediate_work_queue_sets_, priority, &immediate_enqueue_order);
if (immediate_queue) {
EnqueueOrder delayed_enqueue_order;
WorkQueue* delayed_queue = SetOperation::GetWithPriorityAndEnqueueOrder(
delayed_work_queue_sets_, priority, &delayed_enqueue_order);
if (!delayed_queue)
return immediate_queue;
if (immediate_enqueue_order < delayed_enqueue_order) {
return immediate_queue;
} else {
return delayed_queue;
}
}
return SetOperation::GetWithPriority(delayed_work_queue_sets_, priority);
}
// Returns the priority which is next after |priority|.
static TaskQueue::QueuePriority NextPriority(
TaskQueue::QueuePriority priority);
// Returns true if there are pending tasks with priority |priority|.
bool HasTasksWithPriority(TaskQueue::QueuePriority priority);
scoped_refptr<AssociatedThreadId> associated_thread_;
#if DCHECK_IS_ON()
const bool random_task_selection_ = false;
#endif
// Count of the number of sets (delayed or immediate) for each priority.
// Should only contain 0, 1 or 2.
std::array<int, TaskQueue::kQueuePriorityCount> non_empty_set_counts_ = {{0}};
static constexpr const int kMaxNonEmptySetCount = 2;
// List of active priorities, which is used to work out which priority to run
// next.
ActivePriorityTracker active_priority_tracker_;
WorkQueueSets delayed_work_queue_sets_;
WorkQueueSets immediate_work_queue_sets_;
size_t immediate_starvation_count_ = 0;
Observer* task_queue_selector_observer_ = nullptr; // Not owned.
DISALLOW_COPY_AND_ASSIGN(TaskQueueSelector);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_

View file

@ -0,0 +1,37 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_
namespace base {
namespace sequence_manager {
namespace internal {
// Used to describe the logic triggered when a task queue is selected for
// service.
// This enum is used for histograms and should not be renumbered.
enum class TaskQueueSelectorLogic {
// Selected due to priority rules.
kControlPriorityLogic = 0,
kHighestPriorityLogic = 1,
kHighPriorityLogic = 2,
kNormalPriorityLogic = 3,
kLowPriorityLogic = 4,
kBestEffortPriorityLogic = 5,
// Selected due to starvation logic.
kHighPriorityStarvationLogic = 6,
kNormalPriorityStarvationLogic = 7,
kLowPriorityStarvationLogic = 8,
kCount = 9,
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_

View file

@ -0,0 +1,32 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_
#include "base/time/time.h"
namespace base {
namespace sequence_manager {
// TaskTimeObserver provides an API for observing completion of tasks.
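// Illustrative sketch of an observer (hypothetical, not part of this file):
//   class DurationLogger : public TaskTimeObserver {
//    public:
//     void WillProcessTask(TimeTicks start_time) override {}
//     void DidProcessTask(TimeTicks start_time, TimeTicks end_time) override {
//       DVLOG(1) << "Task took "
//                << (end_time - start_time).InMillisecondsF() << " ms";
//     }
//   };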
class TaskTimeObserver {
public:
TaskTimeObserver() = default;
virtual ~TaskTimeObserver() = default;
// To be called when task is about to start.
virtual void WillProcessTask(TimeTicks start_time) = 0;
// To be called when task is completed.
virtual void DidProcessTask(TimeTicks start_time, TimeTicks end_time) = 0;
private:
DISALLOW_COPY_AND_ASSIGN(TaskTimeObserver);
};
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_

View file

@ -0,0 +1,65 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/tasks.h"
namespace base {
namespace sequence_manager {
Task::Task(internal::PostedTask posted_task,
TimeTicks delayed_run_time,
EnqueueOrder sequence_order,
EnqueueOrder enqueue_order,
internal::WakeUpResolution resolution)
: PendingTask(posted_task.location,
std::move(posted_task.callback),
delayed_run_time,
posted_task.nestable),
task_type(posted_task.task_type),
task_runner(std::move(posted_task.task_runner)),
enqueue_order_(enqueue_order) {
// We use |sequence_num| in DelayedWakeUp for ordering purposes and it
// may wrap around to a negative number during the static cast, hence,
// the relevant code is especially sensitive to a potential change of
// |PendingTask::sequence_num|'s type.
static_assert(std::is_same<decltype(sequence_num), int>::value, "");
sequence_num = static_cast<int>(sequence_order);
this->is_high_res = resolution == internal::WakeUpResolution::kHigh;
queue_time = posted_task.queue_time;
}
Task::Task(Task&& move_from) = default;
Task::~Task() = default;
Task& Task::operator=(Task&& other) = default;
namespace internal {
PostedTask::PostedTask(scoped_refptr<SequencedTaskRunner> task_runner,
OnceClosure callback,
Location location,
TimeDelta delay,
Nestable nestable,
TaskType task_type)
: callback(std::move(callback)),
location(location),
delay(delay),
nestable(nestable),
task_type(task_type),
task_runner(std::move(task_runner)) {}
PostedTask::PostedTask(PostedTask&& move_from) noexcept
: callback(std::move(move_from.callback)),
location(move_from.location),
delay(move_from.delay),
nestable(move_from.nestable),
task_type(move_from.task_type),
task_runner(std::move(move_from.task_runner)),
queue_time(move_from.queue_time) {}
PostedTask::~PostedTask() = default;
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,128 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASKS_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASKS_H_
#include "base/pending_task.h"
#include "base/sequenced_task_runner.h"
#include "base/task/sequence_manager/enqueue_order.h"
namespace base {
namespace sequence_manager {
using TaskType = uint8_t;
constexpr TaskType kTaskTypeNone = 0;
namespace internal {
enum class WakeUpResolution { kLow, kHigh };
// Wrapper around PostTask method arguments and the assigned task type.
// Eventually it becomes a PendingTask once accepted by a TaskQueueImpl.
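// Illustrative sketch of a call site (hypothetical; the callback, delay and
// other arguments below are assumptions, not taken from this file):
//   PostedTask posted(task_runner, BindOnce(&SomeFunction), FROM_HERE,
//                     TimeDelta::FromMilliseconds(10), Nestable::kNestable,
//                     kTaskTypeNone);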
struct BASE_EXPORT PostedTask {
explicit PostedTask(scoped_refptr<SequencedTaskRunner> task_runner,
OnceClosure callback = OnceClosure(),
Location location = Location(),
TimeDelta delay = TimeDelta(),
Nestable nestable = Nestable::kNestable,
TaskType task_type = kTaskTypeNone);
PostedTask(PostedTask&& move_from) noexcept;
~PostedTask();
OnceClosure callback;
Location location;
TimeDelta delay;
Nestable nestable;
TaskType task_type;
// The task runner this task is running on. Can be used by task runners that
// support posting back to the "current sequence".
scoped_refptr<SequencedTaskRunner> task_runner;
// The time at which the task was queued.
TimeTicks queue_time;
DISALLOW_COPY_AND_ASSIGN(PostedTask);
};
// Represents a time at which a task wants to run. Tasks scheduled for the
// same point in time will be ordered by their sequence numbers.
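// Illustrative example: two wake-ups with equal |time| are ordered by
// |sequence_num| via the subtraction in operator<=, which stays correct even
// after the counter has wrapped to a negative value, so the wake-up enqueued
// first still sorts first.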
struct DelayedWakeUp {
TimeTicks time;
int sequence_num;
bool operator!=(const DelayedWakeUp& other) const {
return time != other.time || other.sequence_num != sequence_num;
}
bool operator==(const DelayedWakeUp& other) const {
return !(*this != other);
}
bool operator<=(const DelayedWakeUp& other) const {
if (time == other.time) {
// Debug gcc builds can compare an element against itself.
DCHECK(sequence_num != other.sequence_num || this == &other);
// |sequence_num| is an int and might wrap around to a negative number when
// cast from EnqueueOrder. This way of comparison handles that properly.
return (sequence_num - other.sequence_num) <= 0;
}
return time < other.time;
}
};
} // namespace internal
// PendingTask with extra metadata for SequenceManager.
struct BASE_EXPORT Task : public PendingTask {
Task(internal::PostedTask posted_task,
TimeTicks delayed_run_time,
EnqueueOrder sequence_order,
EnqueueOrder enqueue_order = EnqueueOrder(),
internal::WakeUpResolution wake_up_resolution =
internal::WakeUpResolution::kLow);
Task(Task&& move_from);
~Task();
Task& operator=(Task&& other);
internal::DelayedWakeUp delayed_wake_up() const {
return internal::DelayedWakeUp{delayed_run_time, sequence_num};
}
// SequenceManager is particularly sensitive to enqueue order,
// so we have accessors for safety.
EnqueueOrder enqueue_order() const {
DCHECK(enqueue_order_);
return enqueue_order_;
}
void set_enqueue_order(EnqueueOrder enqueue_order) {
DCHECK(!enqueue_order_);
enqueue_order_ = enqueue_order;
}
bool enqueue_order_set() const { return enqueue_order_; }
TaskType task_type;
// The task runner this task is running on. Can be used by task runners that
// support posting back to the "current sequence".
scoped_refptr<SequencedTaskRunner> task_runner;
#if DCHECK_IS_ON()
bool cross_thread_;
#endif
private:
// Similar to |sequence_num|, but ultimately the |enqueue_order| is what
// the scheduler uses for task ordering. For immediate tasks |enqueue_order|
// is set when posted, but for delayed tasks it's not defined until they are
// enqueued. This is because otherwise delayed tasks could run before
// an immediate task posted after the delayed task.
EnqueueOrder enqueue_order_;
};
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASKS_H_

View file

@ -0,0 +1,124 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_H_
#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_H_
#include "base/message_loop/message_pump.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/task/sequence_manager/lazy_now.h"
#include "base/time/time.h"
#include "build/build_config.h"
namespace base {
class MessageLoopBase;
class TickClock;
struct PendingTask;
namespace sequence_manager {
namespace internal {
class AssociatedThreadId;
class SequencedTaskSource;
// Implementation of this interface is used by SequenceManager to schedule
// actual work to be run. Hopefully we can stop using MessageLoop and this
// interface will become more concise.
class ThreadController {
public:
virtual ~ThreadController() = default;
// Sets the number of tasks executed in a single invocation of DoWork.
// Increasing the batch size can reduce the overhead of yielding back to the
// main message loop.
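// For example (illustrative), SetWorkBatchSize(2) lets a single DoWork
// invocation run up to two tasks before yielding back to the message loop.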
virtual void SetWorkBatchSize(int work_batch_size = 1) = 0;
// Notifies that |pending_task| is about to be enqueued. Needed for tracing
// purposes. The impl may use this opportunity to add metadata to
// |pending_task| before it is moved into the queue.
virtual void WillQueueTask(PendingTask* pending_task,
const char* task_queue_name) = 0;
// Notify the controller that its associated sequence has immediate work
// to run. Shortly after this is called, the thread associated with this
// controller will run a task returned by sequence->TakeTask(). Can be called
// from any sequence.
//
// TODO(altimin): Change this to "the thread associated with this
// controller will run tasks returned by sequence->TakeTask() until it
// returns null or sequence->DidRunTask() returns false" once the
// code is changed to work that way.
virtual void ScheduleWork() = 0;
// Notify the controller that the SequencedTaskSource will have delayed work
// ready to be run at |run_time|. This call cancels any previously
// scheduled delayed work. Can only be called from the main sequence.
// NOTE: DelayTillNextTask might return a different value as it also takes
// immediate work into account.
// TODO(kraynov): Remove |lazy_now| parameter.
virtual void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) = 0;
// Sets the sequenced task source from which to take tasks after
// a Schedule*Work() call is made.
// Must be called before the first call to Schedule*Work().
virtual void SetSequencedTaskSource(SequencedTaskSource*) = 0;
// Requests desired timer precision from the OS.
// Has no effect on some platforms.
virtual void SetTimerSlack(TimerSlack timer_slack) = 0;
// Completes delayed initialization of unbound ThreadControllers.
// BindToCurrentThread(MessageLoopBase*) or BindToCurrentThread(MessagePump*)
// may only be called once.
virtual void BindToCurrentThread(
std::unique_ptr<MessagePump> message_pump) = 0;
// Explicitly allow or disallow task execution. Implicitly disallowed when
// entering a nested runloop.
virtual void SetTaskExecutionAllowed(bool allowed) = 0;
// Whether task execution is allowed or not.
virtual bool IsTaskExecutionAllowed() const = 0;
// Returns the MessagePump we're bound to if any.
virtual MessagePump* GetBoundMessagePump() const = 0;
// Returns true if the current run loop should quit when idle.
virtual bool ShouldQuitRunLoopWhenIdle() = 0;
#if defined(OS_IOS) || defined(OS_ANDROID)
// On iOS, the main message loop cannot be Run(). Instead call
// AttachToMessagePump(), which connects this ThreadController to the
// UI thread's CFRunLoop and allows PostTask() to work.
virtual void AttachToMessagePump() = 0;
#endif
#if defined(OS_IOS)
// Detaches this ThreadController from the message pump, allowing the
// controller to be shut down cleanly.
virtual void DetachFromMessagePump() = 0;
#endif
// TODO(altimin): Get rid of the methods below.
// These methods exist due to current integration of SequenceManager
// with MessageLoop.
virtual bool RunsTasksInCurrentSequence() = 0;
virtual const TickClock* GetClock() = 0;
virtual void SetDefaultTaskRunner(scoped_refptr<SingleThreadTaskRunner>) = 0;
virtual scoped_refptr<SingleThreadTaskRunner> GetDefaultTaskRunner() = 0;
virtual void RestoreDefaultTaskRunner() = 0;
virtual void AddNestingObserver(RunLoop::NestingObserver* observer) = 0;
virtual void RemoveNestingObserver(RunLoop::NestingObserver* observer) = 0;
virtual const scoped_refptr<AssociatedThreadId>& GetAssociatedThread()
const = 0;
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_H_

View file

@ -0,0 +1,327 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/thread_controller_impl.h"
#include <algorithm>
#include "base/bind.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump.h"
#include "base/run_loop.h"
#include "base/task/sequence_manager/lazy_now.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace sequence_manager {
namespace internal {
using ShouldScheduleWork = WorkDeduplicator::ShouldScheduleWork;
ThreadControllerImpl::ThreadControllerImpl(
SequenceManagerImpl* funneled_sequence_manager,
scoped_refptr<SingleThreadTaskRunner> task_runner,
const TickClock* time_source)
: funneled_sequence_manager_(funneled_sequence_manager),
task_runner_(task_runner),
associated_thread_(AssociatedThreadId::CreateUnbound()),
message_loop_task_runner_(funneled_sequence_manager
? funneled_sequence_manager->GetTaskRunner()
: nullptr),
time_source_(time_source),
work_deduplicator_(associated_thread_) {
if (task_runner_ || funneled_sequence_manager_)
work_deduplicator_.BindToCurrentThread();
immediate_do_work_closure_ =
BindRepeating(&ThreadControllerImpl::DoWork, weak_factory_.GetWeakPtr(),
WorkType::kImmediate);
delayed_do_work_closure_ =
BindRepeating(&ThreadControllerImpl::DoWork, weak_factory_.GetWeakPtr(),
WorkType::kDelayed);
}
ThreadControllerImpl::~ThreadControllerImpl() = default;
ThreadControllerImpl::MainSequenceOnly::MainSequenceOnly() = default;
ThreadControllerImpl::MainSequenceOnly::~MainSequenceOnly() = default;
std::unique_ptr<ThreadControllerImpl> ThreadControllerImpl::Create(
SequenceManagerImpl* funneled_sequence_manager,
const TickClock* time_source) {
return WrapUnique(new ThreadControllerImpl(
funneled_sequence_manager,
funneled_sequence_manager ? funneled_sequence_manager->GetTaskRunner()
: nullptr,
time_source));
}
void ThreadControllerImpl::SetSequencedTaskSource(
SequencedTaskSource* sequence) {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
DCHECK(sequence);
DCHECK(!sequence_);
sequence_ = sequence;
}
void ThreadControllerImpl::SetTimerSlack(TimerSlack timer_slack) {
if (!funneled_sequence_manager_)
return;
funneled_sequence_manager_->SetTimerSlack(timer_slack);
}
void ThreadControllerImpl::ScheduleWork() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
"ThreadControllerImpl::ScheduleWork::PostTask");
if (work_deduplicator_.OnWorkRequested() ==
ShouldScheduleWork::kScheduleImmediate)
task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
}
void ThreadControllerImpl::SetNextDelayedDoWork(LazyNow* lazy_now,
TimeTicks run_time) {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
DCHECK(sequence_);
if (main_sequence_only().next_delayed_do_work == run_time)
return;
// Cancel DoWork if it was scheduled and we set an "infinite" delay now.
if (run_time == TimeTicks::Max()) {
cancelable_delayed_do_work_closure_.Cancel();
main_sequence_only().next_delayed_do_work = TimeTicks::Max();
return;
}
if (work_deduplicator_.OnDelayedWorkRequested() ==
ShouldScheduleWork::kNotNeeded) {
return;
}
base::TimeDelta delay = std::max(TimeDelta(), run_time - lazy_now->Now());
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
"ThreadControllerImpl::SetNextDelayedDoWork::PostDelayedTask",
"delay_ms", delay.InMillisecondsF());
main_sequence_only().next_delayed_do_work = run_time;
// Reset also causes cancellation of the previous DoWork task.
cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
task_runner_->PostDelayedTask(
FROM_HERE, cancelable_delayed_do_work_closure_.callback(), delay);
}
bool ThreadControllerImpl::RunsTasksInCurrentSequence() {
return task_runner_->RunsTasksInCurrentSequence();
}
const TickClock* ThreadControllerImpl::GetClock() {
return time_source_;
}
void ThreadControllerImpl::SetDefaultTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) {
#if DCHECK_IS_ON()
default_task_runner_set_ = true;
#endif
if (!funneled_sequence_manager_)
return;
funneled_sequence_manager_->SetTaskRunner(task_runner);
}
scoped_refptr<SingleThreadTaskRunner>
ThreadControllerImpl::GetDefaultTaskRunner() {
return funneled_sequence_manager_->GetTaskRunner();
}
void ThreadControllerImpl::RestoreDefaultTaskRunner() {
if (!funneled_sequence_manager_)
return;
funneled_sequence_manager_->SetTaskRunner(message_loop_task_runner_);
}
void ThreadControllerImpl::BindToCurrentThread(
std::unique_ptr<MessagePump> message_pump) {
NOTREACHED();
}
void ThreadControllerImpl::WillQueueTask(PendingTask* pending_task,
const char* task_queue_name) {
task_annotator_.WillQueueTask("SequenceManager PostTask", pending_task,
task_queue_name);
}
void ThreadControllerImpl::DoWork(WorkType work_type) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
"ThreadControllerImpl::DoWork");
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
DCHECK(sequence_);
work_deduplicator_.OnWorkStarted();
WeakPtr<ThreadControllerImpl> weak_ptr = weak_factory_.GetWeakPtr();
// TODO(scheduler-dev): Consider moving to a time based work batch instead.
for (int i = 0; i < main_sequence_only().work_batch_size_; i++) {
Task* task = sequence_->SelectNextTask();
if (!task)
break;
// Trace-parsing tools (DevTools, Lighthouse, etc) consume this event
// to determine long tasks.
// The event scope must span across DidRunTask call below to make sure
// it covers RunMicrotasks event.
// See https://crbug.com/681863 and https://crbug.com/874982
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "RunTask");
{
// Trace events should finish before we call DidRunTask to ensure that
// SequenceManager trace events do not interfere with them.
TRACE_TASK_EXECUTION("ThreadControllerImpl::RunTask", *task);
task_annotator_.RunTask("SequenceManager RunTask", task);
}
if (!weak_ptr)
return;
sequence_->DidRunTask();
// NOTE: https://crbug.com/828835.
// When we're running inside a nested RunLoop it may quit anytime, so any
// outstanding pending tasks must run in the outer RunLoop
// (see SequenceManagerTestWithMessageLoop.QuitWhileNested test).
// Unfortunately, it's the MessageLoop that's receiving that signal and we can't
// know it before we return from DoWork, hence, OnExitNestedRunLoop
// will be called later. Since we must implement ThreadController and
// SequenceManager in conformance with MessageLoop task runners, we need
// to disable this batching optimization while nested.
// Implementing MessagePump::Delegate ourselves will help to resolve this
// issue.
if (main_sequence_only().nesting_depth > 0)
break;
}
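// WillCheckForMoreWork()/DidCheckForMoreWork() bracket the query below so that
// an immediate task posted from another thread while we compute the delay is
// still picked up (see the kIsDelayed branch further down).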
work_deduplicator_.WillCheckForMoreWork();
LazyNow lazy_now(time_source_);
TimeDelta delay_till_next_task = sequence_->DelayTillNextTask(&lazy_now);
// The OnSystemIdle callback allows the TimeDomains to advance virtual time,
// in which case we now have immediate work to do.
if (delay_till_next_task <= TimeDelta() || sequence_->OnSystemIdle()) {
// The next task needs to run immediately, post a continuation if
// another thread didn't get there first.
if (work_deduplicator_.DidCheckForMoreWork(
WorkDeduplicator::NextTask::kIsImmediate) ==
ShouldScheduleWork::kScheduleImmediate) {
task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
}
return;
}
// It looks like we have a non-zero delay, however another thread may have
// posted an immediate task while we computed the delay.
if (work_deduplicator_.DidCheckForMoreWork(
WorkDeduplicator::NextTask::kIsDelayed) ==
ShouldScheduleWork::kScheduleImmediate) {
task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
return;
}
// Check if there's no future work.
if (delay_till_next_task == TimeDelta::Max()) {
main_sequence_only().next_delayed_do_work = TimeTicks::Max();
cancelable_delayed_do_work_closure_.Cancel();
return;
}
// Check if we've already requested the required delay.
TimeTicks next_task_at = lazy_now.Now() + delay_till_next_task;
if (next_task_at == main_sequence_only().next_delayed_do_work)
return;
// Schedule a callback after |delay_till_next_task| and cancel any previous
// callback.
main_sequence_only().next_delayed_do_work = next_task_at;
cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
task_runner_->PostDelayedTask(FROM_HERE,
cancelable_delayed_do_work_closure_.callback(),
delay_till_next_task);
}
void ThreadControllerImpl::AddNestingObserver(
RunLoop::NestingObserver* observer) {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
nesting_observer_ = observer;
RunLoop::AddNestingObserverOnCurrentThread(this);
}
void ThreadControllerImpl::RemoveNestingObserver(
RunLoop::NestingObserver* observer) {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
DCHECK_EQ(observer, nesting_observer_);
nesting_observer_ = nullptr;
RunLoop::RemoveNestingObserverOnCurrentThread(this);
}
const scoped_refptr<AssociatedThreadId>&
ThreadControllerImpl::GetAssociatedThread() const {
return associated_thread_;
}
void ThreadControllerImpl::OnBeginNestedRunLoop() {
main_sequence_only().nesting_depth++;
// Just assume we have a pending task and post a DoWork to make sure we don't
// grind to a halt while nested.
work_deduplicator_.OnWorkRequested(); // Set the pending DoWork flag.
task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
if (nesting_observer_)
nesting_observer_->OnBeginNestedRunLoop();
}
void ThreadControllerImpl::OnExitNestedRunLoop() {
main_sequence_only().nesting_depth--;
if (nesting_observer_)
nesting_observer_->OnExitNestedRunLoop();
}
void ThreadControllerImpl::SetWorkBatchSize(int work_batch_size) {
main_sequence_only().work_batch_size_ = work_batch_size;
}
void ThreadControllerImpl::SetTaskExecutionAllowed(bool allowed) {
NOTREACHED();
}
bool ThreadControllerImpl::IsTaskExecutionAllowed() const {
return true;
}
bool ThreadControllerImpl::ShouldQuitRunLoopWhenIdle() {
// The MessageLoop does not expose the API needed to support this query.
return false;
}
MessagePump* ThreadControllerImpl::GetBoundMessagePump() const {
return nullptr;
}
#if defined(OS_IOS) || defined(OS_ANDROID)
void ThreadControllerImpl::AttachToMessagePump() {
NOTREACHED();
}
#endif // OS_IOS || OS_ANDROID
#if defined(OS_IOS)
void ThreadControllerImpl::DetachFromMessagePump() {
NOTREACHED();
}
#endif // OS_IOS
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,138 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_
#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_
#include <memory>
#include "base/cancelable_callback.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/run_loop.h"
#include "base/sequence_checker.h"
#include "base/single_thread_task_runner.h"
#include "base/task/common/task_annotator.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/thread_controller.h"
#include "base/task/sequence_manager/work_deduplicator.h"
#include "build/build_config.h"
namespace base {
namespace sequence_manager {
namespace internal {
class SequenceManagerImpl;
// This is the ThreadController used by a SequenceManager which sits on top of
// an underlying SequenceManagerImpl or SingleThreadTaskRunner. Currently it's
// only used for workers in Blink, although we intend to migrate those to
// ThreadControllerWithMessagePumpImpl (https://crbug.com/948051). Long term we
// intend to use this for sequence funneling.
class BASE_EXPORT ThreadControllerImpl : public ThreadController,
public RunLoop::NestingObserver {
public:
~ThreadControllerImpl() override;
// TODO(https://crbug.com/948051): replace |funneled_sequence_manager| with
// |funneled_task_runner| when we sort out the workers
static std::unique_ptr<ThreadControllerImpl> Create(
SequenceManagerImpl* funneled_sequence_manager,
const TickClock* time_source);
// ThreadController:
void SetWorkBatchSize(int work_batch_size) override;
void WillQueueTask(PendingTask* pending_task,
const char* task_queue_name) override;
void ScheduleWork() override;
void BindToCurrentThread(std::unique_ptr<MessagePump> message_pump) override;
void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override;
void SetSequencedTaskSource(SequencedTaskSource* sequence) override;
void SetTimerSlack(TimerSlack timer_slack) override;
bool RunsTasksInCurrentSequence() override;
const TickClock* GetClock() override;
void SetDefaultTaskRunner(scoped_refptr<SingleThreadTaskRunner>) override;
scoped_refptr<SingleThreadTaskRunner> GetDefaultTaskRunner() override;
void RestoreDefaultTaskRunner() override;
void AddNestingObserver(RunLoop::NestingObserver* observer) override;
void RemoveNestingObserver(RunLoop::NestingObserver* observer) override;
const scoped_refptr<AssociatedThreadId>& GetAssociatedThread() const override;
void SetTaskExecutionAllowed(bool allowed) override;
bool IsTaskExecutionAllowed() const override;
MessagePump* GetBoundMessagePump() const override;
#if defined(OS_IOS) || defined(OS_ANDROID)
void AttachToMessagePump() override;
#endif
#if defined(OS_IOS)
void DetachFromMessagePump() override;
#endif
bool ShouldQuitRunLoopWhenIdle() override;
// RunLoop::NestingObserver:
void OnBeginNestedRunLoop() override;
void OnExitNestedRunLoop() override;
protected:
ThreadControllerImpl(SequenceManagerImpl* sequence_manager,
scoped_refptr<SingleThreadTaskRunner> task_runner,
const TickClock* time_source);
// TODO(altimin): Make these const. Blocked on removing
// lazy initialisation support.
SequenceManagerImpl* funneled_sequence_manager_;
scoped_refptr<SingleThreadTaskRunner> task_runner_;
RunLoop::NestingObserver* nesting_observer_ = nullptr;
private:
enum class WorkType { kImmediate, kDelayed };
void DoWork(WorkType work_type);
// TODO(scheduler-dev): Maybe fold this into the main class and use
// thread annotations.
struct MainSequenceOnly {
MainSequenceOnly();
~MainSequenceOnly();
int nesting_depth = 0;
int work_batch_size_ = 1;
TimeTicks next_delayed_do_work = TimeTicks::Max();
};
scoped_refptr<AssociatedThreadId> associated_thread_;
MainSequenceOnly main_sequence_only_;
MainSequenceOnly& main_sequence_only() {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
return main_sequence_only_;
}
const MainSequenceOnly& main_sequence_only() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
return main_sequence_only_;
}
scoped_refptr<SingleThreadTaskRunner> message_loop_task_runner_;
const TickClock* time_source_;
RepeatingClosure immediate_do_work_closure_;
RepeatingClosure delayed_do_work_closure_;
CancelableClosure cancelable_delayed_do_work_closure_;
SequencedTaskSource* sequence_ = nullptr; // Not owned.
TaskAnnotator task_annotator_;
WorkDeduplicator work_deduplicator_;
#if DCHECK_IS_ON()
bool default_task_runner_set_ = false;
#endif
WeakPtrFactory<ThreadControllerImpl> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(ThreadControllerImpl);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_

View file

@ -0,0 +1,522 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
#include "base/auto_reset.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump.h"
#include "base/threading/hang_watcher.h"
#include "base/time/tick_clock.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#if defined(OS_IOS)
#include "base/message_loop/message_pump_mac.h"
#elif defined(OS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif
namespace base {
namespace sequence_manager {
namespace internal {
namespace {
// Returns |next_run_time| capped at 1 day from |lazy_now|. This is used to
// mitigate https://crbug.com/850450 where some platforms are unhappy with
// delays > 100,000,000 seconds. In practice, a diagnosis metric showed that no
// sleep > 1 hour ever completes (always interrupted by an earlier MessageLoop
// event) and 99% of completed sleeps are the ones scheduled for <= 1 second.
// Details @ https://crrev.com/c/1142589.
TimeTicks CapAtOneDay(TimeTicks next_run_time, LazyNow* lazy_now) {
return std::min(next_run_time, lazy_now->Now() + TimeDelta::FromDays(1));
}
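// Illustrative example: a wake-up requested three days from now is handed to
// the pump as |lazy_now->Now() + 1 day|; SetNextDelayedDoWork() below still
// records the exact requested time, so the cap is simply re-applied on each
// later wake-up.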
} // namespace
ThreadControllerWithMessagePumpImpl::ThreadControllerWithMessagePumpImpl(
const SequenceManager::Settings& settings)
: associated_thread_(AssociatedThreadId::CreateUnbound()),
work_deduplicator_(associated_thread_),
#if DCHECK_IS_ON()
log_runloop_quit_and_quit_when_idle_(
settings.log_runloop_quit_and_quit_when_idle),
#endif
time_source_(settings.clock) {
}
ThreadControllerWithMessagePumpImpl::ThreadControllerWithMessagePumpImpl(
std::unique_ptr<MessagePump> message_pump,
const SequenceManager::Settings& settings)
: ThreadControllerWithMessagePumpImpl(settings) {
BindToCurrentThread(std::move(message_pump));
}
ThreadControllerWithMessagePumpImpl::~ThreadControllerWithMessagePumpImpl() {
// Destructors of MessagePump::Delegate and ThreadTaskRunnerHandle
// will do all the clean-up.
// ScopedSetSequenceLocalStorageMapForCurrentThread destructor will
// de-register the current thread as a sequence.
}
// static
std::unique_ptr<ThreadControllerWithMessagePumpImpl>
ThreadControllerWithMessagePumpImpl::CreateUnbound(
const SequenceManager::Settings& settings) {
return base::WrapUnique(new ThreadControllerWithMessagePumpImpl(settings));
}
ThreadControllerWithMessagePumpImpl::MainThreadOnly::MainThreadOnly() = default;
ThreadControllerWithMessagePumpImpl::MainThreadOnly::~MainThreadOnly() =
default;
void ThreadControllerWithMessagePumpImpl::SetSequencedTaskSource(
SequencedTaskSource* task_source) {
DCHECK(task_source);
DCHECK(!main_thread_only().task_source);
main_thread_only().task_source = task_source;
}
void ThreadControllerWithMessagePumpImpl::BindToCurrentThread(
std::unique_ptr<MessagePump> message_pump) {
associated_thread_->BindToCurrentThread();
pump_ = std::move(message_pump);
work_id_provider_ = WorkIdProvider::GetForCurrentThread();
RunLoop::RegisterDelegateForCurrentThread(this);
scoped_set_sequence_local_storage_map_for_current_thread_ = std::make_unique<
base::internal::ScopedSetSequenceLocalStorageMapForCurrentThread>(
&sequence_local_storage_map_);
{
base::internal::CheckedAutoLock task_runner_lock(task_runner_lock_);
if (task_runner_)
InitializeThreadTaskRunnerHandle();
}
if (work_deduplicator_.BindToCurrentThread() ==
ShouldScheduleWork::kScheduleImmediate) {
pump_->ScheduleWork();
}
}
void ThreadControllerWithMessagePumpImpl::SetWorkBatchSize(
int work_batch_size) {
DCHECK_GE(work_batch_size, 1);
main_thread_only().work_batch_size = work_batch_size;
}
void ThreadControllerWithMessagePumpImpl::SetTimerSlack(
TimerSlack timer_slack) {
DCHECK(RunsTasksInCurrentSequence());
pump_->SetTimerSlack(timer_slack);
}
void ThreadControllerWithMessagePumpImpl::WillQueueTask(
PendingTask* pending_task,
const char* task_queue_name) {
task_annotator_.WillQueueTask("SequenceManager PostTask", pending_task,
task_queue_name);
}
void ThreadControllerWithMessagePumpImpl::ScheduleWork() {
base::internal::CheckedLock::AssertNoLockHeldOnCurrentThread();
if (work_deduplicator_.OnWorkRequested() ==
ShouldScheduleWork::kScheduleImmediate) {
pump_->ScheduleWork();
}
}
void ThreadControllerWithMessagePumpImpl::SetNextDelayedDoWork(
LazyNow* lazy_now,
TimeTicks run_time) {
DCHECK_LT(lazy_now->Now(), run_time);
if (main_thread_only().next_delayed_do_work == run_time)
return;
// Cap at one day but remember the exact time for the above equality check on
// the next round.
main_thread_only().next_delayed_do_work = run_time;
run_time = CapAtOneDay(run_time, lazy_now);
// It's very rare for PostDelayedTask to be called outside of a Do(Some)Work
// in production, so most of the time this does nothing.
if (work_deduplicator_.OnDelayedWorkRequested() ==
ShouldScheduleWork::kScheduleImmediate) {
// |pump_| can't be null as all postTasks are cross-thread before binding,
// and delayed cross-thread postTasks do the thread hop through an immediate
// task.
pump_->ScheduleDelayedWork(run_time);
}
}
const TickClock* ThreadControllerWithMessagePumpImpl::GetClock() {
return time_source_;
}
bool ThreadControllerWithMessagePumpImpl::RunsTasksInCurrentSequence() {
return associated_thread_->IsBoundToCurrentThread();
}
void ThreadControllerWithMessagePumpImpl::SetDefaultTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) {
base::internal::CheckedAutoLock lock(task_runner_lock_);
task_runner_ = task_runner;
if (associated_thread_->IsBound()) {
DCHECK(associated_thread_->IsBoundToCurrentThread());
// Thread task runner handle will be created in BindToCurrentThread().
InitializeThreadTaskRunnerHandle();
}
}
void ThreadControllerWithMessagePumpImpl::InitializeThreadTaskRunnerHandle() {
// Only one ThreadTaskRunnerHandle can exist at any time,
// so reset the old one.
main_thread_only().thread_task_runner_handle.reset();
main_thread_only().thread_task_runner_handle =
std::make_unique<ThreadTaskRunnerHandle>(task_runner_);
}
scoped_refptr<SingleThreadTaskRunner>
ThreadControllerWithMessagePumpImpl::GetDefaultTaskRunner() {
base::internal::CheckedAutoLock lock(task_runner_lock_);
return task_runner_;
}
void ThreadControllerWithMessagePumpImpl::RestoreDefaultTaskRunner() {
// There's no default task runner unlike with the MessageLoop.
main_thread_only().thread_task_runner_handle.reset();
}
void ThreadControllerWithMessagePumpImpl::AddNestingObserver(
RunLoop::NestingObserver* observer) {
DCHECK(!main_thread_only().nesting_observer);
DCHECK(observer);
main_thread_only().nesting_observer = observer;
RunLoop::AddNestingObserverOnCurrentThread(this);
}
void ThreadControllerWithMessagePumpImpl::RemoveNestingObserver(
RunLoop::NestingObserver* observer) {
DCHECK_EQ(main_thread_only().nesting_observer, observer);
main_thread_only().nesting_observer = nullptr;
RunLoop::RemoveNestingObserverOnCurrentThread(this);
}
const scoped_refptr<AssociatedThreadId>&
ThreadControllerWithMessagePumpImpl::GetAssociatedThread() const {
return associated_thread_;
}
void ThreadControllerWithMessagePumpImpl::BeforeDoInternalWork() {
// Nested runloops are covered by the parent loop hang watch scope.
// TODO(crbug/1034046): Provide more granular scoping that reuses the parent
// scope deadline.
if (main_thread_only().runloop_count == 1) {
hang_watch_scope_.emplace(base::HangWatchScope::kDefaultHangWatchTime);
}
work_id_provider_->IncrementWorkId();
}
void ThreadControllerWithMessagePumpImpl::BeforeWait() {
// Nested runloops are covered by the parent loop hang watch scope.
// TODO(crbug/1034046): Provide more granular scoping that reuses the parent
// scope deadline.
if (main_thread_only().runloop_count == 1) {
// Waiting for work cannot be covered by a hang watch scope because that
// means the thread can be idle for unbounded time.
hang_watch_scope_.reset();
}
work_id_provider_->IncrementWorkId();
}
MessagePump::Delegate::NextWorkInfo
ThreadControllerWithMessagePumpImpl::DoSomeWork() {
// Nested runloops are covered by the parent loop hang watch scope.
// TODO(crbug/1034046): Provide more granular scoping that reuses the parent
// scope deadline.
if (main_thread_only().runloop_count == 1) {
hang_watch_scope_.emplace(base::HangWatchScope::kDefaultHangWatchTime);
}
work_deduplicator_.OnWorkStarted();
bool ran_task = false; // Unused.
LazyNow continuation_lazy_now(time_source_);
TimeDelta delay_till_next_task =
DoWorkImpl(&continuation_lazy_now, &ran_task);
// Schedule a continuation.
WorkDeduplicator::NextTask next_task =
delay_till_next_task.is_zero() ? WorkDeduplicator::NextTask::kIsImmediate
: WorkDeduplicator::NextTask::kIsDelayed;
if (work_deduplicator_.DidCheckForMoreWork(next_task) ==
ShouldScheduleWork::kScheduleImmediate) {
// Need to run new work immediately, but due to the contract of DoSomeWork
// we only need to return a null TimeTicks to ensure that happens.
return MessagePump::Delegate::NextWorkInfo();
}
// While the math below would saturate when |delay_till_next_task.is_max()|,
// special-casing here avoids unnecessarily sampling Now() when out of work.
if (delay_till_next_task.is_max()) {
main_thread_only().next_delayed_do_work = TimeTicks::Max();
return {TimeTicks::Max()};
}
// The MessagePump will schedule the delay on our behalf, so we need to update
// |main_thread_only().next_delayed_do_work|.
// TODO(gab, alexclarke): Replace DelayTillNextTask() with NextTaskTime() to
// avoid converting back-and-forth between TimeTicks and TimeDelta.
main_thread_only().next_delayed_do_work =
continuation_lazy_now.Now() + delay_till_next_task;
// Don't request a run time past |main_thread_only().quit_runloop_after|.
if (main_thread_only().next_delayed_do_work >
main_thread_only().quit_runloop_after) {
main_thread_only().next_delayed_do_work =
main_thread_only().quit_runloop_after;
// If we've passed |quit_runloop_after| there's no more work to do.
if (continuation_lazy_now.Now() >= main_thread_only().quit_runloop_after)
return {TimeTicks::Max()};
}
return {CapAtOneDay(main_thread_only().next_delayed_do_work,
&continuation_lazy_now),
continuation_lazy_now.Now()};
}
TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl(
LazyNow* continuation_lazy_now,
bool* ran_task) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
"ThreadControllerImpl::DoWork");
if (!main_thread_only().task_execution_allowed) {
if (main_thread_only().quit_runloop_after == TimeTicks::Max())
return TimeDelta::Max();
return main_thread_only().quit_runloop_after - continuation_lazy_now->Now();
}
DCHECK(main_thread_only().task_source);
for (int i = 0; i < main_thread_only().work_batch_size; i++) {
Task* task = main_thread_only().task_source->SelectNextTask();
if (!task)
break;
// Execute the task and assume the worst: it is probably not reentrant.
main_thread_only().task_execution_allowed = false;
work_id_provider_->IncrementWorkId();
// Trace-parsing tools (DevTools, Lighthouse, etc) consume this event
// to determine long tasks.
// The event scope must span across DidRunTask call below to make sure
// it covers RunMicrotasks event.
// See https://crbug.com/681863 and https://crbug.com/874982
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "RunTask");
{
// Trace events should finish before we call DidRunTask to ensure that
// SequenceManager trace events do not interfere with them.
TRACE_TASK_EXECUTION("ThreadControllerImpl::RunTask", *task);
task_annotator_.RunTask("SequenceManager RunTask", task);
}
#if DCHECK_IS_ON()
if (log_runloop_quit_and_quit_when_idle_ && !quit_when_idle_requested_ &&
ShouldQuitWhenIdle()) {
DVLOG(1) << "ThreadControllerWithMessagePumpImpl::QuitWhenIdle";
quit_when_idle_requested_ = true;
}
#endif
*ran_task = true;
main_thread_only().task_execution_allowed = true;
main_thread_only().task_source->DidRunTask();
// When Quit() is called we must stop running the batch because the caller
// expects per-task granularity.
if (main_thread_only().quit_pending)
break;
}
if (main_thread_only().quit_pending)
return TimeDelta::Max();
work_deduplicator_.WillCheckForMoreWork();
TimeDelta do_work_delay =
main_thread_only().task_source->DelayTillNextTask(continuation_lazy_now);
DCHECK_GE(do_work_delay, TimeDelta());
return do_work_delay;
}
bool ThreadControllerWithMessagePumpImpl::DoIdleWork() {
TRACE_EVENT0("sequence_manager", "SequenceManager::DoIdleWork");
// Nested runloops are covered by the parent loop hang watch scope.
// TODO(crbug/1034046): Provide more granular scoping that reuses the parent
// scope deadline.
if (main_thread_only().runloop_count == 1) {
hang_watch_scope_.emplace(base::HangWatchScope::kDefaultHangWatchTime);
}
work_id_provider_->IncrementWorkId();
#if defined(OS_WIN)
bool need_high_res_mode =
main_thread_only().task_source->HasPendingHighResolutionTasks();
if (main_thread_only().in_high_res_mode != need_high_res_mode) {
// On Windows we activate the high resolution timer so that the wait
// _if_ triggered by the timer happens with good resolution. If we don't
// do this the default resolution is 15ms which might not be acceptable
// for some tasks.
main_thread_only().in_high_res_mode = need_high_res_mode;
Time::ActivateHighResolutionTimer(need_high_res_mode);
}
#endif // defined(OS_WIN)
if (main_thread_only().task_source->OnSystemIdle()) {
// The OnSystemIdle() callback resulted in more immediate work, so schedule
// a DoWork callback. For some message pumps returning true from here is
// sufficient to do that but not on mac.
pump_->ScheduleWork();
return false;
}
// Check if any runloop timeout has expired.
if (main_thread_only().quit_runloop_after != TimeTicks::Max() &&
main_thread_only().quit_runloop_after <= time_source_->NowTicks()) {
Quit();
return false;
}
// RunLoop::Delegate knows whether we called Run() or RunUntilIdle().
if (ShouldQuitWhenIdle())
Quit();
return false;
}
void ThreadControllerWithMessagePumpImpl::Run(bool application_tasks_allowed,
TimeDelta timeout) {
DCHECK(RunsTasksInCurrentSequence());
// RunLoops can be nested so we need to restore the previous value of
// |quit_runloop_after| upon exit. NB we could use saturated arithmetic here
// but don't because we have some tests which assert the number of calls to
// Now.
AutoReset<TimeTicks> quit_runloop_after(
&main_thread_only().quit_runloop_after,
(timeout == TimeDelta::Max()) ? TimeTicks::Max()
: time_source_->NowTicks() + timeout);
#if DCHECK_IS_ON()
AutoReset<bool> quit_when_idle_requested(&quit_when_idle_requested_, false);
#endif
// Quit may have been called outside of a Run(), so |quit_pending| might be
// true here. We can't use InTopLevelDoWork() in Quit() as this call may be
// outside top-level DoWork but still in Run().
main_thread_only().quit_pending = false;
main_thread_only().runloop_count++;
if (application_tasks_allowed && !main_thread_only().task_execution_allowed) {
// Allow nested task execution as explicitly requested.
DCHECK(RunLoop::IsNestedOnCurrentThread());
main_thread_only().task_execution_allowed = true;
pump_->Run(this);
main_thread_only().task_execution_allowed = false;
} else {
pump_->Run(this);
}
#if DCHECK_IS_ON()
if (log_runloop_quit_and_quit_when_idle_)
DVLOG(1) << "ThreadControllerWithMessagePumpImpl::Quit";
#endif
main_thread_only().runloop_count--;
main_thread_only().quit_pending = false;
// Reset the hang watch scope upon exiting the outermost loop since the
// execution it covers is now completely over.
if (main_thread_only().runloop_count == 0)
hang_watch_scope_.reset();
}
void ThreadControllerWithMessagePumpImpl::OnBeginNestedRunLoop() {
// We don't need to ScheduleWork here! That's because the call to pump_->Run()
// above, which is always called for RunLoop().Run(), guarantees a call to
// Do(Some)Work on all platforms.
if (main_thread_only().nesting_observer)
main_thread_only().nesting_observer->OnBeginNestedRunLoop();
}
void ThreadControllerWithMessagePumpImpl::OnExitNestedRunLoop() {
if (main_thread_only().nesting_observer)
main_thread_only().nesting_observer->OnExitNestedRunLoop();
}
void ThreadControllerWithMessagePumpImpl::Quit() {
DCHECK(RunsTasksInCurrentSequence());
// Interrupt a batch of work.
main_thread_only().quit_pending = true;
// If we're in a nested RunLoop, a continuation will be posted if necessary.
pump_->Quit();
}
void ThreadControllerWithMessagePumpImpl::EnsureWorkScheduled() {
if (work_deduplicator_.OnWorkRequested() ==
ShouldScheduleWork::kScheduleImmediate)
pump_->ScheduleWork();
}
void ThreadControllerWithMessagePumpImpl::SetTaskExecutionAllowed(
bool allowed) {
if (allowed) {
// We need to schedule work unconditionally because we might be about to
// enter an OS level nested message loop. Unlike a RunLoop().Run() we don't
// get a call to Do(Some)Work on entering for free.
work_deduplicator_.OnWorkRequested(); // Set the pending DoWork flag.
pump_->ScheduleWork();
} else {
// We've (probably) just left an OS level nested message loop. Make sure a
// subsequent PostTask within the same Task doesn't ScheduleWork with the
// pump (this will be done anyway when the task exits).
work_deduplicator_.OnWorkStarted();
}
main_thread_only().task_execution_allowed = allowed;
}
bool ThreadControllerWithMessagePumpImpl::IsTaskExecutionAllowed() const {
return main_thread_only().task_execution_allowed;
}
MessagePump* ThreadControllerWithMessagePumpImpl::GetBoundMessagePump() const {
return pump_.get();
}
#if defined(OS_IOS)
void ThreadControllerWithMessagePumpImpl::AttachToMessagePump() {
static_cast<MessagePumpCFRunLoopBase*>(pump_.get())->Attach(this);
}
void ThreadControllerWithMessagePumpImpl::DetachFromMessagePump() {
static_cast<MessagePumpCFRunLoopBase*>(pump_.get())->Detach();
}
#elif defined(OS_ANDROID)
void ThreadControllerWithMessagePumpImpl::AttachToMessagePump() {
static_cast<MessagePumpForUI*>(pump_.get())->Attach(this);
}
#endif
bool ThreadControllerWithMessagePumpImpl::ShouldQuitRunLoopWhenIdle() {
if (main_thread_only().runloop_count == 0)
return false;
// It's only safe to call ShouldQuitWhenIdle() when in a RunLoop.
return ShouldQuitWhenIdle();
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,193 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_
#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_
#include <memory>
#include "base/message_loop/message_pump.h"
#include "base/message_loop/work_id_provider.h"
#include "base/optional.h"
#include "base/run_loop.h"
#include "base/task/common/checked_lock.h"
#include "base/task/common/task_annotator.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/thread_controller.h"
#include "base/task/sequence_manager/work_deduplicator.h"
#include "base/thread_annotations.h"
#include "base/threading/hang_watcher.h"
#include "base/threading/platform_thread.h"
#include "base/threading/sequence_local_storage_map.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
namespace base {
namespace sequence_manager {
namespace internal {
// This is the interface between the SequenceManager and the MessagePump.
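//
// A rough usage sketch (illustrative only; in production SequenceManagerImpl
// owns and drives this object, and the |settings|, |task_source| and
// MessagePumpDefault values below are assumptions):
//
//   auto controller =
//       ThreadControllerWithMessagePumpImpl::CreateUnbound(settings);
//   controller->SetSequencedTaskSource(task_source);
//   // On the thread that will run the tasks:
//   controller->BindToCurrentThread(std::make_unique<MessagePumpDefault>());
//   RunLoop().Run();  // Enters pump_->Run(), which calls back into
//                     // DoSomeWork() / DoIdleWork().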
class BASE_EXPORT ThreadControllerWithMessagePumpImpl
: public ThreadController,
public MessagePump::Delegate,
public RunLoop::Delegate,
public RunLoop::NestingObserver {
public:
ThreadControllerWithMessagePumpImpl(
std::unique_ptr<MessagePump> message_pump,
const SequenceManager::Settings& settings);
~ThreadControllerWithMessagePumpImpl() override;
using ShouldScheduleWork = WorkDeduplicator::ShouldScheduleWork;
static std::unique_ptr<ThreadControllerWithMessagePumpImpl> CreateUnbound(
const SequenceManager::Settings& settings);
// ThreadController implementation:
void SetSequencedTaskSource(SequencedTaskSource* task_source) override;
void BindToCurrentThread(std::unique_ptr<MessagePump> message_pump) override;
void SetWorkBatchSize(int work_batch_size) override;
void WillQueueTask(PendingTask* pending_task,
const char* task_queue_name) override;
void ScheduleWork() override;
void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override;
void SetTimerSlack(TimerSlack timer_slack) override;
const TickClock* GetClock() override;
bool RunsTasksInCurrentSequence() override;
void SetDefaultTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) override;
scoped_refptr<SingleThreadTaskRunner> GetDefaultTaskRunner() override;
void RestoreDefaultTaskRunner() override;
void AddNestingObserver(RunLoop::NestingObserver* observer) override;
void RemoveNestingObserver(RunLoop::NestingObserver* observer) override;
const scoped_refptr<AssociatedThreadId>& GetAssociatedThread() const override;
void SetTaskExecutionAllowed(bool allowed) override;
bool IsTaskExecutionAllowed() const override;
MessagePump* GetBoundMessagePump() const override;
#if defined(OS_IOS) || defined(OS_ANDROID)
void AttachToMessagePump() override;
#endif
#if defined(OS_IOS)
void DetachFromMessagePump() override;
#endif
bool ShouldQuitRunLoopWhenIdle() override;
// RunLoop::NestingObserver:
void OnBeginNestedRunLoop() override;
void OnExitNestedRunLoop() override;
protected:
explicit ThreadControllerWithMessagePumpImpl(
const SequenceManager::Settings& settings);
// MessagePump::Delegate implementation.
void BeforeDoInternalWork() override;
void BeforeWait() override;
MessagePump::Delegate::NextWorkInfo DoSomeWork() override;
bool DoIdleWork() override;
// RunLoop::Delegate implementation.
void Run(bool application_tasks_allowed, TimeDelta timeout) override;
void Quit() override;
void EnsureWorkScheduled() override;
private:
friend class DoWorkScope;
friend class RunScope;
// Returns the delay till the next task. TimeDelta::Max() is returned if
// there is no known upcoming task or a quit is pending.
TimeDelta DoWorkImpl(LazyNow* continuation_lazy_now, bool* ran_task);
void InitializeThreadTaskRunnerHandle()
EXCLUSIVE_LOCKS_REQUIRED(task_runner_lock_);
struct MainThreadOnly {
MainThreadOnly();
~MainThreadOnly();
SequencedTaskSource* task_source = nullptr; // Not owned.
RunLoop::NestingObserver* nesting_observer = nullptr; // Not owned.
std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle;
// Indicates that we should yield DoWork between each task to let a possibly
// nested RunLoop exit.
bool quit_pending = false;
// Whether high resolution timing is enabled or not.
bool in_high_res_mode = false;
// Number of tasks processed in a single DoWork invocation.
int work_batch_size = 1;
int runloop_count = 0;
// When the next scheduled delayed work should run, if any.
TimeTicks next_delayed_do_work = TimeTicks::Max();
// The time after which the runloop should quit.
TimeTicks quit_runloop_after = TimeTicks::Max();
bool task_execution_allowed = true;
};
MainThreadOnly& main_thread_only() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
const MainThreadOnly& main_thread_only() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
// TODO(altimin): Merge with the one in SequenceManager.
scoped_refptr<AssociatedThreadId> associated_thread_;
MainThreadOnly main_thread_only_;
mutable base::internal::CheckedLock task_runner_lock_;
scoped_refptr<SingleThreadTaskRunner> task_runner_
GUARDED_BY(task_runner_lock_);
WorkDeduplicator work_deduplicator_;
// Can only be set once (just before calling
// work_deduplicator_.BindToCurrentThread()). After that only read access is
// allowed.
std::unique_ptr<MessagePump> pump_;
TaskAnnotator task_annotator_;
#if DCHECK_IS_ON()
const bool log_runloop_quit_and_quit_when_idle_;
bool quit_when_idle_requested_ = false;
#endif
const TickClock* time_source_; // Not owned.
// Non-null provider of id state for identifying distinct work items executed
// by the message loop (task, event, etc.). Cached on the class to avoid TLS
// lookups on task execution.
WorkIdProvider* work_id_provider_ = nullptr;
// Required to register the current thread as a sequence.
base::internal::SequenceLocalStorageMap sequence_local_storage_map_;
std::unique_ptr<
base::internal::ScopedSetSequenceLocalStorageMapForCurrentThread>
scoped_set_sequence_local_storage_map_for_current_thread_;
// Reset at the start of each unit of work to cover the work itself and then
// transition to the next one.
base::Optional<HangWatchScope> hang_watch_scope_;
DISALLOW_COPY_AND_ASSIGN(ThreadControllerWithMessagePumpImpl);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_

View file

@ -0,0 +1,168 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/time_domain.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/work_queue.h"
#include "base/threading/thread_checker.h"
namespace base {
namespace sequence_manager {
TimeDomain::TimeDomain()
: sequence_manager_(nullptr),
associated_thread_(MakeRefCounted<internal::AssociatedThreadId>()) {}
TimeDomain::~TimeDomain() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
}
void TimeDomain::OnRegisterWithSequenceManager(
internal::SequenceManagerImpl* sequence_manager) {
DCHECK(sequence_manager);
DCHECK(!sequence_manager_);
sequence_manager_ = sequence_manager;
associated_thread_ = sequence_manager_->associated_thread();
}
SequenceManager* TimeDomain::sequence_manager() const {
DCHECK(sequence_manager_);
return sequence_manager_;
}
// TODO(kraynov): https://crbug.com/857101 Consider making an interface
// for SequenceManagerImpl which will expose SetNextDelayedDoWork and
// MaybeScheduleImmediateWork methods to make the functions below pure-virtual.
void TimeDomain::SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) {
sequence_manager_->SetNextDelayedDoWork(lazy_now, run_time);
}
void TimeDomain::RequestDoWork() {
sequence_manager_->ScheduleWork();
}
void TimeDomain::UnregisterQueue(internal::TaskQueueImpl* queue) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(queue->GetTimeDomain(), this);
LazyNow lazy_now(CreateLazyNow());
SetNextWakeUpForQueue(queue, nullopt, internal::WakeUpResolution::kLow,
&lazy_now);
}
void TimeDomain::SetNextWakeUpForQueue(
internal::TaskQueueImpl* queue,
Optional<internal::DelayedWakeUp> wake_up,
internal::WakeUpResolution resolution,
LazyNow* lazy_now) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(queue->GetTimeDomain(), this);
DCHECK(queue->IsQueueEnabled() || !wake_up);
Optional<TimeTicks> previous_wake_up;
Optional<internal::WakeUpResolution> previous_queue_resolution;
if (!delayed_wake_up_queue_.empty())
previous_wake_up = delayed_wake_up_queue_.Min().wake_up.time;
if (queue->heap_handle().IsValid()) {
previous_queue_resolution =
delayed_wake_up_queue_.at(queue->heap_handle()).resolution;
}
if (wake_up) {
// Insert a new wake-up into the heap.
if (queue->heap_handle().IsValid()) {
// O(log n)
delayed_wake_up_queue_.ChangeKey(queue->heap_handle(),
{wake_up.value(), resolution, queue});
} else {
// O(log n)
delayed_wake_up_queue_.insert({wake_up.value(), resolution, queue});
}
} else {
// Remove a wake-up from heap if present.
if (queue->heap_handle().IsValid())
delayed_wake_up_queue_.erase(queue->heap_handle());
}
Optional<TimeTicks> new_wake_up;
if (!delayed_wake_up_queue_.empty())
new_wake_up = delayed_wake_up_queue_.Min().wake_up.time;
if (previous_queue_resolution &&
*previous_queue_resolution == internal::WakeUpResolution::kHigh) {
pending_high_res_wake_up_count_--;
}
if (wake_up && resolution == internal::WakeUpResolution::kHigh)
pending_high_res_wake_up_count_++;
DCHECK_GE(pending_high_res_wake_up_count_, 0);
// TODO(kraynov): https://crbug.com/857101 Review the relationship with
// SequenceManager's time. Right now it's not an issue since
// VirtualTimeDomain doesn't invoke SequenceManager itself.
if (new_wake_up == previous_wake_up) {
// Nothing to be done
return;
}
if (!new_wake_up) {
// No new wake-up to be set, cancel the previous one.
new_wake_up = TimeTicks::Max();
}
if (*new_wake_up <= lazy_now->Now()) {
RequestDoWork();
} else {
SetNextDelayedDoWork(lazy_now, *new_wake_up);
}
}
void TimeDomain::MoveReadyDelayedTasksToWorkQueues(LazyNow* lazy_now) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
// Wake up any queues with pending delayed work. Note |delayed_wake_up_queue_|
// is a min-heap, so Min() refers to the queue with the earliest wake-up.
while (!delayed_wake_up_queue_.empty() &&
delayed_wake_up_queue_.Min().wake_up.time <= lazy_now->Now()) {
internal::TaskQueueImpl* queue = delayed_wake_up_queue_.Min().queue;
queue->MoveReadyDelayedTasksToWorkQueue(lazy_now);
}
}
Optional<TimeTicks> TimeDomain::NextScheduledRunTime() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (delayed_wake_up_queue_.empty())
return nullopt;
return delayed_wake_up_queue_.Min().wake_up.time;
}
void TimeDomain::AsValueInto(trace_event::TracedValue* state) const {
state->BeginDictionary();
state->SetString("name", GetName());
state->SetInteger("registered_delay_count", delayed_wake_up_queue_.size());
if (!delayed_wake_up_queue_.empty()) {
TimeDelta delay = delayed_wake_up_queue_.Min().wake_up.time - Now();
state->SetDouble("next_delay_ms", delay.InMillisecondsF());
}
AsValueIntoInternal(state);
state->EndDictionary();
}
void TimeDomain::AsValueIntoInternal(trace_event::TracedValue* state) const {
// Can be overridden to trace some additional state.
}
bool TimeDomain::HasPendingHighResolutionTasks() const {
return pending_high_res_wake_up_count_;
}
bool TimeDomain::Empty() const {
return delayed_wake_up_queue_.empty();
}
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,160 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_
#define BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_
#include <map>
#include "base/callback.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/sequence_manager/lazy_now.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/time/time.h"
namespace base {
namespace sequence_manager {
class SequenceManager;
namespace internal {
class AssociatedThreadId;
class SequenceManagerImpl;
class TaskQueueImpl;
} // namespace internal
// TimeDomain wakes up TaskQueues when their delayed tasks are due to run.
// This class allows overrides to enable clock overriding on some TaskQueues
// (e.g. auto-advancing virtual time, throttled clock, etc).
//
// TaskQueue maintains its own next wake-up time and communicates it
// to the TimeDomain, which aggregates wake-ups across registered TaskQueues
// into a global wake-up, which ultimately gets passed to the ThreadController.
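//
// A minimal subclass sketch (illustrative only; the MockTimeDomain name and
// its |now_| member are assumptions, not part of this file):
//
//   class MockTimeDomain : public TimeDomain {
//    public:
//     LazyNow CreateLazyNow() const override { return LazyNow(now_); }
//     TimeTicks Now() const override { return now_; }
//     Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override {
//       return nullopt;
//     }
//     bool MaybeFastForwardToNextTask(bool quit_when_idle_requested) override {
//       return false;
//     }
//     const char* GetName() const override { return "MockTimeDomain"; }
//
//    private:
//     TimeTicks now_;
//   };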
class BASE_EXPORT TimeDomain {
public:
virtual ~TimeDomain();
// Returns LazyNow in TimeDomain's time.
// Can be called from any thread.
// TODO(alexclarke): Make this main thread only.
virtual LazyNow CreateLazyNow() const = 0;
// Evaluates TimeDomain's time.
// Can be called from any thread.
// TODO(alexclarke): Make this main thread only.
virtual TimeTicks Now() const = 0;
// Computes the delay until the time when TimeDomain needs to wake up some
// TaskQueue on the main thread. Specific time domains (e.g. virtual or
// throttled) may return TimeDelta() if TaskQueues have any delayed tasks they
// deem eligible to run. The time domain is also allowed to advance its
// internal clock when this method is called.
// Can be called from main thread only.
// NOTE: |lazy_now| and the return value are in the SequenceManager's time.
virtual Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) = 0;
void AsValueInto(trace_event::TracedValue* state) const;
bool HasPendingHighResolutionTasks() const;
// Returns true if there are no pending delayed tasks.
bool Empty() const;
// This is the signal that virtual time should step forward. If
// RunLoop::QuitWhenIdle has been called then |quit_when_idle_requested| will
// be true. Returns true if there is a task to run now.
virtual bool MaybeFastForwardToNextTask(bool quit_when_idle_requested) = 0;
protected:
TimeDomain();
SequenceManager* sequence_manager() const;
// Returns the earliest scheduled wake up in the TimeDomain's time.
Optional<TimeTicks> NextScheduledRunTime() const;
size_t NumberOfScheduledWakeUps() const {
return delayed_wake_up_queue_.size();
}
// Tells SequenceManager to schedule delayed work, use TimeTicks::Max()
// to unschedule. Also cancels any previous requests.
// May be overridden to control wake ups manually.
virtual void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time);
// Tells SequenceManager to schedule immediate work.
// May be overridden to control wake ups manually.
virtual void RequestDoWork();
// For implementation-specific tracing.
virtual void AsValueIntoInternal(trace_event::TracedValue* state) const;
virtual const char* GetName() const = 0;
// Called when the TimeDomain is registered. |sequence_manager| is expected to
// be valid for the duration of TimeDomain's existence.
// TODO(scheduler-dev): Pass SequenceManager in the constructor.
virtual void OnRegisterWithSequenceManager(
internal::SequenceManagerImpl* sequence_manager);
private:
friend class internal::TaskQueueImpl;
friend class internal::SequenceManagerImpl;
friend class TestTimeDomain;
// Schedule TaskQueue to wake up at a certain time; repeated calls with
// the same |queue| invalidate previous requests.
// Nullopt |wake_up| cancels a previously set wake up for |queue|.
// NOTE: |lazy_now| is provided in TimeDomain's time.
void SetNextWakeUpForQueue(internal::TaskQueueImpl* queue,
Optional<internal::DelayedWakeUp> wake_up,
internal::WakeUpResolution resolution,
LazyNow* lazy_now);
// Remove the TaskQueue from any internal data structures.
void UnregisterQueue(internal::TaskQueueImpl* queue);
// Wake up each TaskQueue where the delay has elapsed. Note this doesn't
// ScheduleWork.
void MoveReadyDelayedTasksToWorkQueues(LazyNow* lazy_now);
struct ScheduledDelayedWakeUp {
internal::DelayedWakeUp wake_up;
internal::WakeUpResolution resolution;
internal::TaskQueueImpl* queue;
bool operator<=(const ScheduledDelayedWakeUp& other) const {
if (wake_up == other.wake_up) {
return static_cast<int>(resolution) <=
static_cast<int>(other.resolution);
}
return wake_up <= other.wake_up;
}
void SetHeapHandle(base::internal::HeapHandle handle) {
DCHECK(handle.IsValid());
queue->set_heap_handle(handle);
}
void ClearHeapHandle() {
DCHECK(queue->heap_handle().IsValid());
queue->set_heap_handle(base::internal::HeapHandle());
}
HeapHandle GetHeapHandle() const { return queue->heap_handle(); }
};
internal::SequenceManagerImpl* sequence_manager_; // Not owned.
base::internal::IntrusiveHeap<ScheduledDelayedWakeUp> delayed_wake_up_queue_;
int pending_high_res_wake_up_count_ = 0;
scoped_refptr<internal::AssociatedThreadId> associated_thread_;
DISALLOW_COPY_AND_ASSIGN(TimeDomain);
};
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_

View file

@ -0,0 +1,96 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/work_deduplicator.h"
#include <utility>
#include "base/logging.h"
namespace base {
namespace sequence_manager {
namespace internal {
WorkDeduplicator::WorkDeduplicator(
scoped_refptr<AssociatedThreadId> associated_thread)
: associated_thread_(std::move(associated_thread)) {}
WorkDeduplicator::~WorkDeduplicator() = default;
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::BindToCurrentThread() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
int previous_flags = state_.fetch_or(kBoundFlag);
DCHECK_EQ(previous_flags & kBoundFlag, 0) << "Can't bind twice!";
return previous_flags & kPendingDoWorkFlag
? ShouldScheduleWork::kScheduleImmediate
: ShouldScheduleWork::kNotNeeded;
}
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::OnWorkRequested() {
// Set kPendingDoWorkFlag and return kScheduleImmediate if we were previously
// kIdle.
return state_.fetch_or(kPendingDoWorkFlag) == State::kIdle
? ShouldScheduleWork::kScheduleImmediate
: ShouldScheduleWork::kNotNeeded;
}
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::OnDelayedWorkRequested()
const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
// This must be called on the associated thread or this read is racy.
return state_.load() == State::kIdle ? ShouldScheduleWork::kScheduleImmediate
: ShouldScheduleWork::kNotNeeded;
}
void WorkDeduplicator::OnWorkStarted() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(state_.load() & kBoundFlag, kBoundFlag);
// Clear kPendingDoWorkFlag and mark us as in a DoWork.
state_.store(State::kInDoWork);
}
void WorkDeduplicator::WillCheckForMoreWork() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(state_.load() & kBoundFlag, kBoundFlag);
// Clear kPendingDoWorkFlag if it was set.
state_.store(State::kInDoWork);
}
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::DidCheckForMoreWork(
NextTask next_task) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(state_.load() & kBoundFlag, kBoundFlag);
last_work_check_result_ = ShouldScheduleWork::kScheduleImmediate;
if (next_task == NextTask::kIsImmediate) {
state_.store(State::kDoWorkPending);
} else {
// Another thread may have set kPendingDoWorkFlag between
// WillCheckForMoreWork() and here, if so we should return
// ShouldScheduleWork::kScheduleImmediate. Otherwise we don't need to
// schedule an immediate continuation.
if (!(state_.fetch_and(~kInDoWorkFlag) & kPendingDoWorkFlag))
last_work_check_result_ = ShouldScheduleWork::kNotNeeded;
}
return last_work_check_result_;
}
void WorkDeduplicator::OnDelayedWorkStarted() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
OnWorkStarted();
}
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::OnDelayedWorkEnded(
NextTask next_task) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
ShouldScheduleWork prev_last_work_check_result = last_work_check_result_;
WorkDeduplicator::ShouldScheduleWork should_schedule_work =
DidCheckForMoreWork(next_task);
if (prev_last_work_check_result == ShouldScheduleWork::kScheduleImmediate) {
prev_last_work_check_result = ShouldScheduleWork::kNotNeeded;
should_schedule_work = ShouldScheduleWork::kNotNeeded;
}
return should_schedule_work;
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,157 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_WORK_DEDUPLICATOR_H_
#define BASE_TASK_SEQUENCE_MANAGER_WORK_DEDUPLICATOR_H_
#include <atomic>
#include "base/base_export.h"
#include "base/task/sequence_manager/associated_thread_id.h"
namespace base {
namespace sequence_manager {
namespace internal {
// This class's job is to prevent redundant DoWorks being posted, which are
// expensive. The idea is a DoWork will (maybe) run a task before computing the
// delay till the next task. If the task run posts another task, we don't want
// it to schedule work because the DoWork will post a continuation as needed
// with the latest state taken into consideration (fences, enable / disable
// queue, task cancellation, etc...) Other threads can also post DoWork at any
// time, including while we're computing the delay till the next task. To
// account for that, we have split a DoWork up into two sections:
// [OnWorkStarted .. WillCheckForMoreWork] and
// [WillCheckForMoreWork .. DidCheckForMoreWork] where DidCheckForMoreWork
// detects if another thread called OnWorkRequested.
//
// Nesting is assumed to be dealt with by the ThreadController.
//
// Most methods are thread-affine, except for OnWorkRequested() which is
// thread-safe.
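//
// A sketch of the DoWork cycle on the bound thread (illustrative only; the
// |work_deduplicator_| and |pump_| members belong to an assumed owning
// ThreadController):
//
//   work_deduplicator_.OnWorkStarted();
//   // ... run one or more tasks ...
//   work_deduplicator_.WillCheckForMoreWork();
//   // ... compute whether an immediate task is ready ...
//   if (work_deduplicator_.DidCheckForMoreWork(NextTask::kIsImmediate) ==
//       ShouldScheduleWork::kScheduleImmediate) {
//     pump_->ScheduleWork();
//   }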
class BASE_EXPORT WorkDeduplicator {
public:
// Creates an unbound WorkDeduplicator. BindToCurrentThread must be called
// before work can be scheduled.
explicit WorkDeduplicator(
scoped_refptr<AssociatedThreadId> associated_thread);
~WorkDeduplicator();
enum ShouldScheduleWork {
kScheduleImmediate,
kNotNeeded,
};
// Returns ShouldScheduleWork::kScheduleImmediate if OnWorkRequested was
// called while unbound. Must be called on the associated thread.
ShouldScheduleWork BindToCurrentThread();
// Returns ShouldScheduleWork::kScheduleImmediate if it's OK to schedule a
// DoWork without risk of task duplication. Returns
// ShouldScheduleWork::kNotNeeded if:
// * We are unbound
// * We are in a DoWork
// * There is a pending DoWork
//
// Otherwise sets the pending DoWork flag and returns
// ShouldScheduleWork::kScheduleImmediate.
// Can be called on any thread.
//
//                      DoWork
//   --------------------------------------------------------------
//   | <- OnWorkStarted                                            |
//   |      WillCheckForMoreWork ->                                 |
//   |                               DidCheckForMoreWork ->         |
//   --------------------------------------------------------------
// ^              ^                        ^                          ^
// |              |                        |                          |
// A              B                        C                          D
//
// Consider a DoWork and calls to OnWorkRequested at various times:
// A: return ShouldScheduleWork::kNotNeeded because there's a pending DoWork.
// B: return ShouldScheduleWork::kNotNeeded because we're in a DoWork.
// C: return ShouldScheduleWork::kNotNeeded because we're in a DoWork, however
//    DidCheckForMoreWork should subsequently return
//    ShouldScheduleWork::kScheduleImmediate.
// D: If DidCheckForMoreWork(NextTask::kIsImmediate) was called then it
//    should return ShouldScheduleWork::kNotNeeded because there's a pending
//    DoWork. Otherwise it should return
//    ShouldScheduleWork::kScheduleImmediate, but a subsequent call to
//    OnWorkRequested should return ShouldScheduleWork::kNotNeeded because
//    there's now a pending DoWork.
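//
// A caller-side sketch (illustrative; it mirrors
// ThreadControllerWithMessagePumpImpl::EnsureWorkScheduled() in this change):
//
//   if (work_deduplicator_.OnWorkRequested() ==
//       ShouldScheduleWork::kScheduleImmediate) {
//     pump_->ScheduleWork();
//   }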
ShouldScheduleWork OnWorkRequested();
// Returns ShouldScheduleWork::kScheduleImmediate if it's OK to schedule a
// DoDelayedWork without risk of redundancy. Deduplication of delayed work is
// assumed to have been done by the caller; the purpose of this method is to
// check if there's a pending Do(Some)Work which would schedule a delayed
// continuation as needed.
//
// Returns ShouldScheduleWork::kNotNeeded if:
// * We are unbound
// * We are in a DoWork
// * There is a pending DoWork
//
// Must be called on the associated thread.
ShouldScheduleWork OnDelayedWorkRequested() const;
// Marks us as having entered a DoWork, clearing the pending DoWork flag.
// Must be called on the associated thread.
void OnWorkStarted();
// Marks us as being about to check if we have more work. This notification
// helps prevent DoWork duplication in two scenarios:
// * A cross-thread immediate task is posted while we are running a task. If
// the TaskQueue is disabled we can avoid a potentially spurious DoWork.
// * A task is run which posts an immediate task but the ThreadControllerImpl
// work batch size is 2, and there's no further work. The immediate task ran
// in the work batch so we don't need another DoWork.
void WillCheckForMoreWork();
enum NextTask {
kIsImmediate,
kIsDelayed,
};
// Marks us as exiting DoWork. Returns ShouldScheduleWork::kScheduleImmediate
// if an immediate DoWork continuation should be posted. This method takes
// into account any OnWorkRequested's called between WillCheckForMoreWork()
// and here. Must be called on the associated thread.
ShouldScheduleWork DidCheckForMoreWork(NextTask next_task);
// For ThreadControllerWithMessagePumpImpl. The MessagePump calls DoWork and
// DoDelayedWork sequentially. If DoWork returns
// ShouldScheduleWork::kScheduleImmediate, the pump will call ScheduleWork. We
// remember if DoWork will be scheduled so we don't accidentally call it twice
// from DoDelayedWork. Must be called on the associated thread.
// TODO(alexclarke): Remove these when the DoWork/DoDelayed work merger
// happens.
void OnDelayedWorkStarted();
ShouldScheduleWork OnDelayedWorkEnded(NextTask next_task);
private:
enum Flags {
kInDoWorkFlag = 1 << 0,
kPendingDoWorkFlag = 1 << 1,
kBoundFlag = 1 << 2,
};
enum State {
kUnbound = 0,
kIdle = Flags::kBoundFlag,
kDoWorkPending = Flags::kPendingDoWorkFlag | Flags::kBoundFlag,
kInDoWork = Flags::kInDoWorkFlag | Flags::kBoundFlag,
};
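// Worked example of a typical cycle, derived from the transitions in
// work_deduplicator.cc (each arrow is a method call on this class):
//   kUnbound --BindToCurrentThread()--> kIdle
//   kIdle --OnWorkRequested()--> kDoWorkPending (caller schedules a DoWork)
//   kDoWorkPending --OnWorkStarted()--> kInDoWork
//   kInDoWork --WillCheckForMoreWork()--> kInDoWork (pending flag cleared)
//   kInDoWork --DidCheckForMoreWork(kIsDelayed)--> kIdle (no continuation)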
std::atomic<int> state_{State::kUnbound};
scoped_refptr<AssociatedThreadId> associated_thread_;
// TODO(alexclarke): Remove when the DoWork/DoDelayed work merger happens.
ShouldScheduleWork last_work_check_result_ = ShouldScheduleWork::kNotNeeded;
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_WORK_DEDUPLICATOR_H_

View file

@ -0,0 +1,320 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/work_queue.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/work_queue_sets.h"
namespace base {
namespace sequence_manager {
namespace internal {
WorkQueue::WorkQueue(TaskQueueImpl* task_queue,
const char* name,
QueueType queue_type)
: task_queue_(task_queue), name_(name), queue_type_(queue_type) {}
void WorkQueue::AsValueInto(TimeTicks now,
trace_event::TracedValue* state) const {
for (const Task& task : tasks_) {
TaskQueueImpl::TaskAsValueInto(task, now, state);
}
}
WorkQueue::~WorkQueue() {
DCHECK(!work_queue_sets_) << task_queue_->GetName() << " : "
<< work_queue_sets_->GetName() << " : " << name_;
}
const Task* WorkQueue::GetFrontTask() const {
if (tasks_.empty())
return nullptr;
return &tasks_.front();
}
const Task* WorkQueue::GetBackTask() const {
if (tasks_.empty())
return nullptr;
return &tasks_.back();
}
bool WorkQueue::BlockedByFence() const {
if (!fence_)
return false;
// If the queue is empty then any future tasks will have a higher enqueue
// order and will be blocked. The queue is also blocked if the head is past
// the fence.
return tasks_.empty() || tasks_.front().enqueue_order() >= fence_;
}
bool WorkQueue::GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const {
if (tasks_.empty() || BlockedByFence())
return false;
// Quick sanity check.
DCHECK_LE(tasks_.front().enqueue_order(), tasks_.back().enqueue_order())
<< task_queue_->GetName() << " : " << work_queue_sets_->GetName() << " : "
<< name_;
*enqueue_order = tasks_.front().enqueue_order();
return true;
}
void WorkQueue::Push(Task task) {
bool was_empty = tasks_.empty();
#ifndef NDEBUG
DCHECK(task.enqueue_order_set());
#endif
// Make sure the |enqueue_order()| is monotonically increasing.
DCHECK(was_empty || tasks_.back().enqueue_order() < task.enqueue_order());
// Amortized O(1).
tasks_.push_back(std::move(task));
if (!was_empty)
return;
// If we hit the fence, pretend to WorkQueueSets that we're empty.
if (work_queue_sets_ && !BlockedByFence())
work_queue_sets_->OnTaskPushedToEmptyQueue(this);
}
WorkQueue::TaskPusher::TaskPusher(WorkQueue* work_queue)
: work_queue_(work_queue), was_empty_(work_queue->Empty()) {}
WorkQueue::TaskPusher::TaskPusher(TaskPusher&& other)
: work_queue_(other.work_queue_), was_empty_(other.was_empty_) {
other.work_queue_ = nullptr;
}
void WorkQueue::TaskPusher::Push(Task* task) {
DCHECK(work_queue_);
#ifndef NDEBUG
DCHECK(task->enqueue_order_set());
#endif
// Make sure the |enqueue_order()| is monotonically increasing.
DCHECK(work_queue_->tasks_.empty() ||
work_queue_->tasks_.back().enqueue_order() < task->enqueue_order());
// Amortized O(1).
work_queue_->tasks_.push_back(std::move(*task));
}
WorkQueue::TaskPusher::~TaskPusher() {
// If |work_queue_| became non-empty and it isn't blocked by a fence then we
// must notify |work_queue_->work_queue_sets_|.
if (was_empty_ && work_queue_ && !work_queue_->Empty() &&
work_queue_->work_queue_sets_ && !work_queue_->BlockedByFence()) {
work_queue_->work_queue_sets_->OnTaskPushedToEmptyQueue(work_queue_);
}
}
WorkQueue::TaskPusher WorkQueue::CreateTaskPusher() {
return TaskPusher(this);
}
void WorkQueue::PushNonNestableTaskToFront(Task task) {
DCHECK(task.nestable == Nestable::kNonNestable);
bool was_empty = tasks_.empty();
bool was_blocked = BlockedByFence();
#ifndef NDEBUG
DCHECK(task.enqueue_order_set());
#endif
if (!was_empty) {
// Make sure the |enqueue_order| is monotonically increasing.
DCHECK_LE(task.enqueue_order(), tasks_.front().enqueue_order())
<< task_queue_->GetName() << " : " << work_queue_sets_->GetName()
<< " : " << name_;
}
// Amortized O(1).
tasks_.push_front(std::move(task));
if (!work_queue_sets_)
return;
// Pretend to WorkQueueSets that nothing has changed if we're blocked.
if (BlockedByFence())
return;
// Pushing task to front may unblock the fence.
if (was_empty || was_blocked) {
work_queue_sets_->OnTaskPushedToEmptyQueue(this);
} else {
work_queue_sets_->OnQueuesFrontTaskChanged(this);
}
}
void WorkQueue::TakeImmediateIncomingQueueTasks() {
DCHECK(tasks_.empty());
task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
if (tasks_.empty())
return;
// If we hit the fence, pretend to WorkQueueSets that we're empty.
if (work_queue_sets_ && !BlockedByFence())
work_queue_sets_->OnTaskPushedToEmptyQueue(this);
}
Task WorkQueue::TakeTaskFromWorkQueue() {
DCHECK(work_queue_sets_);
DCHECK(!tasks_.empty());
Task pending_task = std::move(tasks_.front());
tasks_.pop_front();
// NB immediate tasks have a different pipeline to delayed ones.
if (tasks_.empty()) {
// NB delayed tasks are inserted via Push, so we don't need to reload those.
if (queue_type_ == QueueType::kImmediate) {
// Short-circuit the queue reload so that OnPopMinQueueInSet does the
// right thing.
task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
}
// Since the queue is empty, now is a good time to consider reducing its
// capacity if we're wasting memory.
tasks_.MaybeShrinkQueue();
}
DCHECK(work_queue_sets_);
#if DCHECK_IS_ON()
// If diagnostics are on it's possible task queues are being selected at
// random so we can't use the (slightly) more efficient OnPopMinQueueInSet.
work_queue_sets_->OnQueuesFrontTaskChanged(this);
#else
// OnPopMinQueueInSet calls GetFrontTaskEnqueueOrder which checks
// BlockedByFence() so we don't need to here.
work_queue_sets_->OnPopMinQueueInSet(this);
#endif
task_queue_->TraceQueueSize();
return pending_task;
}
bool WorkQueue::RemoveAllCanceledTasksFromFront() {
if (!work_queue_sets_)
return false;
bool task_removed = false;
while (!tasks_.empty() &&
(!tasks_.front().task || tasks_.front().task.IsCancelled())) {
tasks_.pop_front();
task_removed = true;
}
if (task_removed) {
if (tasks_.empty()) {
// NB delayed tasks are inserted via Push, so we don't need to reload those.
if (queue_type_ == QueueType::kImmediate) {
// Short-circuit the queue reload so that OnPopMinQueueInSet does the
// right thing.
task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
}
// Since the queue is empty, now is a good time to consider reducing its
// capacity if we're wasting memory.
tasks_.MaybeShrinkQueue();
}
// If we have a valid |heap_handle_| (i.e. we're not blocked by a fence or
// disabled) then |work_queue_sets_| needs to be told.
if (heap_handle_.IsValid())
work_queue_sets_->OnQueuesFrontTaskChanged(this);
task_queue_->TraceQueueSize();
}
return task_removed;
}
void WorkQueue::AssignToWorkQueueSets(WorkQueueSets* work_queue_sets) {
work_queue_sets_ = work_queue_sets;
}
void WorkQueue::AssignSetIndex(size_t work_queue_set_index) {
work_queue_set_index_ = work_queue_set_index;
}
bool WorkQueue::InsertFenceImpl(EnqueueOrder fence) {
DCHECK_NE(fence, 0u);
DCHECK(fence >= fence_ || fence == EnqueueOrder::blocking_fence());
bool was_blocked_by_fence = BlockedByFence();
fence_ = fence;
return was_blocked_by_fence;
}
void WorkQueue::InsertFenceSilently(EnqueueOrder fence) {
// Ensure that there is no fence present or that the new one blocks the queue
// completely.
DCHECK(!fence_ || fence_ == EnqueueOrder::blocking_fence());
InsertFenceImpl(fence);
}
bool WorkQueue::InsertFence(EnqueueOrder fence) {
bool was_blocked_by_fence = InsertFenceImpl(fence);
if (!work_queue_sets_)
return false;
// Moving the fence forward may unblock some tasks.
if (!tasks_.empty() && was_blocked_by_fence && !BlockedByFence()) {
work_queue_sets_->OnTaskPushedToEmptyQueue(this);
return true;
}
// Fence insertion may have blocked all tasks in this work queue.
if (BlockedByFence())
work_queue_sets_->OnQueueBlocked(this);
return false;
}
bool WorkQueue::RemoveFence() {
bool was_blocked_by_fence = BlockedByFence();
fence_ = EnqueueOrder::none();
if (work_queue_sets_ && !tasks_.empty() && was_blocked_by_fence) {
work_queue_sets_->OnTaskPushedToEmptyQueue(this);
return true;
}
return false;
}
bool WorkQueue::ShouldRunBefore(const WorkQueue* other_queue) const {
DCHECK(!tasks_.empty());
DCHECK(!other_queue->tasks_.empty());
EnqueueOrder enqueue_order;
EnqueueOrder other_enqueue_order;
bool have_task = GetFrontTaskEnqueueOrder(&enqueue_order);
bool have_other_task =
other_queue->GetFrontTaskEnqueueOrder(&other_enqueue_order);
DCHECK(have_task);
DCHECK(have_other_task);
return enqueue_order < other_enqueue_order;
}
void WorkQueue::MaybeShrinkQueue() {
tasks_.MaybeShrinkQueue();
}
void WorkQueue::DeletePendingTasks() {
tasks_.clear();
if (work_queue_sets_ && heap_handle().IsValid())
work_queue_sets_->OnQueuesFrontTaskChanged(this);
DCHECK(!heap_handle_.IsValid());
}
void WorkQueue::PopTaskForTesting() {
if (tasks_.empty())
return;
tasks_.pop_front();
}
void WorkQueue::CollectTasksOlderThan(EnqueueOrder reference,
std::vector<const Task*>* result) const {
for (const Task& task : tasks_) {
if (task.enqueue_order() >= reference)
break;
result->push_back(&task);
}
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,192 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_
#define BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_
#include "base/base_export.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/sequence_manager/enqueue_order.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
namespace base {
namespace sequence_manager {
namespace internal {
class WorkQueueSets;
// This class keeps track of immediate and delayed tasks which are due to run
// now. It interfaces deeply with WorkQueueSets which keeps track of which queue
// (with a given priority) contains the oldest task.
//
// If a fence is inserted, WorkQueue behaves normally up until
// TakeTaskFromWorkQueue reaches or exceeds the fence. At that point the API
// subset used by WorkQueueSets pretends the WorkQueue is empty until the
// fence is removed. This functionality is a primitive intended for use by
// throttling mechanisms.
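//
// A small fence sketch (illustrative only; |queue| and |fence_enqueue_order|
// are assumptions):
//
//   queue->InsertFence(fence_enqueue_order);  // Tasks at or past the fence
//                                             // now appear blocked.
//   // ... later, e.g. when throttling ends ...
//   queue->RemoveFence();  // Returns true if any tasks were unblocked.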
class BASE_EXPORT WorkQueue {
public:
using QueueType = internal::TaskQueueImpl::WorkQueueType;
// Note |task_queue| can be null if queue_type is kNonNestable.
WorkQueue(TaskQueueImpl* task_queue, const char* name, QueueType queue_type);
~WorkQueue();
// Associates this work queue with the given work queue sets. This must be
// called before any tasks can be inserted into this work queue.
void AssignToWorkQueueSets(WorkQueueSets* work_queue_sets);
// Assigns the current set index.
void AssignSetIndex(size_t work_queue_set_index);
void AsValueInto(TimeTicks now, trace_event::TracedValue* state) const;
// Returns true if the |tasks_| is empty. This method ignores any fences.
bool Empty() const { return tasks_.empty(); }
// If the |tasks_| isn't empty and a fence hasn't been reached,
// |enqueue_order| gets set to the enqueue order of the front task and the
// function returns true. Otherwise the function returns false.
bool GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const;
// Returns the first task in this queue or null if the queue is empty. This
// method ignores any fences.
const Task* GetFrontTask() const;
// Returns the last task in this queue or null if the queue is empty. This
// method ignores any fences.
const Task* GetBackTask() const;
// Pushes the task onto the |tasks_| and if a fence hasn't been reached
// it informs the WorkQueueSets if the head changed.
void Push(Task task);
// RAII helper that helps efficiently push N Tasks to a WorkQueue.
class BASE_EXPORT TaskPusher {
public:
TaskPusher(const TaskPusher&) = delete;
TaskPusher(TaskPusher&& other);
~TaskPusher();
void Push(Task* task);
private:
friend class WorkQueue;
explicit TaskPusher(WorkQueue* work_queue);
WorkQueue* work_queue_;
const bool was_empty_;
};
// Returns an RAII helper to efficiently push multiple tasks.
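// A usage sketch (illustrative only; |tasks| is an assumed container of Task
// objects whose enqueue orders are already set):
//
//   {
//     WorkQueue::TaskPusher pusher = work_queue->CreateTaskPusher();
//     for (Task& task : tasks)
//       pusher.Push(&task);
//   }  // The destructor notifies WorkQueueSets if the queue became non-empty.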
TaskPusher CreateTaskPusher();
// Pushes the task onto the front of the |tasks_| and if it's before any
// fence it informs the WorkQueueSets the head changed. Use with caution: this
// API can easily lead to task starvation if misused.
void PushNonNestableTaskToFront(Task task);
// Reloads the empty |tasks_| with
// |task_queue_->TakeImmediateIncomingQueueTasks| and if a fence hasn't been
// reached it informs the WorkQueueSets if the head changed.
void TakeImmediateIncomingQueueTasks();
size_t Size() const { return tasks_.size(); }
size_t Capacity() const { return tasks_.capacity(); }
// Pulls a task off the |tasks_| and informs the WorkQueueSets. If the
// task removed had an enqueue order >= the current fence then WorkQueue
// pretends to be empty as far as the WorkQueueSets is concerned.
Task TakeTaskFromWorkQueue();
// Removes all canceled tasks from the head of the list. Returns true if any
// tasks were removed.
bool RemoveAllCanceledTasksFromFront();
const char* name() const { return name_; }
TaskQueueImpl* task_queue() const { return task_queue_; }
WorkQueueSets* work_queue_sets() const { return work_queue_sets_; }
size_t work_queue_set_index() const { return work_queue_set_index_; }
base::internal::HeapHandle heap_handle() const { return heap_handle_; }
void set_heap_handle(base::internal::HeapHandle handle) {
heap_handle_ = handle;
}
QueueType queue_type() const { return queue_type_; }
// Returns true if the front task in this queue has an older enqueue order
// than the front task of |other_queue|. Both queues are assumed to be
// non-empty. This method ignores any fences.
bool ShouldRunBefore(const WorkQueue* other_queue) const;
// Submit a fence. When TakeTaskFromWorkQueue encounters a task whose
// enqueue_order is >= |fence| then the WorkQueue will start pretending to be
// empty.
// Inserting a fence may supersede a previous one and unblock some tasks.
// Returns true if any tasks were unblocked, returns false otherwise.
bool InsertFence(EnqueueOrder fence);
// Submit a fence without triggering a WorkQueueSets notification.
// Caller must ensure that WorkQueueSets are properly updated.
// This method should not be called when a fence is already present.
void InsertFenceSilently(EnqueueOrder fence);
// Removes any fences that were added and if WorkQueue was pretending to be
// empty, then the real value is reported to WorkQueueSets. Returns true if
// any tasks were unblocked.
bool RemoveFence();
// Returns true if any tasks are blocked by the fence. Returns true if the
// queue is empty and a fence has been set (i.e. future tasks would be blocked).
// Otherwise returns false.
bool BlockedByFence() const;
// Shrinks |tasks_| if it's wasting memory.
void MaybeShrinkQueue();
// Delete all tasks within this WorkQueue.
void DeletePendingTasks();
// Test support function. This should not be used in production code.
void PopTaskForTesting();
// Iterates through |tasks_| adding any that are older than |reference| to
// |result|.
void CollectTasksOlderThan(EnqueueOrder reference,
std::vector<const Task*>* result) const;
private:
bool InsertFenceImpl(EnqueueOrder fence);
TaskQueueImpl::TaskDeque tasks_;
WorkQueueSets* work_queue_sets_ = nullptr; // NOT OWNED.
TaskQueueImpl* const task_queue_; // NOT OWNED.
size_t work_queue_set_index_ = 0;
// Iff the queue isn't empty (or appearing to be empty due to a fence) then
// |heap_handle_| will be valid and correspond to this queue's location within
// an IntrusiveHeap inside the WorkQueueSet.
base::internal::HeapHandle heap_handle_;
const char* const name_;
EnqueueOrder fence_;
const QueueType queue_type_;
DISALLOW_COPY_AND_ASSIGN(WorkQueue);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_

View file

@ -0,0 +1,255 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/work_queue_sets.h"
#include "base/logging.h"
namespace base {
namespace sequence_manager {
namespace internal {
WorkQueueSets::WorkQueueSets(const char* name,
Observer* observer,
const SequenceManager::Settings& settings)
: name_(name),
#if DCHECK_IS_ON()
last_rand_(settings.random_task_selection_seed),
#endif
observer_(observer) {
}
WorkQueueSets::~WorkQueueSets() = default;
void WorkQueueSets::AddQueue(WorkQueue* work_queue, size_t set_index) {
DCHECK(!work_queue->work_queue_sets());
DCHECK_LT(set_index, work_queue_heaps_.size());
DCHECK(!work_queue->heap_handle().IsValid());
EnqueueOrder enqueue_order;
bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
work_queue->AssignToWorkQueueSets(this);
work_queue->AssignSetIndex(set_index);
if (!has_enqueue_order)
return;
bool was_empty = work_queue_heaps_[set_index].empty();
work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
if (was_empty)
observer_->WorkQueueSetBecameNonEmpty(set_index);
}
void WorkQueueSets::RemoveQueue(WorkQueue* work_queue) {
DCHECK_EQ(this, work_queue->work_queue_sets());
work_queue->AssignToWorkQueueSets(nullptr);
if (!work_queue->heap_handle().IsValid())
return;
size_t set_index = work_queue->work_queue_set_index();
DCHECK_LT(set_index, work_queue_heaps_.size());
work_queue_heaps_[set_index].erase(work_queue->heap_handle());
if (work_queue_heaps_[set_index].empty())
observer_->WorkQueueSetBecameEmpty(set_index);
DCHECK(!work_queue->heap_handle().IsValid());
}
void WorkQueueSets::ChangeSetIndex(WorkQueue* work_queue, size_t set_index) {
DCHECK_EQ(this, work_queue->work_queue_sets());
DCHECK_LT(set_index, work_queue_heaps_.size());
EnqueueOrder enqueue_order;
bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
size_t old_set = work_queue->work_queue_set_index();
DCHECK_LT(old_set, work_queue_heaps_.size());
DCHECK_NE(old_set, set_index);
work_queue->AssignSetIndex(set_index);
DCHECK_EQ(has_enqueue_order, work_queue->heap_handle().IsValid());
if (!has_enqueue_order)
return;
work_queue_heaps_[old_set].erase(work_queue->heap_handle());
bool was_empty = work_queue_heaps_[set_index].empty();
work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
if (work_queue_heaps_[old_set].empty())
observer_->WorkQueueSetBecameEmpty(old_set);
if (was_empty)
observer_->WorkQueueSetBecameNonEmpty(set_index);
}
void WorkQueueSets::OnQueuesFrontTaskChanged(WorkQueue* work_queue) {
EnqueueOrder enqueue_order;
size_t set_index = work_queue->work_queue_set_index();
DCHECK_EQ(this, work_queue->work_queue_sets());
DCHECK_LT(set_index, work_queue_heaps_.size());
DCHECK(work_queue->heap_handle().IsValid());
DCHECK(!work_queue_heaps_[set_index].empty()) << " set_index = " << set_index;
if (work_queue->GetFrontTaskEnqueueOrder(&enqueue_order)) {
// O(log n)
work_queue_heaps_[set_index].ChangeKey(work_queue->heap_handle(),
{enqueue_order, work_queue});
} else {
// O(log n)
work_queue_heaps_[set_index].erase(work_queue->heap_handle());
DCHECK(!work_queue->heap_handle().IsValid());
if (work_queue_heaps_[set_index].empty())
observer_->WorkQueueSetBecameEmpty(set_index);
}
}
void WorkQueueSets::OnTaskPushedToEmptyQueue(WorkQueue* work_queue) {
// NOTE if this function changes, we need to keep |WorkQueueSets::AddQueue| in
// sync.
DCHECK_EQ(this, work_queue->work_queue_sets());
EnqueueOrder enqueue_order;
bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
DCHECK(has_enqueue_order);
size_t set_index = work_queue->work_queue_set_index();
DCHECK_LT(set_index, work_queue_heaps_.size())
<< " set_index = " << set_index;
// |work_queue| should not be in work_queue_heaps_[set_index].
DCHECK(!work_queue->heap_handle().IsValid());
bool was_empty = work_queue_heaps_[set_index].empty();
work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
if (was_empty)
observer_->WorkQueueSetBecameNonEmpty(set_index);
}
void WorkQueueSets::OnPopMinQueueInSet(WorkQueue* work_queue) {
// Assume that |work_queue| contains the lowest enqueue_order.
size_t set_index = work_queue->work_queue_set_index();
DCHECK_EQ(this, work_queue->work_queue_sets());
DCHECK_LT(set_index, work_queue_heaps_.size());
DCHECK(!work_queue_heaps_[set_index].empty()) << " set_index = " << set_index;
DCHECK_EQ(work_queue_heaps_[set_index].Min().value, work_queue)
<< " set_index = " << set_index;
DCHECK(work_queue->heap_handle().IsValid());
EnqueueOrder enqueue_order;
if (work_queue->GetFrontTaskEnqueueOrder(&enqueue_order)) {
// O(log n)
work_queue_heaps_[set_index].ReplaceMin({enqueue_order, work_queue});
} else {
// O(log n)
work_queue_heaps_[set_index].Pop();
DCHECK(!work_queue->heap_handle().IsValid());
DCHECK(work_queue_heaps_[set_index].empty() ||
work_queue_heaps_[set_index].Min().value != work_queue);
if (work_queue_heaps_[set_index].empty()) {
observer_->WorkQueueSetBecameEmpty(set_index);
}
}
}
void WorkQueueSets::OnQueueBlocked(WorkQueue* work_queue) {
DCHECK_EQ(this, work_queue->work_queue_sets());
base::internal::HeapHandle heap_handle = work_queue->heap_handle();
if (!heap_handle.IsValid())
return;
size_t set_index = work_queue->work_queue_set_index();
DCHECK_LT(set_index, work_queue_heaps_.size());
work_queue_heaps_[set_index].erase(heap_handle);
if (work_queue_heaps_[set_index].empty())
observer_->WorkQueueSetBecameEmpty(set_index);
}
WorkQueue* WorkQueueSets::GetOldestQueueInSet(size_t set_index) const {
DCHECK_LT(set_index, work_queue_heaps_.size());
if (work_queue_heaps_[set_index].empty())
return nullptr;
WorkQueue* queue = work_queue_heaps_[set_index].Min().value;
DCHECK_EQ(set_index, queue->work_queue_set_index());
DCHECK(queue->heap_handle().IsValid());
return queue;
}
WorkQueue* WorkQueueSets::GetOldestQueueAndEnqueueOrderInSet(
size_t set_index,
EnqueueOrder* out_enqueue_order) const {
DCHECK_LT(set_index, work_queue_heaps_.size());
if (work_queue_heaps_[set_index].empty())
return nullptr;
const OldestTaskEnqueueOrder& oldest = work_queue_heaps_[set_index].Min();
DCHECK(oldest.value->heap_handle().IsValid());
*out_enqueue_order = oldest.key;
EnqueueOrder enqueue_order;
DCHECK(oldest.value->GetFrontTaskEnqueueOrder(&enqueue_order) &&
oldest.key == enqueue_order);
return oldest.value;
}
#if DCHECK_IS_ON()
WorkQueue* WorkQueueSets::GetRandomQueueInSet(size_t set_index) const {
DCHECK_LT(set_index, work_queue_heaps_.size());
if (work_queue_heaps_[set_index].empty())
return nullptr;
WorkQueue* queue =
work_queue_heaps_[set_index]
.begin()[Random() % work_queue_heaps_[set_index].size()]
.value;
DCHECK_EQ(set_index, queue->work_queue_set_index());
DCHECK(queue->heap_handle().IsValid());
return queue;
}
WorkQueue* WorkQueueSets::GetRandomQueueAndEnqueueOrderInSet(
size_t set_index,
EnqueueOrder* out_enqueue_order) const {
DCHECK_LT(set_index, work_queue_heaps_.size());
if (work_queue_heaps_[set_index].empty())
return nullptr;
const OldestTaskEnqueueOrder& chosen =
work_queue_heaps_[set_index]
.begin()[Random() % work_queue_heaps_[set_index].size()];
*out_enqueue_order = chosen.key;
EnqueueOrder enqueue_order;
DCHECK(chosen.value->GetFrontTaskEnqueueOrder(&enqueue_order) &&
chosen.key == enqueue_order);
return chosen.value;
}
#endif
bool WorkQueueSets::IsSetEmpty(size_t set_index) const {
DCHECK_LT(set_index, work_queue_heaps_.size())
<< " set_index = " << set_index;
return work_queue_heaps_[set_index].empty();
}
#if DCHECK_IS_ON() || !defined(NDEBUG)
bool WorkQueueSets::ContainsWorkQueueForTest(
const WorkQueue* work_queue) const {
EnqueueOrder enqueue_order;
bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
for (const base::internal::IntrusiveHeap<OldestTaskEnqueueOrder>& heap :
work_queue_heaps_) {
for (const OldestTaskEnqueueOrder& heap_value_pair : heap) {
if (heap_value_pair.value == work_queue) {
DCHECK(has_enqueue_order);
DCHECK_EQ(heap_value_pair.key, enqueue_order);
DCHECK_EQ(this, work_queue->work_queue_sets());
return true;
}
}
}
if (work_queue->work_queue_sets() == this) {
DCHECK(!has_enqueue_order);
return true;
}
return false;
}
#endif
void WorkQueueSets::CollectSkippedOverLowerPriorityTasks(
const internal::WorkQueue* selected_work_queue,
std::vector<const Task*>* result) const {
EnqueueOrder selected_enqueue_order;
CHECK(selected_work_queue->GetFrontTaskEnqueueOrder(&selected_enqueue_order));
for (size_t priority = selected_work_queue->work_queue_set_index() + 1;
priority < TaskQueue::kQueuePriorityCount; priority++) {
for (const OldestTaskEnqueueOrder& pair : work_queue_heaps_[priority]) {
pair.value->CollectTasksOlderThan(selected_enqueue_order, result);
}
}
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,162 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_
#define BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_
#include <array>
#include <map>
#include "base/base_export.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/sequence_manager/sequence_manager.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/work_queue.h"
#include "base/trace_event/traced_value.h"
namespace base {
namespace sequence_manager {
namespace internal {
// There is a WorkQueueSet for each scheduler priority and each WorkQueueSet
// uses an IntrusiveHeap, keyed by the enqueue order of each queue's oldest
// task, to keep track of which queue in the set has the oldest task (i.e. the
// one that should be run next if the TaskQueueSelector chooses to run a task
// at a given priority). The reason this works is that the heap keeps the
// smallest key, i.e. the oldest enqueue order, at the top.
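//
// A selector-side sketch (illustrative only; the observer, settings and the
// use of TaskQueue::kNormalPriority as a set index are assumptions):
//
//   WorkQueueSets sets("MySets", observer, settings);
//   sets.AddQueue(work_queue, TaskQueue::kNormalPriority);
//   // Later, when |work_queue|'s front task changes:
//   sets.OnQueuesFrontTaskChanged(work_queue);
//   // To pick the next queue to run at that priority:
//   WorkQueue* oldest = sets.GetOldestQueueInSet(TaskQueue::kNormalPriority);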
class BASE_EXPORT WorkQueueSets {
public:
class Observer {
public:
virtual ~Observer() {}
virtual void WorkQueueSetBecameEmpty(size_t set_index) = 0;
virtual void WorkQueueSetBecameNonEmpty(size_t set_index) = 0;
};
WorkQueueSets(const char* name,
Observer* observer,
const SequenceManager::Settings& settings);
~WorkQueueSets();
// O(log num queues)
void AddQueue(WorkQueue* queue, size_t set_index);
// O(log num queues)
void RemoveQueue(WorkQueue* work_queue);
// O(log num queues)
void ChangeSetIndex(WorkQueue* queue, size_t set_index);
// O(log num queues)
void OnQueuesFrontTaskChanged(WorkQueue* queue);
// O(log num queues)
void OnTaskPushedToEmptyQueue(WorkQueue* work_queue);
// If empty it's O(1) amortized, otherwise it's O(log num queues). Slightly
// faster on average than OnQueuesFrontTaskChanged.
// Assumes |work_queue| contains the lowest enqueue order in the set.
void OnPopMinQueueInSet(WorkQueue* work_queue);
// O(log num queues)
void OnQueueBlocked(WorkQueue* work_queue);
// O(1)
WorkQueue* GetOldestQueueInSet(size_t set_index) const;
// O(1)
WorkQueue* GetOldestQueueAndEnqueueOrderInSet(
size_t set_index,
EnqueueOrder* out_enqueue_order) const;
#if DCHECK_IS_ON()
// O(1)
WorkQueue* GetRandomQueueInSet(size_t set_index) const;
// O(1)
WorkQueue* GetRandomQueueAndEnqueueOrderInSet(
size_t set_index,
EnqueueOrder* out_enqueue_order) const;
#endif
// O(1)
bool IsSetEmpty(size_t set_index) const;
#if DCHECK_IS_ON() || !defined(NDEBUG)
// Note this iterates over everything in |work_queue_heaps_|.
// It's intended for use with DCHECKs and for testing.
bool ContainsWorkQueueForTest(const WorkQueue* queue) const;
#endif
const char* GetName() const { return name_; }
// Collects ready tasks which were skipped over when |selected_work_queue|
// was selected. Note this is somewhat expensive.
void CollectSkippedOverLowerPriorityTasks(
const internal::WorkQueue* selected_work_queue,
std::vector<const Task*>* result) const;
private:
struct OldestTaskEnqueueOrder {
EnqueueOrder key;
WorkQueue* value;
bool operator<=(const OldestTaskEnqueueOrder& other) const {
return key <= other.key;
}
void SetHeapHandle(base::internal::HeapHandle handle) {
value->set_heap_handle(handle);
}
void ClearHeapHandle() {
value->set_heap_handle(base::internal::HeapHandle());
}
HeapHandle GetHeapHandle() const { return value->heap_handle(); }
};
const char* const name_;
// For each set, |work_queue_heaps_| has a heap of WorkQueues ordered by the
// enqueue order of the oldest task in each WorkQueue.
std::array<base::internal::IntrusiveHeap<OldestTaskEnqueueOrder>,
TaskQueue::kQueuePriorityCount>
work_queue_heaps_;
#if DCHECK_IS_ON()
static inline uint64_t MurmurHash3(uint64_t value) {
value ^= value >> 33;
value *= uint64_t{0xFF51AFD7ED558CCD};
value ^= value >> 33;
value *= uint64_t{0xC4CEB9FE1A85EC53};
value ^= value >> 33;
return value;
}
// This is for a debugging feature which lets us randomize task selection. It's
// not for production use.
// TODO(alexclarke): Use a seedable PRNG from ::base if one is added.
uint64_t Random() const {
last_rand_ = MurmurHash3(last_rand_);
return last_rand_;
}
mutable uint64_t last_rand_;
#endif
Observer* const observer_;
DISALLOW_COPY_AND_ASSIGN(WorkQueueSets);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_