Repo created

Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions


@@ -0,0 +1,10 @@
etiennep@chromium.org
fdoray@chromium.org
gab@chromium.org
robliao@chromium.org
altimin@chromium.org
carlscab@google.com
skyostil@chromium.org
# TEAM: scheduler-dev@chromium.org
# COMPONENT: Internals>TaskScheduling


@@ -0,0 +1,14 @@
This directory has the following layout:
- base/task/: public APIs for posting tasks and managing task queues.
- base/task/thread_pool/: implementation of the ThreadPool.
- base/task/sequence_manager/: implementation of the SequenceManager.
Apart from embedders explicitly managing a ThreadPoolInstance and/or
SequenceManager instance(s) for their process/threads, the vast majority of
users should only need APIs in base/task/.
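
For the common case, posting a task is a one-liner via the public API. A minimal sketch (assuming the process runs a ThreadPoolInstance; the traits shown are illustrative):

```cpp
#include "base/bind.h"
#include "base/task/thread_pool.h"

// Post a fire-and-forget background task to the process-wide thread pool.
base::ThreadPool::PostTask(
    FROM_HERE, {base::TaskPriority::BEST_EFFORT},
    base::BindOnce([] { /* background work */ }));
```
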
Documentation:
* [Threading and tasks](/docs/threading_and_tasks.md)
* [Callbacks](/docs/callback.md)
* [Vision for future API changes](https://docs.google.com/document/d/1pySz2xeJ6kLlbzDnS2jqAC1F8T_6pLEV8pgaMfURXAw/edit)


@@ -0,0 +1,275 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/cancelable_task_tracker.h"
#include <stddef.h>
#include <utility>
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/callback_helpers.h"
#include "base/feature_list.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/metrics/histogram_macros.h"
#include "base/sequenced_task_runner.h"
#include "base/task/scoped_set_task_priority_for_current_thread.h"
#include "base/task_runner.h"
#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
namespace {
void RunOrPostToTaskRunner(scoped_refptr<SequencedTaskRunner> task_runner,
OnceClosure closure) {
if (task_runner->RunsTasksInCurrentSequence())
std::move(closure).Run();
else
task_runner->PostTask(FROM_HERE, std::move(closure));
}
// TODO(https://crbug.com/1009795): Remove these once we have established
// whether off-sequence cancelation is worthwhile.
// These values are persisted to logs. Entries should not be renumbered and
// numeric values should never be reused.
enum class TaskStatus {
kSameSequenceLive = 0,
kOffSequenceLive = 1,
kSameSequenceCanceled = 2,
kOffSequenceCanceled = 3,
kMaxValue = kOffSequenceCanceled,
};
void UmaRecordTaskDuration(bool same_sequence,
bool background,
bool canceled,
TimeDelta duration) {
#define DECLARE_HISTOGRAM(suffix) \
Histogram::FactoryTimeGet( \
"Scheduler.CancelableTaskTracker.TaskDuration2_" suffix, \
TimeDelta::FromMilliseconds(1), TimeDelta::FromSeconds(10), 50, \
Histogram::kUmaTargetedHistogramFlag)
static HistogramBase* histograms[] = {
DECLARE_HISTOGRAM("LiveForegroundOffSequence"),
DECLARE_HISTOGRAM("LiveForegroundSameSequence"),
DECLARE_HISTOGRAM("LiveBackgroundOffSequence"),
DECLARE_HISTOGRAM("LiveBackgroundSameSequence"),
DECLARE_HISTOGRAM("CanceledForegroundOffSequence"),
DECLARE_HISTOGRAM("CanceledForegroundSameSequence"),
DECLARE_HISTOGRAM("CanceledBackgroundOffSequence"),
DECLARE_HISTOGRAM("CanceledBackgroundSameSequence")};
int i = (same_sequence ? 1 : 0) + (background ? 2 : 0) + (canceled ? 4 : 0);
histograms[i]->AddTimeMillisecondsGranularity(duration);
}
const base::Feature kAllowOffSequenceTaskCancelation{
"AllowOffSequenceTaskCancelation", base::FEATURE_ENABLED_BY_DEFAULT};
bool AllowOffSequenceTaskCancelation() {
if (!base::FeatureList::GetInstance())
return true;
return base::FeatureList::IsEnabled(kAllowOffSequenceTaskCancelation);
}
} // namespace
// static
const CancelableTaskTracker::TaskId CancelableTaskTracker::kBadTaskId = 0;
CancelableTaskTracker::CancelableTaskTracker() {
weak_this_ = weak_factory_.GetWeakPtr();
}
CancelableTaskTracker::~CancelableTaskTracker() {
DCHECK(sequence_checker_.CalledOnValidSequence());
TryCancelAll();
}
CancelableTaskTracker::TaskId CancelableTaskTracker::PostTask(
TaskRunner* task_runner,
const Location& from_here,
OnceClosure task) {
DCHECK(sequence_checker_.CalledOnValidSequence());
CHECK(weak_this_);
return PostTaskAndReply(task_runner, from_here, std::move(task), DoNothing());
}
CancelableTaskTracker::TaskId CancelableTaskTracker::PostTaskAndReply(
TaskRunner* task_runner,
const Location& from_here,
OnceClosure task,
OnceClosure reply) {
DCHECK(sequence_checker_.CalledOnValidSequence());
CHECK(weak_this_);
// We need a SequencedTaskRunnerHandle to run |reply|.
DCHECK(SequencedTaskRunnerHandle::IsSet());
auto flag = MakeRefCounted<TaskCancellationFlag>();
TaskId id = next_id_;
next_id_++; // int64_t is big enough that we ignore the potential overflow.
// Unretained(this) is safe because |flag| will have been set to the
// "canceled" state after |this| is deleted.
OnceClosure untrack_closure =
BindOnce(&CancelableTaskTracker::Untrack, Unretained(this), id);
bool success = task_runner->PostTaskAndReply(
from_here,
BindOnce(&RunIfNotCanceled, SequencedTaskRunnerHandle::Get(), flag,
std::move(task)),
BindOnce(&RunThenUntrackIfNotCanceled, SequencedTaskRunnerHandle::Get(),
flag, std::move(reply), std::move(untrack_closure)));
if (!success)
return kBadTaskId;
Track(id, std::move(flag));
return id;
}
CancelableTaskTracker::TaskId CancelableTaskTracker::NewTrackedTaskId(
IsCanceledCallback* is_canceled_cb) {
DCHECK(sequence_checker_.CalledOnValidSequence());
DCHECK(SequencedTaskRunnerHandle::IsSet());
TaskId id = next_id_;
next_id_++; // int64_t is big enough that we ignore the potential overflow.
auto flag = MakeRefCounted<TaskCancellationFlag>();
// Unretained(this) is safe because |flag| will have been set to the
// "canceled" state after |this| is deleted.
OnceClosure untrack_closure =
BindOnce(&CancelableTaskTracker::Untrack, Unretained(this), id);
// Will always run |untrack_closure| on current sequence.
ScopedClosureRunner untrack_runner(
BindOnce(&RunOrPostToTaskRunner, SequencedTaskRunnerHandle::Get(),
BindOnce(&RunIfNotCanceled, SequencedTaskRunnerHandle::Get(),
flag, std::move(untrack_closure))));
*is_canceled_cb = BindRepeating(&IsCanceled, SequencedTaskRunnerHandle::Get(),
flag, std::move(untrack_runner));
Track(id, std::move(flag));
return id;
}
void CancelableTaskTracker::TryCancel(TaskId id) {
DCHECK(sequence_checker_.CalledOnValidSequence());
const auto it = task_flags_.find(id);
if (it == task_flags_.end()) {
// Two possibilities:
//
// 1. The task has already been untracked.
// 2. The TaskId is bad or unknown.
//
// Since this function is best-effort, it's OK to ignore these.
return;
}
it->second->data.Set();
// Remove |id| from |task_flags_| immediately, since we have no further
// use for tracking it. This allows the reply closures (see
// PostTaskAndReply()) for cancelled tasks to be skipped, since they have
// no clean-up to perform.
task_flags_.erase(it);
}
void CancelableTaskTracker::TryCancelAll() {
DCHECK(sequence_checker_.CalledOnValidSequence());
for (const auto& it : task_flags_)
it.second->data.Set();
task_flags_.clear();
}
bool CancelableTaskTracker::HasTrackedTasks() const {
DCHECK(sequence_checker_.CalledOnValidSequence());
return !task_flags_.empty();
}
// static
void CancelableTaskTracker::RunIfNotCanceled(
const scoped_refptr<SequencedTaskRunner>& origin_task_runner,
const scoped_refptr<TaskCancellationFlag>& flag,
OnceClosure task) {
// TODO(https://crbug.com/1009795): Record durations for executed tasks,
// correlated with whether the task runs on a background or foreground
// sequence, and whether it is the same sequence as the CancelableTaskTracker.
// Also correlate with whether the task was run despite being canceled, to
// allow an experiment to assess the value of off-sequence cancelation.
// Record canceled & off-sequence status for all tasks.
const bool was_canceled = flag->data.IsSet();
const bool same_sequence = origin_task_runner->RunsTasksInCurrentSequence();
const TaskStatus task_status =
was_canceled ? (same_sequence ? TaskStatus::kSameSequenceCanceled
: TaskStatus::kOffSequenceCanceled)
: (same_sequence ? TaskStatus::kSameSequenceLive
: TaskStatus::kOffSequenceLive);
UMA_HISTOGRAM_ENUMERATION("Scheduler.CancelableTaskTracker.TaskStatus",
task_status);
// Skip tasks if they are canceled, taking into account the off-sequence
// cancelation experiment.
const bool skip_task =
was_canceled && (AllowOffSequenceTaskCancelation() || same_sequence);
if (skip_task)
return;
// Run the task and record its duration.
const TimeTicks before_task_ticks = TimeTicks::Now();
std::move(task).Run();
const TimeDelta duration = TimeTicks::Now() - before_task_ticks;
const bool is_background =
internal::GetTaskPriorityForCurrentThread() < TaskPriority::USER_VISIBLE;
UmaRecordTaskDuration(same_sequence, is_background, was_canceled, duration);
}
// static
void CancelableTaskTracker::RunThenUntrackIfNotCanceled(
const scoped_refptr<SequencedTaskRunner>& origin_task_runner,
const scoped_refptr<TaskCancellationFlag>& flag,
OnceClosure task,
OnceClosure untrack) {
RunIfNotCanceled(origin_task_runner, flag, std::move(task));
RunIfNotCanceled(origin_task_runner, flag, std::move(untrack));
}
// static
bool CancelableTaskTracker::IsCanceled(
const scoped_refptr<SequencedTaskRunner>& origin_task_runner,
const scoped_refptr<TaskCancellationFlag>& flag,
const ScopedClosureRunner& cleanup_runner) {
return flag->data.IsSet() &&
(AllowOffSequenceTaskCancelation() ||
origin_task_runner->RunsTasksInCurrentSequence());
}
void CancelableTaskTracker::Track(TaskId id,
scoped_refptr<TaskCancellationFlag> flag) {
DCHECK(sequence_checker_.CalledOnValidSequence());
CHECK(weak_this_);
bool success = task_flags_.insert(std::make_pair(id, std::move(flag))).second;
DCHECK(success);
}
void CancelableTaskTracker::Untrack(TaskId id) {
DCHECK(sequence_checker_.CalledOnValidSequence());
CHECK(weak_this_);
size_t num = task_flags_.erase(id);
DCHECK_EQ(1u, num);
}
} // namespace base


@@ -0,0 +1,169 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// CancelableTaskTracker posts tasks (in the form of a OnceClosure) to a
// TaskRunner, and is able to cancel the task later if it's not needed
// anymore. On destruction, CancelableTaskTracker will cancel all
// tracked tasks.
//
// Each cancelable task can be associated with a reply (also a OnceClosure).
// After the task is run on the TaskRunner, |reply| will be posted back to the
// originating TaskRunner.
//
// NOTE:
//
// CancelableCallback (base/cancelable_callback.h) and WeakPtr binding are
// preferred solutions for canceling a task. However, they don't support
// cancelation from another sequence. This is sometimes a performance-critical
// requirement, e.g. canceling a database lookup task on the DB thread when the
// user changes the input text. If a best-effort cancelation of a task is
// performance-critical, CancelableTaskTracker is appropriate; otherwise, use
// one of the other mechanisms.
//
// THREAD-SAFETY:
//
// 1. A CancelableTaskTracker object must be created, used, and destroyed on a
// single sequence.
//
// 2. It's safe to destroy a CancelableTaskTracker while there are outstanding
// tasks. This is commonly used to cancel all outstanding tasks.
//
// 3. The task is deleted on the target sequence, and the reply is deleted on
// the originating sequence.
//
// 4. IsCanceledCallback can be run or deleted on any sequence.
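//
// USAGE EXAMPLE (a minimal sketch; |db_task_runner|, DoLookup() and
// OnLookupDone() are hypothetical):
//
//   CancelableTaskTracker tracker;  // Lives on the current sequence.
//   CancelableTaskTracker::TaskId id = tracker.PostTaskAndReply(
//       db_task_runner.get(), FROM_HERE,
//       BindOnce(&DoLookup),       // Runs on |db_task_runner|.
//       BindOnce(&OnLookupDone));  // Runs back on the posting sequence.
//   if (id != CancelableTaskTracker::kBadTaskId)
//     tracker.TryCancel(id);  // Best-effort: skips the task and/or the reply.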
#ifndef BASE_TASK_CANCELABLE_TASK_TRACKER_H_
#define BASE_TASK_CANCELABLE_TASK_TRACKER_H_
#include <stdint.h>
#include <memory>
#include <utility>
#include "base/base_export.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/containers/small_map.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/weak_ptr.h"
#include "base/post_task_and_reply_with_result_internal.h"
#include "base/sequence_checker.h"
#include "base/synchronization/atomic_flag.h"
namespace base {
class Location;
class ScopedClosureRunner;
class TaskRunner;
class BASE_EXPORT CancelableTaskTracker {
public:
// All values except kBadTaskId are valid.
typedef int64_t TaskId;
static const TaskId kBadTaskId;
using IsCanceledCallback = RepeatingCallback<bool()>;
CancelableTaskTracker();
// Cancels all tracked tasks.
~CancelableTaskTracker();
TaskId PostTask(TaskRunner* task_runner,
const Location& from_here,
OnceClosure task);
TaskId PostTaskAndReply(TaskRunner* task_runner,
const Location& from_here,
OnceClosure task,
OnceClosure reply);
template <typename TaskReturnType, typename ReplyArgType>
TaskId PostTaskAndReplyWithResult(TaskRunner* task_runner,
const Location& from_here,
OnceCallback<TaskReturnType()> task,
OnceCallback<void(ReplyArgType)> reply) {
auto* result = new std::unique_ptr<TaskReturnType>();
return PostTaskAndReply(
task_runner, from_here,
BindOnce(&internal::ReturnAsParamAdapter<TaskReturnType>,
std::move(task), Unretained(result)),
BindOnce(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
std::move(reply), Owned(result)));
}
// Creates a tracked TaskId and an associated IsCanceledCallback. Client can
// later call TryCancel() with the returned TaskId, and run |is_canceled_cb|
// from any thread to check whether the TaskId is canceled.
//
// The returned task ID is tracked until the last copy of
// |is_canceled_cb| is destroyed.
//
// Note: This function is used to address special cancelation requirements in
// existing code. You SHOULD NOT need this function in new code.
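//
// Example (a sketch; HasMoreWork() and DoSomeWork() are hypothetical):
//
//   CancelableTaskTracker::IsCanceledCallback is_canceled;
//   CancelableTaskTracker::TaskId id = tracker.NewTrackedTaskId(&is_canceled);
//   // |is_canceled| may be run from any sequence, e.g. polled inside a
//   // long-running task to stop early once TryCancel(id) has been called:
//   while (HasMoreWork() && !is_canceled.Run())
//     DoSomeWork();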
TaskId NewTrackedTaskId(IsCanceledCallback* is_canceled_cb);
// After calling this function, |task| and |reply| will not run. If the
// cancelation happens when |task| is running or has finished running, |reply|
// will not run. If |reply| is running or has finished running, cancelation is
// a noop.
//
// Note: It's OK to cancel a |task| more than once; later calls are noops.
void TryCancel(TaskId id);
// It's OK to call this function more than once; later calls are noops.
void TryCancelAll();
// Returns true iff there are in-flight tasks that are still being
// tracked.
bool HasTrackedTasks() const;
private:
// Cancellation flags are ref-counted to ensure they remain valid even if the
// tracker and its calling thread are torn down while there are still
// cancelable tasks queued to the target TaskRunner.
// See https://crbug.com/918948.
using TaskCancellationFlag = RefCountedData<AtomicFlag>;
static void RunIfNotCanceled(
const scoped_refptr<SequencedTaskRunner>& origin_task_runner,
const scoped_refptr<TaskCancellationFlag>& flag,
OnceClosure task);
static void RunThenUntrackIfNotCanceled(
const scoped_refptr<SequencedTaskRunner>& origin_task_runner,
const scoped_refptr<TaskCancellationFlag>& flag,
OnceClosure task,
OnceClosure untrack);
static bool IsCanceled(
const scoped_refptr<SequencedTaskRunner>& origin_task_runner,
const scoped_refptr<TaskCancellationFlag>& flag,
const ScopedClosureRunner& cleanup_runner);
void Track(TaskId id, scoped_refptr<TaskCancellationFlag> flag);
void Untrack(TaskId id);
// Typically the number of tasks is 0-2 and occasionally 3-4. But since
// this is a general API that could be used in unexpected ways, use a
// small_map instead of a flat_map to avoid falling over if there are many
// tasks.
small_map<std::map<TaskId, scoped_refptr<TaskCancellationFlag>>, 4>
task_flags_;
TaskId next_id_ = 1;
SequenceChecker sequence_checker_;
// TODO(https://crbug.com/1009795): Remove once crasher is resolved.
base::WeakPtr<CancelableTaskTracker> weak_this_;
base::WeakPtrFactory<CancelableTaskTracker> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(CancelableTaskTracker);
};
} // namespace base
#endif // BASE_TASK_CANCELABLE_TASK_TRACKER_H_


@@ -0,0 +1,131 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_CHECKED_LOCK_H_
#define BASE_TASK_COMMON_CHECKED_LOCK_H_
#include <memory>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/task/common/checked_lock_impl.h"
#include "base/thread_annotations.h"
namespace base {
namespace internal {
// CheckedLock should be used anywhere a Lock would be used in the base/task
// impl. When DCHECK_IS_ON(), lock checking occurs. Otherwise, CheckedLock is
// equivalent to base::Lock.
//
// The shape of CheckedLock is as follows:
// CheckedLock()
// Default constructor, no predecessor lock.
// DCHECKs
// On Acquisition if any CheckedLock is acquired on this thread.
// Okay if a universal predecessor is acquired.
//
// CheckedLock(const CheckedLock* predecessor)
// Constructor that specifies an allowed predecessor for that lock.
// DCHECKs
// On Construction if |predecessor| forms a predecessor lock cycle.
// On Acquisition if the previous lock acquired on the thread is not
// either |predecessor| or a universal predecessor. Okay if there
// was no previous lock acquired.
//
// CheckedLock(UniversalPredecessor universal_predecessor)
// Constructor for a lock that will allow the acquisition of any lock after
// it, without needing to explicitly be named a predecessor. Can only be
// acquired if no locks are currently held by this thread.
// DCHECKs
// On Acquisition if any CheckedLock is acquired on this thread.
//
// void Acquire()
// Acquires the lock.
//
// void Release()
// Releases the lock.
//
// void AssertAcquired()
// DCHECKs if the lock is not acquired.
//
// std::unique_ptr<ConditionVariable> CreateConditionVariable()
// Creates a condition variable using this as a lock.
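//
// Usage sketch (hypothetical locks, illustrating predecessor ordering):
//
//   CheckedLock outer;          // No predecessor.
//   CheckedLock inner(&outer);  // |outer| is the allowed predecessor.
//   {
//     CheckedAutoLock auto_outer(outer);
//     CheckedAutoLock auto_inner(inner);  // OK: previously acquired |outer|.
//   }
//   // Acquiring |inner| first (with no lock held) is also OK; acquiring it
//   // after an unrelated CheckedLock DCHECKs when DCHECK_IS_ON().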
#if DCHECK_IS_ON()
class LOCKABLE CheckedLock : public CheckedLockImpl {
public:
CheckedLock() = default;
explicit CheckedLock(const CheckedLock* predecessor)
: CheckedLockImpl(predecessor) {}
explicit CheckedLock(UniversalPredecessor universal_predecessor)
: CheckedLockImpl(universal_predecessor) {}
};
#else // DCHECK_IS_ON()
class LOCKABLE CheckedLock : public Lock {
public:
CheckedLock() = default;
explicit CheckedLock(const CheckedLock*) {}
explicit CheckedLock(UniversalPredecessor) {}
static void AssertNoLockHeldOnCurrentThread() {}
std::unique_ptr<ConditionVariable> CreateConditionVariable() {
return std::unique_ptr<ConditionVariable>(new ConditionVariable(this));
}
};
#endif // DCHECK_IS_ON()
// Provides the same functionality as base::AutoLock for CheckedLock.
using CheckedAutoLock = internal::BasicAutoLock<CheckedLock>;
// Provides the same functionality as base::AutoUnlock for CheckedLock.
using CheckedAutoUnlock = internal::BasicAutoUnlock<CheckedLock>;
// Provides the same functionality as base::AutoLockMaybe for CheckedLock.
using CheckedAutoLockMaybe = internal::BasicAutoLockMaybe<CheckedLock>;
// Informs the clang thread safety analysis that an aliased lock is acquired.
// Because the clang thread safety analysis doesn't understand aliased locks
// [1], this code wouldn't compile without AnnotateAcquiredLockAlias:
//
// class Example {
// public:
// CheckedLock lock_;
// int value GUARDED_BY(lock_) = 0;
// };
//
// Example example;
// CheckedLock* acquired = &example.lock_;
// CheckedAutoLock auto_lock(*acquired);
// AnnotateAcquiredLockAlias annotate(*acquired, example.lock_);
// example.value = 42; // Doesn't compile without |annotate|.
//
// [1] https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#no-alias-analysis
class SCOPED_LOCKABLE AnnotateAcquiredLockAlias {
public:
// |acquired_lock| is an acquired lock. |lock_alias| is an alias of
// |acquired_lock|.
AnnotateAcquiredLockAlias(const CheckedLock& acquired_lock,
const CheckedLock& lock_alias)
EXCLUSIVE_LOCK_FUNCTION(lock_alias)
: acquired_lock_(acquired_lock) {
DCHECK_EQ(&acquired_lock, &lock_alias);
acquired_lock_.AssertAcquired();
}
~AnnotateAcquiredLockAlias() UNLOCK_FUNCTION() {
acquired_lock_.AssertAcquired();
}
private:
const CheckedLock& acquired_lock_;
DISALLOW_COPY_AND_ASSIGN(AnnotateAcquiredLockAlias);
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_COMMON_CHECKED_LOCK_H_


@@ -0,0 +1,170 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/common/checked_lock_impl.h"
#include <algorithm>
#include <unordered_map>
#include <vector>
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/synchronization/condition_variable.h"
#include "base/task/common/checked_lock.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local.h"
namespace base {
namespace internal {
namespace {
class SafeAcquisitionTracker {
public:
SafeAcquisitionTracker() = default;
void RegisterLock(const CheckedLockImpl* const lock,
const CheckedLockImpl* const predecessor) {
DCHECK_NE(lock, predecessor) << "Reentrant locks are unsupported.";
AutoLock auto_lock(allowed_predecessor_map_lock_);
allowed_predecessor_map_[lock] = predecessor;
AssertSafePredecessor(lock);
}
void UnregisterLock(const CheckedLockImpl* const lock) {
AutoLock auto_lock(allowed_predecessor_map_lock_);
allowed_predecessor_map_.erase(lock);
}
void RecordAcquisition(const CheckedLockImpl* const lock) {
AssertSafeAcquire(lock);
GetAcquiredLocksOnCurrentThread()->push_back(lock);
}
void RecordRelease(const CheckedLockImpl* const lock) {
LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
const auto iter_at_lock =
std::find(acquired_locks->begin(), acquired_locks->end(), lock);
DCHECK(iter_at_lock != acquired_locks->end());
acquired_locks->erase(iter_at_lock);
}
void AssertNoLockHeldOnCurrentThread() {
DCHECK(GetAcquiredLocksOnCurrentThread()->empty());
}
private:
using LockVector = std::vector<const CheckedLockImpl*>;
using PredecessorMap =
std::unordered_map<const CheckedLockImpl*, const CheckedLockImpl*>;
// Asserts that the lock is safe to acquire. This must run before the
// acquisition is actually recorded.
void AssertSafeAcquire(const CheckedLockImpl* const lock) {
const LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
// If the thread currently holds no locks, this is inherently safe.
if (acquired_locks->empty())
return;
// A universal predecessor may not be acquired after any other lock.
DCHECK(!lock->is_universal_predecessor());
// Otherwise, make sure that the previous lock acquired is either an
// allowed predecessor for this lock or a universal predecessor.
const CheckedLockImpl* previous_lock = acquired_locks->back();
if (previous_lock->is_universal_predecessor())
return;
AutoLock auto_lock(allowed_predecessor_map_lock_);
// Using at() is exception-safe here as |lock| was registered already.
const CheckedLockImpl* allowed_predecessor =
allowed_predecessor_map_.at(lock);
DCHECK_EQ(previous_lock, allowed_predecessor);
}
// Asserts that |lock|'s registered predecessor is safe. Because
// CheckedLocks are registered at construction time and any predecessor
// specified on a CheckedLock must already exist, the first registered
// CheckedLock in a potential chain must have a null predecessor and is thus
// cycle-free. Any subsequent CheckedLock with a predecessor must come from
// the set of registered CheckedLocks. Since the registered CheckedLocks
// only contain cycle-free CheckedLocks, this subsequent CheckedLock is
// itself cycle-free and may be safely added to the registered CheckedLock
// set.
void AssertSafePredecessor(const CheckedLockImpl* lock) const {
allowed_predecessor_map_lock_.AssertAcquired();
// Using at() is exception-safe here as |lock| was registered already.
const CheckedLockImpl* predecessor = allowed_predecessor_map_.at(lock);
if (predecessor) {
DCHECK(allowed_predecessor_map_.find(predecessor) !=
allowed_predecessor_map_.end())
<< "CheckedLock was registered before its predecessor. "
<< "Potential cycle detected";
}
}
LockVector* GetAcquiredLocksOnCurrentThread() {
if (!tls_acquired_locks_.Get())
tls_acquired_locks_.Set(std::make_unique<LockVector>());
return tls_acquired_locks_.Get();
}
// Synchronizes access to |allowed_predecessor_map_|.
Lock allowed_predecessor_map_lock_;
// A map of allowed predecessors.
PredecessorMap allowed_predecessor_map_;
// A thread-local slot holding a vector of locks currently acquired on the
// current thread.
ThreadLocalOwnedPointer<LockVector> tls_acquired_locks_;
DISALLOW_COPY_AND_ASSIGN(SafeAcquisitionTracker);
};
LazyInstance<SafeAcquisitionTracker>::Leaky g_safe_acquisition_tracker =
LAZY_INSTANCE_INITIALIZER;
} // namespace
CheckedLockImpl::CheckedLockImpl() : CheckedLockImpl(nullptr) {}
CheckedLockImpl::CheckedLockImpl(const CheckedLockImpl* predecessor)
: is_universal_predecessor_(false) {
g_safe_acquisition_tracker.Get().RegisterLock(this, predecessor);
}
CheckedLockImpl::CheckedLockImpl(UniversalPredecessor)
: is_universal_predecessor_(true) {}
CheckedLockImpl::~CheckedLockImpl() {
g_safe_acquisition_tracker.Get().UnregisterLock(this);
}
void CheckedLockImpl::AssertNoLockHeldOnCurrentThread() {
g_safe_acquisition_tracker.Get().AssertNoLockHeldOnCurrentThread();
}
void CheckedLockImpl::Acquire() {
lock_.Acquire();
g_safe_acquisition_tracker.Get().RecordAcquisition(this);
}
void CheckedLockImpl::Release() {
lock_.Release();
g_safe_acquisition_tracker.Get().RecordRelease(this);
}
void CheckedLockImpl::AssertAcquired() const {
lock_.AssertAcquired();
}
std::unique_ptr<ConditionVariable> CheckedLockImpl::CreateConditionVariable() {
return std::make_unique<ConditionVariable>(&lock_);
}
} // namespace internal
} // namespace base


@@ -0,0 +1,54 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_CHECKED_LOCK_IMPL_H_
#define BASE_TASK_COMMON_CHECKED_LOCK_IMPL_H_
#include <memory>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/synchronization/lock.h"
namespace base {
class ConditionVariable;
namespace internal {
struct UniversalPredecessor {};
// A regular lock with simple deadlock correctness checking.
// This lock tracks all of the available locks to make sure that any locks are
// acquired in an expected order.
// See checked_lock.h for details.
class BASE_EXPORT CheckedLockImpl {
public:
CheckedLockImpl();
explicit CheckedLockImpl(const CheckedLockImpl* predecessor);
explicit CheckedLockImpl(UniversalPredecessor);
~CheckedLockImpl();
static void AssertNoLockHeldOnCurrentThread();
void Acquire() EXCLUSIVE_LOCK_FUNCTION(lock_);
void Release() UNLOCK_FUNCTION(lock_);
void AssertAcquired() const;
std::unique_ptr<ConditionVariable> CreateConditionVariable();
bool is_universal_predecessor() const { return is_universal_predecessor_; }
private:
Lock lock_;
const bool is_universal_predecessor_;
DISALLOW_COPY_AND_ASSIGN(CheckedLockImpl);
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_COMMON_CHECKED_LOCK_IMPL_H_


@@ -0,0 +1,81 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_INTRUSIVE_HEAP_H_
#define BASE_TASK_COMMON_INTRUSIVE_HEAP_H_
#include "base/containers/intrusive_heap.h"
namespace base {
namespace internal {
using HeapHandle = base::HeapHandle;
template <typename T>
struct IntrusiveHeapImpl {
struct GreaterUsingLessEqual {
bool operator()(const T& t1, const T& t2) const { return t2 <= t1; }
};
using type = base::IntrusiveHeap<T, GreaterUsingLessEqual>;
};
// base/task wants a min-heap that uses the <= operator, whereas
// base::IntrusiveHeap is a max-heap by default. This is a very thin adapter
// over that class, exposing the minimal functionality required by the
// base/task IntrusiveHeap clients.
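//
// Usage sketch (|Task| is hypothetical; per base::IntrusiveHeap's element
// contract it must provide heap-handle accessors, e.g.
// SetHeapHandle()/ClearHeapHandle(), and an operator<= for the min-ordering):
//
//   IntrusiveHeap<Task> heap;
//   heap.insert(std::move(task));
//   while (!heap.empty()) {
//     const Task& next = heap.Min();  // Smallest element under operator<=.
//     // ... process |next| ...
//     heap.Pop();
//   }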
template <typename T>
class IntrusiveHeap : private IntrusiveHeapImpl<T>::type {
public:
using IntrusiveHeapImplType = typename IntrusiveHeapImpl<T>::type;
// The majority of sets in the scheduler have 0-3 items in them (a few will
// have perhaps up to 100), so we usually only have to allocate memory once.
static constexpr size_t kMinimumHeapSize = 4;
IntrusiveHeap() { IntrusiveHeapImplType::reserve(kMinimumHeapSize); }
~IntrusiveHeap() = default;
IntrusiveHeap& operator=(IntrusiveHeap&& other) = default;
bool empty() const { return IntrusiveHeapImplType::empty(); }
size_t size() const { return IntrusiveHeapImplType::size(); }
void Clear() {
IntrusiveHeapImplType::clear();
IntrusiveHeapImplType::reserve(kMinimumHeapSize);
}
const T& Min() const { return IntrusiveHeapImplType::top(); }
void Pop() { IntrusiveHeapImplType::pop(); }
void insert(T&& element) {
IntrusiveHeapImplType::insert(std::move(element));
}
void erase(HeapHandle handle) { IntrusiveHeapImplType::erase(handle); }
void ReplaceMin(T&& element) {
IntrusiveHeapImplType::ReplaceTop(std::move(element));
}
void ChangeKey(HeapHandle handle, T&& element) {
IntrusiveHeapImplType::Replace(handle, std::move(element));
}
const T& at(HeapHandle handle) const {
return IntrusiveHeapImplType::at(handle);
}
// Caution, mutating the heap invalidates iterators!
const T* begin() const { return IntrusiveHeapImplType::data(); }
const T* end() const { return IntrusiveHeapImplType::data() + size(); }
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_COMMON_INTRUSIVE_HEAP_H_


@@ -0,0 +1,104 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/common/operations_controller.h"
#include "base/logging.h"
namespace base {
namespace internal {
OperationsController::OperationsController() = default;
OperationsController::~OperationsController() {
#if DCHECK_IS_ON()
// An OperationsController may only be deleted when it was either not
// accepting operations or after it was shutdown and there are no in flight
// attempts to perform operations.
auto value = state_and_count_.load();
DCHECK(
ExtractState(value) == State::kRejectingOperations ||
(ExtractState(value) == State::kShuttingDown && ExtractCount(value) == 0))
<< value;
#endif
}
bool OperationsController::StartAcceptingOperations() {
// Release semantics are required to ensure that all memory accesses made on
// this thread happen-before any others done on a thread which is later
// allowed to perform an operation.
auto prev_value = state_and_count_.fetch_or(kAcceptingOperationsBitMask,
std::memory_order_release);
DCHECK_EQ(ExtractState(prev_value), State::kRejectingOperations);
// The count is the number of rejected operations, unwind them now.
auto num_rejected = ExtractCount(prev_value);
DecrementBy(num_rejected);
return num_rejected != 0;
}
OperationsController::OperationToken OperationsController::TryBeginOperation() {
// Acquire semantics are required to ensure that a thread which is allowed to
// perform an operation sees all the memory side-effects that happened-before
// StartAcceptingOperations(). They're also required so that no operations on
// this thread (e.g. the operation itself) can be reordered before this one.
auto prev_value = state_and_count_.fetch_add(1, std::memory_order_acquire);
switch (ExtractState(prev_value)) {
case State::kRejectingOperations:
return OperationToken(nullptr);
case State::kAcceptingOperations:
return OperationToken(this);
case State::kShuttingDown:
DecrementBy(1);
return OperationToken(nullptr);
}
}
void OperationsController::ShutdownAndWaitForZeroOperations() {
// Acquire semantics are required to guarantee that all memory side-effects
// made by other threads that were allowed to perform operations are
// synchronized with this thread before it returns from this method.
auto prev_value = state_and_count_.fetch_or(kShuttingDownBitMask,
std::memory_order_acquire);
switch (ExtractState(prev_value)) {
case State::kRejectingOperations:
// The count is the number of rejected operations, unwind them now.
DecrementBy(ExtractCount(prev_value));
break;
case State::kAcceptingOperations:
if (ExtractCount(prev_value) != 0) {
shutdown_complete_.Wait();
}
break;
case State::kShuttingDown:
DCHECK(false) << "Multiple calls to ShutdownAndWaitForZeroOperations()";
break;
}
}
OperationsController::State OperationsController::ExtractState(uint32_t value) {
if (value & kShuttingDownBitMask) {
return State::kShuttingDown;
} else if (value & kAcceptingOperationsBitMask) {
return State::kAcceptingOperations;
} else {
return State::kRejectingOperations;
}
}
void OperationsController::DecrementBy(uint32_t n) {
// Release semantics are required to ensure that no operation on the current
// thread (e.g. the operation itself) can be reordered after this one.
auto prev_value = state_and_count_.fetch_sub(n, std::memory_order_release);
DCHECK_LE(n, ExtractCount(prev_value)) << "Decrement underflow";
if (ExtractState(prev_value) == State::kShuttingDown &&
ExtractCount(prev_value) == n) {
shutdown_complete_.Signal();
}
}
} // namespace internal
} // namespace base


@@ -0,0 +1,151 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_OPERATIONS_CONTROLLER_H_
#define BASE_TASK_COMMON_OPERATIONS_CONTROLLER_H_
#include <atomic>
#include <cstdint>
#include "base/synchronization/waitable_event.h"
namespace base {
namespace internal {
// A thread-safe controller that manages critical multi-threaded operations
// without using locks.
//
// The controller is used to determine if operations are allowed, and to keep
// track of how many are currently active. Users will call TryBeginOperation()
// before starting such operations. If the call succeeds the user can run the
// operation and the controller will keep track of it until the user signals
// that the operation is completed. No operations are allowed before
// StartAcceptingOperations() is called, or after
// ShutdownAndWaitForZeroOperations() is called.
//
// There is no explicit way of telling the controller when an operation is
// completed; instead, for convenience, TryBeginOperation() returns an
// RAII-like object that does so on destruction.
//
// For example:
//
// OperationsController controller_;
//
// void SetUp() {
// controller_.StartAcceptingOperations();
// }
//
// void TearDown() {
// controller_.ShutdownAndWaitForZeroOperations();
// }
//
// void MaybeRunOperation() {
// auto operation_token = controller_.TryBeginOperation();
// if (operation_token) {
// Process();
// }
// }
//
// This class is thread-safe, but note that StartAcceptingOperations() can
// never be called after ShutdownAndWaitForZeroOperations().
class BASE_EXPORT OperationsController {
public:
// The owner of an OperationToken which evaluates to true can safely perform
// an operation while being certain it happens-after
// StartAcceptingOperations() and happens-before
// ShutdownAndWaitForZeroOperations(). Releasing this OperationToken
// relinquishes this right.
//
// This class is thread-safe.
class OperationToken {
public:
~OperationToken() {
if (outer_)
outer_->DecrementBy(1);
}
OperationToken(const OperationToken&) = delete;
OperationToken(OperationToken&& other) {
this->outer_ = other.outer_;
other.outer_ = nullptr;
}
operator bool() const { return !!outer_; }
private:
friend class OperationsController;
explicit OperationToken(OperationsController* outer) : outer_(outer) {}
OperationsController* outer_;
};
OperationsController();
// Users must call ShutdownAndWaitForZeroOperations() before destroying an
// instance of this class if StartAcceptingOperations() was called.
~OperationsController();
OperationsController(const OperationsController&) = delete;
OperationsController& operator=(const OperationsController&) = delete;
// Starts to accept operations (before this point TryBeginOperation() returns
// an invalid token). Returns true if an attempt to perform an operation was
// made and denied before StartAcceptingOperations() was called. Can be called
// at most once, never after ShutdownAndWaitForZeroOperations().
bool StartAcceptingOperations();
// Returns an RAII-like object that implicitly converts to true if operations
// are allowed, i.e. if this call happens-after StartAcceptingOperations() and
// happens-before ShutdownAndWaitForZeroOperations(); otherwise the object
// converts to false. On successful return, this OperationsController will
// keep track of the operation until the returned object goes out of scope.
OperationToken TryBeginOperation();
// Prevents further calls to TryBeginOperation() from succeeding and waits for
// all the ongoing operations to complete.
//
// Attention: Can only be called once.
void ShutdownAndWaitForZeroOperations();
private:
// Atomic representation of the state of this class. The upper 2 bits keep
// track of flag-like values and the remaining bits are used as a counter.
// The 2 flags represent 3 different states:
//
// State | AcceptOperations Bit | ShuttingDown Bit
// --------------------------------------------------------------
// kRejectingOperations | 0 | 0
// kAcceptingOperations | 1 | 0
// kShuttingDown | * | 1
//
// The counter keeps track of the rejected operations while in the
// kRejectingOperations state, and of the number of in-flight operations
// otherwise. If the count reaches zero while in the shutting-down state,
// |shutdown_complete_| will be signaled.
static constexpr uint32_t kShuttingDownBitMask = uint32_t{1} << 31;
static constexpr uint32_t kAcceptingOperationsBitMask = uint32_t{1} << 30;
static constexpr uint32_t kFlagsBitMask =
(kShuttingDownBitMask | kAcceptingOperationsBitMask);
static constexpr uint32_t kCountBitMask = ~kFlagsBitMask;
enum class State {
kRejectingOperations,
kAcceptingOperations,
kShuttingDown,
};
// Helper methods for the bit fiddling. Pass a |state_and_count_| value to
// extract state or count out of it.
static uint32_t ExtractCount(uint32_t value) { return value & kCountBitMask; }
static State ExtractState(uint32_t value);
// Decrements the counter by |n| and signals |shutdown_complete_| if needed.
void DecrementBy(uint32_t n);
std::atomic<uint32_t> state_and_count_{0};
WaitableEvent shutdown_complete_;
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_COMMON_OPERATIONS_CONTROLLER_H_


@@ -0,0 +1,97 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/common/scoped_defer_task_posting.h"
#include "base/no_destructor.h"
#include "base/threading/thread_local.h"
namespace base {
namespace {
ThreadLocalPointer<ScopedDeferTaskPosting>& GetScopedDeferTaskPostingTLS() {
static NoDestructor<ThreadLocalPointer<ScopedDeferTaskPosting>> tls;
return *tls;
}
} // namespace
// static
void ScopedDeferTaskPosting::PostOrDefer(
scoped_refptr<SequencedTaskRunner> task_runner,
const Location& from_here,
OnceClosure task,
base::TimeDelta delay) {
ScopedDeferTaskPosting* scope = Get();
if (scope) {
scope->DeferTaskPosting(std::move(task_runner), from_here, std::move(task),
delay);
return;
}
task_runner->PostDelayedTask(from_here, std::move(task), delay);
}
// static
ScopedDeferTaskPosting* ScopedDeferTaskPosting::Get() {
return GetScopedDeferTaskPostingTLS().Get();
}
// static
bool ScopedDeferTaskPosting::Set(ScopedDeferTaskPosting* scope) {
// We can post a task from within a ScheduleWork in some tests, so we can
// get nested scopes. In this case ignore all except the top one.
if (Get() && scope)
return false;
GetScopedDeferTaskPostingTLS().Set(scope);
return true;
}
// static
bool ScopedDeferTaskPosting::IsPresent() {
return !!Get();
}
ScopedDeferTaskPosting::ScopedDeferTaskPosting() {
top_level_scope_ = Set(this);
}
ScopedDeferTaskPosting::~ScopedDeferTaskPosting() {
if (!top_level_scope_) {
DCHECK(deferred_tasks_.empty());
return;
}
Set(nullptr);
for (DeferredTask& deferred_task : deferred_tasks_) {
deferred_task.task_runner->PostDelayedTask(deferred_task.from_here,
std::move(deferred_task.task),
deferred_task.delay);
}
}
ScopedDeferTaskPosting::DeferredTask::DeferredTask(
scoped_refptr<SequencedTaskRunner> task_runner,
Location from_here,
OnceClosure task,
base::TimeDelta delay)
: task_runner(std::move(task_runner)),
from_here(from_here),
task(std::move(task)),
delay(delay) {}
ScopedDeferTaskPosting::DeferredTask::DeferredTask(DeferredTask&&) = default;
ScopedDeferTaskPosting::DeferredTask::~DeferredTask() = default;
void ScopedDeferTaskPosting::DeferTaskPosting(
scoped_refptr<SequencedTaskRunner> task_runner,
const Location& from_here,
OnceClosure task,
base::TimeDelta delay) {
deferred_tasks_.push_back(
{std::move(task_runner), from_here, std::move(task), delay});
}
} // namespace base


@@ -0,0 +1,75 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_SCOPED_DEFER_TASK_POSTING_H_
#define BASE_TASK_COMMON_SCOPED_DEFER_TASK_POSTING_H_
#include "base/base_export.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/sequenced_task_runner.h"
namespace base {
// Tracing wants to post tasks from within a trace event within PostTask, but
// this can lead to a deadlock. Create a scope to ensure that we are posting
// the tasks in question outside of the scope of the lock.
// NOTE: This scope affects only the thread it is created on. All other threads
// can still post tasks.
//
// TODO(altimin): It should be possible to get rid of this scope, but this
// requires refactoring TimeDomain to ensure that TimeDomain never changes and
// we can read current time without grabbing a lock.
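//
// Usage sketch (|runner| and Work() are hypothetical):
//
//   {
//     ScopedDeferTaskPosting defer;  // E.g. entered while a lock is held.
//     // Queued in the scope rather than posted immediately:
//     ScopedDeferTaskPosting::PostOrDefer(runner, FROM_HERE,
//                                         base::BindOnce(&Work),
//                                         base::TimeDelta());
//   }  // Deferred tasks are posted here, after the scope (and lock) exit.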
class BASE_EXPORT ScopedDeferTaskPosting {
public:
static void PostOrDefer(scoped_refptr<SequencedTaskRunner> task_runner,
const Location& from_here,
OnceClosure task,
base::TimeDelta delay);
static bool IsPresent();
ScopedDeferTaskPosting();
~ScopedDeferTaskPosting();
private:
static ScopedDeferTaskPosting* Get();
// Returns whether the |scope| was set as active, which happens only
// when the scope wasn't set before.
static bool Set(ScopedDeferTaskPosting* scope);
void DeferTaskPosting(scoped_refptr<SequencedTaskRunner> task_runner,
const Location& from_here,
OnceClosure task,
base::TimeDelta delay);
struct DeferredTask {
DeferredTask(scoped_refptr<SequencedTaskRunner> task_runner,
Location from_here,
OnceClosure task,
base::TimeDelta delay);
DeferredTask(DeferredTask&& task);
~DeferredTask();
scoped_refptr<SequencedTaskRunner> task_runner;
Location from_here;
OnceClosure task;
base::TimeDelta delay;
DISALLOW_COPY_AND_ASSIGN(DeferredTask);
};
std::vector<DeferredTask> deferred_tasks_;
// Scopes can be nested (e.g. ScheduleWork inside PostTask can post a task
// to another task runner), so we want to know whether the scope is top-level
// or not.
bool top_level_scope_ = false;
DISALLOW_COPY_AND_ASSIGN(ScopedDeferTaskPosting);
};
} // namespace base
#endif // BASE_TASK_COMMON_SCOPED_DEFER_TASK_POSTING_H_


@@ -0,0 +1,191 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/common/task_annotator.h"
#include <array>
#include "base/debug/activity_tracker.h"
#include "base/debug/alias.h"
#include "base/no_destructor.h"
#include "base/threading/thread_local.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace {
TaskAnnotator::ObserverForTesting* g_task_annotator_observer = nullptr;
// Used as a sentinel to determine if a TLS-stored PendingTask is a dummy one.
static constexpr int kSentinelSequenceNum =
static_cast<int>(0xF00DBAADF00DBAAD);
// Returns the TLS slot that stores the PendingTask currently in progress on
// each thread. Used to allow creating a breadcrumb of program counters on the
// stack to help identify a task's origin in crashes.
ThreadLocalPointer<PendingTask>* GetTLSForCurrentPendingTask() {
static NoDestructor<ThreadLocalPointer<PendingTask>> instance;
return instance.get();
}
// Determines whether or not the given |task| is a dummy pending task that has
// been injected by ScopedSetIpcHash solely for the purposes of
// tracking IPC context.
bool IsDummyPendingTask(const PendingTask* task) {
if (task->sequence_num == kSentinelSequenceNum &&
!task->posted_from.has_source_info() &&
!task->posted_from.program_counter()) {
return true;
}
return false;
}
} // namespace
const PendingTask* TaskAnnotator::CurrentTaskForThread() {
auto* current_task = GetTLSForCurrentPendingTask()->Get();
// Don't return "dummy" current tasks that are only used for storing IPC
// context.
if (current_task && IsDummyPendingTask(current_task))
return nullptr;
return current_task;
}
TaskAnnotator::TaskAnnotator() = default;
TaskAnnotator::~TaskAnnotator() = default;
void TaskAnnotator::WillQueueTask(const char* trace_event_name,
PendingTask* pending_task,
const char* task_queue_name) {
DCHECK(trace_event_name);
DCHECK(pending_task);
DCHECK(task_queue_name);
TRACE_EVENT_WITH_FLOW1(
TRACE_DISABLED_BY_DEFAULT("toplevel.flow"), trace_event_name,
TRACE_ID_LOCAL(GetTaskTraceID(*pending_task)), TRACE_EVENT_FLAG_FLOW_OUT,
"task_queue_name", task_queue_name);
DCHECK(!pending_task->task_backtrace[0])
<< "Task backtrace was already set, task posted twice??";
if (pending_task->task_backtrace[0])
return;
const auto* parent_task = CurrentTaskForThread();
if (!parent_task)
return;
pending_task->ipc_hash = parent_task->ipc_hash;
pending_task->task_backtrace[0] = parent_task->posted_from.program_counter();
std::copy(parent_task->task_backtrace.begin(),
parent_task->task_backtrace.end() - 1,
pending_task->task_backtrace.begin() + 1);
pending_task->task_backtrace_overflow =
parent_task->task_backtrace_overflow ||
parent_task->task_backtrace.back() != nullptr;
}
void TaskAnnotator::RunTask(const char* trace_event_name,
PendingTask* pending_task) {
DCHECK(trace_event_name);
DCHECK(pending_task);
debug::ScopedTaskRunActivity task_activity(*pending_task);
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("toplevel.ipc"),
"TaskAnnotator::RunTask", "ipc_hash", pending_task->ipc_hash);
TRACE_EVENT_WITH_FLOW0(
TRACE_DISABLED_BY_DEFAULT("toplevel.flow"), trace_event_name,
TRACE_ID_LOCAL(GetTaskTraceID(*pending_task)), TRACE_EVENT_FLAG_FLOW_IN);
// Before running the task, store the IPC context and the task backtrace with
// the chain of PostTasks that resulted in this call and deliberately alias it
// to ensure it is on the stack if the task crashes. Be careful not to assume
// that the variable itself will have the expected value when displayed by the
// optimizer in an optimized build. Look at a memory dump of the stack.
static constexpr int kStackTaskTraceSnapshotSize =
PendingTask::kTaskBacktraceLength + 4;
std::array<const void*, kStackTaskTraceSnapshotSize> task_backtrace;
// Store a marker to locate |task_backtrace| content easily on a memory
// dump. The layout is as follows:
//
// +------------ +----+---------+-----+-----------+----------+-------------+
// | Head Marker | PC | frame 0 | ... | frame N-1 | IPC hash | Tail Marker |
// +------------ +----+---------+-----+-----------+----------+-------------+
//
// Markers glossary (compliments of wez):
// cool code,do it dude!
// 0x c001 c0de d0 17 d00d
// o dude,i did it biig
// 0x 0 d00d 1 d1d 17 8119
task_backtrace.front() = reinterpret_cast<void*>(0xc001c0ded017d00d);
task_backtrace.back() = reinterpret_cast<void*>(0x0d00d1d1d178119);
task_backtrace[1] = pending_task->posted_from.program_counter();
std::copy(pending_task->task_backtrace.begin(),
pending_task->task_backtrace.end(), task_backtrace.begin() + 2);
task_backtrace[kStackTaskTraceSnapshotSize - 2] =
reinterpret_cast<void*>(pending_task->ipc_hash);
debug::Alias(&task_backtrace);
auto* tls = GetTLSForCurrentPendingTask();
auto* previous_pending_task = tls->Get();
tls->Set(pending_task);
if (g_task_annotator_observer)
g_task_annotator_observer->BeforeRunTask(pending_task);
std::move(pending_task->task).Run();
tls->Set(previous_pending_task);
}
uint64_t TaskAnnotator::GetTaskTraceID(const PendingTask& task) const {
return (static_cast<uint64_t>(task.sequence_num) << 32) |
((static_cast<uint64_t>(reinterpret_cast<intptr_t>(this)) << 32) >>
32);
}
// static
void TaskAnnotator::RegisterObserverForTesting(ObserverForTesting* observer) {
DCHECK(!g_task_annotator_observer);
g_task_annotator_observer = observer;
}
// static
void TaskAnnotator::ClearObserverForTesting() {
g_task_annotator_observer = nullptr;
}
TaskAnnotator::ScopedSetIpcHash::ScopedSetIpcHash(uint32_t ipc_hash) {
// We store the IPC context in the currently running task. If there is none
// then introduce a dummy task.
auto* tls = GetTLSForCurrentPendingTask();
auto* current_task = tls->Get();
if (!current_task) {
dummy_pending_task_ = std::make_unique<PendingTask>();
dummy_pending_task_->sequence_num = kSentinelSequenceNum;
current_task = dummy_pending_task_.get();
tls->Set(current_task);
}
old_ipc_hash_ = current_task->ipc_hash;
current_task->ipc_hash = ipc_hash;
}
TaskAnnotator::ScopedSetIpcHash::~ScopedSetIpcHash() {
auto* tls = GetTLSForCurrentPendingTask();
auto* current_task = tls->Get();
DCHECK(current_task);
if (current_task == dummy_pending_task_.get()) {
tls->Set(nullptr);
} else {
current_task->ipc_hash = old_ipc_hash_;
}
}
} // namespace base


@@ -0,0 +1,83 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_TASK_ANNOTATOR_H_
#define BASE_TASK_COMMON_TASK_ANNOTATOR_H_
#include <stdint.h>
#include <memory>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/pending_task.h"
namespace base {
// Implements common debug annotations for posted tasks. This includes data
// such as task origins, IPC message contexts, queueing durations and memory
// usage.
class BASE_EXPORT TaskAnnotator {
public:
class ObserverForTesting {
public:
// Invoked just before RunTask() in the scope in which the task is about to
// be executed.
virtual void BeforeRunTask(const PendingTask* pending_task) = 0;
};
// This is used to set the |ipc_hash| field for PendingTasks. It is intended
// to be used only from within generated IPC handler dispatch code.
class ScopedSetIpcHash;
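// A sketch of the intended pattern (|msg| and DispatchMessage() are
// hypothetical):
//
//   TaskAnnotator::ScopedSetIpcHash scoped_ipc_hash(msg.ipc_hash());
//   DispatchMessage(msg);  // Tasks posted during dispatch inherit the hash.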
static const PendingTask* CurrentTaskForThread();
TaskAnnotator();
~TaskAnnotator();
// Called to indicate that a task is about to be queued to run in the future,
// giving one last chance for this TaskAnnotator to add metadata to
// |pending_task| before it is moved into the queue. |task_queue_name| must
// live for the duration of the process.
void WillQueueTask(const char* trace_event_name,
PendingTask* pending_task,
const char* task_queue_name);
// Run a previously queued task.
void RunTask(const char* trace_event_name, PendingTask* pending_task);
// Creates a process-wide unique ID to represent this task in trace events.
// This will be mangled with a Process ID hash to reduce the likelihood of
// colliding with TaskAnnotator pointers on other processes. Callers may use
// this when generating their own flow events (i.e. when passing
// |queue_function == nullptr| in above methods).
uint64_t GetTaskTraceID(const PendingTask& task) const;
private:
friend class TaskAnnotatorBacktraceIntegrationTest;
// Registers an ObserverForTesting that will be invoked by all TaskAnnotators'
// RunTask(). This registration and the implementation of BeforeRunTask() are
// responsible for ensuring thread-safety.
static void RegisterObserverForTesting(ObserverForTesting* observer);
static void ClearObserverForTesting();
DISALLOW_COPY_AND_ASSIGN(TaskAnnotator);
};
class BASE_EXPORT TaskAnnotator::ScopedSetIpcHash {
public:
explicit ScopedSetIpcHash(uint32_t ipc_hash);
~ScopedSetIpcHash();
private:
std::unique_ptr<PendingTask> dummy_pending_task_;
uint32_t old_ipc_hash_ = 0;
DISALLOW_COPY_AND_ASSIGN(ScopedSetIpcHash);
};
} // namespace base
#endif // BASE_TASK_COMMON_TASK_ANNOTATOR_H_


@@ -0,0 +1,125 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/lazy_thread_pool_task_runner.h"
#include <utility>
#include "base/lazy_instance_helpers.h"
#include "base/logging.h"
#include "base/task/thread_pool.h"
namespace base {
namespace internal {
namespace {
ScopedLazyTaskRunnerListForTesting* g_scoped_lazy_task_runner_list_for_testing =
nullptr;
} // namespace
template <typename TaskRunnerType, bool com_sta>
void LazyThreadPoolTaskRunner<TaskRunnerType, com_sta>::Reset() {
subtle::AtomicWord state = subtle::Acquire_Load(&state_);
DCHECK_NE(state, kLazyInstanceStateCreating) << "Race: all threads should be "
"unwound in unittests before "
"resetting TaskRunners.";
// Return if no reference is held by this instance.
if (!state)
return;
// Release the reference acquired in Get().
SequencedTaskRunner* task_runner = reinterpret_cast<TaskRunnerType*>(state);
task_runner->Release();
// Clear the state.
subtle::NoBarrier_Store(&state_, 0);
}
template <>
scoped_refptr<SequencedTaskRunner>
LazyThreadPoolTaskRunner<SequencedTaskRunner, false>::Create() {
// It is invalid to specify a SingleThreadTaskRunnerThreadMode with a
// LazyThreadPoolSequencedTaskRunner.
DCHECK_EQ(thread_mode_, SingleThreadTaskRunnerThreadMode::SHARED);
return ThreadPool::CreateSequencedTaskRunner(traits_);
}
template <>
scoped_refptr<SingleThreadTaskRunner>
LazyThreadPoolTaskRunner<SingleThreadTaskRunner, false>::Create() {
return ThreadPool::CreateSingleThreadTaskRunner(traits_, thread_mode_);
}
#if defined(OS_WIN)
template <>
scoped_refptr<SingleThreadTaskRunner>
LazyThreadPoolTaskRunner<SingleThreadTaskRunner, true>::Create() {
return ThreadPool::CreateCOMSTATaskRunner(traits_, thread_mode_);
}
#endif
// static
template <typename TaskRunnerType, bool com_sta>
TaskRunnerType* LazyThreadPoolTaskRunner<TaskRunnerType, com_sta>::CreateRaw(
void* void_self) {
auto self =
reinterpret_cast<LazyThreadPoolTaskRunner<TaskRunnerType, com_sta>*>(
void_self);
scoped_refptr<TaskRunnerType> task_runner = self->Create();
// Acquire a reference to the TaskRunner. The reference will either
// never be released or be released in Reset(). The reference is not
// managed by a scoped_refptr because adding a scoped_refptr member to
// LazyThreadPoolTaskRunner would prevent its static initialization.
task_runner->AddRef();
// Reset this instance when the current
// ScopedLazyTaskRunnerListForTesting is destroyed, if any.
if (g_scoped_lazy_task_runner_list_for_testing) {
g_scoped_lazy_task_runner_list_for_testing->AddCallback(
BindOnce(&LazyThreadPoolTaskRunner<TaskRunnerType, com_sta>::Reset,
Unretained(self)));
}
return task_runner.get();
}
template <typename TaskRunnerType, bool com_sta>
scoped_refptr<TaskRunnerType>
LazyThreadPoolTaskRunner<TaskRunnerType, com_sta>::Get() {
return WrapRefCounted(subtle::GetOrCreateLazyPointer(
&state_, &LazyThreadPoolTaskRunner<TaskRunnerType, com_sta>::CreateRaw,
reinterpret_cast<void*>(this), nullptr, nullptr));
}
template class LazyThreadPoolTaskRunner<SequencedTaskRunner, false>;
template class LazyThreadPoolTaskRunner<SingleThreadTaskRunner, false>;
#if defined(OS_WIN)
template class LazyThreadPoolTaskRunner<SingleThreadTaskRunner, true>;
#endif
ScopedLazyTaskRunnerListForTesting::ScopedLazyTaskRunnerListForTesting() {
DCHECK(!g_scoped_lazy_task_runner_list_for_testing);
g_scoped_lazy_task_runner_list_for_testing = this;
}
ScopedLazyTaskRunnerListForTesting::~ScopedLazyTaskRunnerListForTesting() {
internal::CheckedAutoLock auto_lock(lock_);
for (auto& callback : callbacks_)
std::move(callback).Run();
g_scoped_lazy_task_runner_list_for_testing = nullptr;
}
void ScopedLazyTaskRunnerListForTesting::AddCallback(OnceClosure callback) {
internal::CheckedAutoLock auto_lock(lock_);
callbacks_.push_back(std::move(callback));
}
} // namespace internal
} // namespace base


@@ -0,0 +1,226 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_LAZY_THREAD_POOL_TASK_RUNNER_H_
#define BASE_TASK_LAZY_THREAD_POOL_TASK_RUNNER_H_
#include <vector>
#include "base/atomicops.h"
#include "base/callback.h"
#include "base/compiler_specific.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/task/common/checked_lock.h"
#include "base/task/single_thread_task_runner_thread_mode.h"
#include "base/task/task_traits.h"
#include "base/thread_annotations.h"
#include "build/build_config.h"
// Lazy(Sequenced|SingleThread|COMSTA)TaskRunner lazily creates a TaskRunner.
//
// Lazy(Sequenced|SingleThread|COMSTA)TaskRunner is meant to be instantiated in
// an anonymous namespace (no static initializer is generated) and used to post
// tasks to the same thread-pool-bound sequence/thread from pieces of code that
// don't have a better way of sharing a TaskRunner. It is important to use this
// class instead of a self-managed global variable or LazyInstance so that the
// TaskRunners do not outlive the scope of the TaskEnvironment in unit tests
// (otherwise the next test in the same process will die in use-after-frees).
//
// IMPORTANT: Only use this API as a last resort. Prefer storing a
// (Sequenced|SingleThread)TaskRunner returned by
// base::ThreadPool::Create(Sequenced|SingleThread|COMSTA)TaskRunner() as a
// member on an object accessible by all PostTask() call sites.
//
// Example usage 1:
//
// namespace {
// base::LazyThreadPoolSequencedTaskRunner g_sequenced_task_runner =
// LAZY_THREAD_POOL_SEQUENCED_TASK_RUNNER_INITIALIZER(
// base::TaskTraits(base::MayBlock(),
// base::TaskPriority::USER_VISIBLE));
// } // namespace
//
// void SequencedFunction() {
// // Different invocations of this function post to the same
// // MayBlock() SequencedTaskRunner.
// g_sequenced_task_runner.Get()->PostTask(FROM_HERE, base::BindOnce(...));
// }
//
// Example usage 2:
//
// namespace {
// base::LazyThreadPoolSequencedTaskRunner g_sequenced_task_runner =
// LAZY_THREAD_POOL_SEQUENCED_TASK_RUNNER_INITIALIZER(
// base::TaskTraits(base::MayBlock()));
// } // namespace
//
// // Code from different files can access the SequencedTaskRunner via this
// // function.
// scoped_refptr<base::SequencedTaskRunner> GetTaskRunner() {
// return g_sequenced_task_runner.Get();
// }
namespace base {
namespace internal {
template <typename TaskRunnerType, bool com_sta>
class BASE_EXPORT LazyThreadPoolTaskRunner;
} // namespace internal
// Lazy SequencedTaskRunner.
using LazyThreadPoolSequencedTaskRunner =
internal::LazyThreadPoolTaskRunner<SequencedTaskRunner, false>;
// Lazy SingleThreadTaskRunner.
using LazyThreadPoolSingleThreadTaskRunner =
internal::LazyThreadPoolTaskRunner<SingleThreadTaskRunner, false>;
#if defined(OS_WIN)
// Lazy COM-STA enabled SingleThreadTaskRunner.
using LazyThreadPoolCOMSTATaskRunner =
internal::LazyThreadPoolTaskRunner<SingleThreadTaskRunner, true>;
#endif
// Helper macros to generate a variable name by concatenation.
#define LAZY_TASK_RUNNER_CONCATENATE_INTERNAL2(a, b) a##b
#define LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(a, b) \
LAZY_TASK_RUNNER_CONCATENATE_INTERNAL2(a, b)
// Use the macros below to initialize a LazyThreadPoolTaskRunner. These macros
// verify that their arguments are constexpr, which is important to prevent the
// generation of a static initializer.
// |traits| are TaskTraits used when creating the SequencedTaskRunner.
#define LAZY_THREAD_POOL_SEQUENCED_TASK_RUNNER_INITIALIZER(traits) \
base::LazyThreadPoolSequencedTaskRunner::CreateInternal(traits); \
/* ThreadPool() as a trait is deprecated and implicit here */ \
static_assert(!traits.use_thread_pool(), ""); \
ALLOW_UNUSED_TYPE constexpr base::TaskTraits \
LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(kVerifyTraitsAreConstexpr, \
__LINE__) = traits
// |traits| are TaskTraits used when creating the SingleThreadTaskRunner.
// |thread_mode| specifies whether the SingleThreadTaskRunner can share its
// thread with other SingleThreadTaskRunners.
#define LAZY_THREAD_POOL_SINGLE_THREAD_TASK_RUNNER_INITIALIZER(traits, \
thread_mode) \
base::LazyThreadPoolSingleThreadTaskRunner::CreateInternal(traits, \
thread_mode); \
/* ThreadPool() as a trait is deprecated and implicit here */ \
static_assert(!traits.use_thread_pool(), ""); \
ALLOW_UNUSED_TYPE constexpr base::TaskTraits \
LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(kVerifyTraitsAreConstexpr, \
__LINE__) = traits; \
ALLOW_UNUSED_TYPE constexpr base::SingleThreadTaskRunnerThreadMode \
LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(kVerifyThreadModeIsConstexpr, \
__LINE__) = thread_mode
// |traits| are TaskTraits used when creating the COM STA
// SingleThreadTaskRunner. |thread_mode| specifies whether the COM STA
// SingleThreadTaskRunner can share its thread with other
// SingleThreadTaskRunners.
#define LAZY_COM_STA_TASK_RUNNER_INITIALIZER(traits, thread_mode) \
base::LazyThreadPoolCOMSTATaskRunner::CreateInternal(traits, thread_mode); \
/* ThreadPool() as a trait is deprecated and implicit here */ \
static_assert(!traits.use_thread_pool(), ""); \
ALLOW_UNUSED_TYPE constexpr base::TaskTraits \
LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(kVerifyTraitsAreConstexpr, \
__LINE__) = traits; \
ALLOW_UNUSED_TYPE constexpr base::SingleThreadTaskRunnerThreadMode \
LAZY_TASK_RUNNER_CONCATENATE_INTERNAL(kVerifyThreadModeIsConstexpr, \
__LINE__) = thread_mode
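// For illustration only (not part of the original macros' documentation), a
// sketch of using the single-thread initializer; the traits shown are
// arbitrary:
//
// namespace {
// base::LazyThreadPoolSingleThreadTaskRunner g_single_thread_task_runner =
//     LAZY_THREAD_POOL_SINGLE_THREAD_TASK_RUNNER_INITIALIZER(
//         base::TaskTraits(base::MayBlock()),
//         base::SingleThreadTaskRunnerThreadMode::SHARED);
// }  // namespace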
namespace internal {
template <typename TaskRunnerType, bool com_sta>
class BASE_EXPORT LazyThreadPoolTaskRunner {
public:
// Use the macros above rather than a direct call to this.
//
// |traits| are TaskTraits to use to create the TaskRunner. If this
// LazyThreadPoolTaskRunner is specialized to create a SingleThreadTaskRunner,
// |thread_mode| specifies whether the SingleThreadTaskRunner can share its
// thread with other SingleThreadTaskRunners. Otherwise, it is unused.
static constexpr LazyThreadPoolTaskRunner CreateInternal(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode =
SingleThreadTaskRunnerThreadMode::SHARED) {
return LazyThreadPoolTaskRunner(traits, thread_mode);
}
// Returns the TaskRunner held by this instance. Creates it if it didn't
// already exist. Thread-safe.
scoped_refptr<TaskRunnerType> Get();
private:
constexpr LazyThreadPoolTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode =
SingleThreadTaskRunnerThreadMode::SHARED)
: traits_(traits), thread_mode_(thread_mode) {}
// Releases the TaskRunner held by this instance.
void Reset();
// Creates and returns a new TaskRunner.
scoped_refptr<TaskRunnerType> Create();
// Creates a new TaskRunner via Create(), adds an explicit ref to it, and
// returns it raw. Used as an adapter for lazy instance helpers. Static and
// takes |this| as an explicit param to match the void* signature of
// GetOrCreateLazyPointer().
static TaskRunnerType* CreateRaw(void* void_self);
// TaskTraits to create the TaskRunner.
const TaskTraits traits_;
// SingleThreadTaskRunnerThreadMode to create the TaskRunner.
const SingleThreadTaskRunnerThreadMode thread_mode_;
// Can have 3 states:
// - This instance does not hold a TaskRunner: 0
// - This instance is creating a TaskRunner: kLazyInstanceStateCreating
// - This instance holds a TaskRunner: Pointer to the TaskRunner.
// LazyInstance's internals are reused to handle transition between states.
subtle::AtomicWord state_ = 0;
// No DISALLOW_COPY_AND_ASSIGN since that prevents static initialization with
// Visual Studio (warning C4592: 'symbol will be dynamically initialized
// (implementation limitation))'.
};
// When a LazyThreadPoolTaskRunner becomes active (invokes Get()), it adds a
// callback to the current ScopedLazyTaskRunnerListForTesting, if any.
// Callbacks run when the ScopedLazyTaskRunnerListForTesting is
// destroyed. In a test process, a ScopedLazyTaskRunnerListForTesting
// must be instantiated before any LazyThreadPoolTaskRunner becomes active.
class BASE_EXPORT ScopedLazyTaskRunnerListForTesting {
public:
ScopedLazyTaskRunnerListForTesting();
~ScopedLazyTaskRunnerListForTesting();
private:
friend class LazyThreadPoolTaskRunner<SequencedTaskRunner, false>;
friend class LazyThreadPoolTaskRunner<SingleThreadTaskRunner, false>;
#if defined(OS_WIN)
friend class LazyThreadPoolTaskRunner<SingleThreadTaskRunner, true>;
#endif
// Add |callback| to the list of callbacks to run on destruction.
void AddCallback(OnceClosure callback);
CheckedLock lock_;
// List of callbacks to run on destruction.
std::vector<OnceClosure> callbacks_ GUARDED_BY(lock_);
DISALLOW_COPY_AND_ASSIGN(ScopedLazyTaskRunnerListForTesting);
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_LAZY_THREAD_POOL_TASK_RUNNER_H_

@ -0,0 +1,188 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/post_job.h"
#include "base/task/scoped_set_task_priority_for_current_thread.h"
#include "base/task/thread_pool/job_task_source.h"
#include "base/task/thread_pool/pooled_task_runner_delegate.h"
#include "base/task/thread_pool/thread_pool_impl.h"
#include "base/task/thread_pool/thread_pool_instance.h"
namespace base {
JobDelegate::JobDelegate(
internal::JobTaskSource* task_source,
internal::PooledTaskRunnerDelegate* pooled_task_runner_delegate)
: task_source_(task_source),
pooled_task_runner_delegate_(pooled_task_runner_delegate) {
DCHECK(task_source_);
#if DCHECK_IS_ON()
recorded_increase_version_ = task_source_->GetConcurrencyIncreaseVersion();
// Record max concurrency before running the worker task.
recorded_max_concurrency_ = task_source_->GetMaxConcurrency();
#endif // DCHECK_IS_ON()
}
JobDelegate::~JobDelegate() {
#if DCHECK_IS_ON()
// When ShouldYield() returns false, the worker task is expected to do
// work before returning.
size_t expected_max_concurrency = recorded_max_concurrency_;
if (!last_should_yield_ && expected_max_concurrency > 0)
--expected_max_concurrency;
AssertExpectedConcurrency(expected_max_concurrency);
#endif // DCHECK_IS_ON()
}
bool JobDelegate::ShouldYield() {
#if DCHECK_IS_ON()
// ShouldYield() shouldn't be called again after returning true.
DCHECK(!last_should_yield_);
#endif // DCHECK_IS_ON()
const bool should_yield =
task_source_->ShouldYield() ||
(pooled_task_runner_delegate_ &&
pooled_task_runner_delegate_->ShouldYield(task_source_));
#if DCHECK_IS_ON()
last_should_yield_ = should_yield;
#endif // DCHECK_IS_ON()
return should_yield;
}
void JobDelegate::YieldIfNeeded() {
// TODO(crbug.com/839091): Implement this.
}
void JobDelegate::NotifyConcurrencyIncrease() {
task_source_->NotifyConcurrencyIncrease();
}
void JobDelegate::AssertExpectedConcurrency(size_t expected_max_concurrency) {
// In dcheck builds, verify that max concurrency falls in one of the following
// cases:
// 1) Max concurrency behaves normally and is at or below the expected value.
// 2) Max concurrency increased above the expected value, which implies there
//    are new work items that the associated worker task didn't see, and
//    NotifyConcurrencyIncrease() should be called to adjust the number of
//    workers.
//  a) NotifyConcurrencyIncrease() was already called and the recorded
//     concurrency version is out of date, i.e. less than the actual version.
//  b) NotifyConcurrencyIncrease() has not yet been called, in which case the
//     function waits for an imminent increase of the concurrency version, or
//     for max concurrency to decrease to or below the expected value.
// This prevents ill-formed GetMaxConcurrency() implementations that:
// - Don't decrease with the number of remaining work items.
// - Don't return an up-to-date value.
#if DCHECK_IS_ON()
// Case 1:
if (task_source_->GetMaxConcurrency() <= expected_max_concurrency)
return;
// Case 2a:
const size_t actual_version = task_source_->GetConcurrencyIncreaseVersion();
DCHECK_LE(recorded_increase_version_, actual_version);
if (recorded_increase_version_ < actual_version)
return;
// Case 2b:
const bool updated = task_source_->WaitForConcurrencyIncreaseUpdate(
recorded_increase_version_);
DCHECK(updated ||
task_source_->GetMaxConcurrency() <= expected_max_concurrency)
<< "Value returned by |max_concurrency_callback| is expected to "
"decrease, unless NotifyConcurrencyIncrease() is called.";
recorded_increase_version_ = task_source_->GetConcurrencyIncreaseVersion();
recorded_max_concurrency_ = task_source_->GetMaxConcurrency();
#endif // DCHECK_IS_ON()
}
JobHandle::JobHandle() = default;
JobHandle::JobHandle(scoped_refptr<internal::JobTaskSource> task_source)
: task_source_(std::move(task_source)) {}
JobHandle::~JobHandle() {
DCHECK(!task_source_)
<< "The Job must be cancelled, detached or joined before its "
"JobHandle is destroyed.";
}
JobHandle::JobHandle(JobHandle&&) = default;
JobHandle& JobHandle::operator=(JobHandle&& other) {
DCHECK(!task_source_)
<< "The Job must be cancelled, detached or joined before its "
"JobHandle is re-assigned.";
task_source_ = std::move(other.task_source_);
return *this;
}
void JobHandle::UpdatePriority(TaskPriority new_priority) {
task_source_->delegate()->UpdatePriority(task_source_, new_priority);
}
void JobHandle::NotifyConcurrencyIncrease() {
task_source_->NotifyConcurrencyIncrease();
}
void JobHandle::Join() {
DCHECK_GE(internal::GetTaskPriorityForCurrentThread(),
task_source_->priority_racy())
<< "Join may not be called on Job with higher priority than the current "
"thread.";
UpdatePriority(internal::GetTaskPriorityForCurrentThread());
bool must_run = task_source_->WillJoin();
while (must_run)
must_run = task_source_->RunJoinTask();
// Remove |task_source_| from the ThreadPool to prevent access to
// |max_concurrency_callback| after Join().
task_source_->delegate()->RemoveJobTaskSource(task_source_);
task_source_ = nullptr;
}
void JobHandle::Cancel() {
task_source_->Cancel();
Join();
}
void JobHandle::CancelAndDetach() {
task_source_->Cancel();
Detach();
}
void JobHandle::Detach() {
DCHECK(task_source_);
task_source_ = nullptr;
}
JobHandle PostJob(const Location& from_here,
const TaskTraits& traits,
RepeatingCallback<void(JobDelegate*)> worker_task,
RepeatingCallback<size_t()> max_concurrency_callback) {
DCHECK(ThreadPoolInstance::Get())
<< "Ref. Prerequisite section of post_task.h.\n\n"
"Hint: if this is in a unit test, you're likely merely missing a "
"base::test::TaskEnvironment member in your fixture.\n";
// ThreadPool is implicitly the destination for PostJob(). Extension traits
// cannot be used.
DCHECK_EQ(traits.extension_id(),
TaskTraitsExtensionStorage::kInvalidExtensionId);
auto task_source = base::MakeRefCounted<internal::JobTaskSource>(
from_here, traits, std::move(worker_task),
std::move(max_concurrency_callback),
static_cast<internal::ThreadPoolImpl*>(ThreadPoolInstance::Get()));
const bool queued =
static_cast<internal::ThreadPoolImpl*>(ThreadPoolInstance::Get())
->EnqueueJobTaskSource(task_source);
if (queued)
return internal::JobTaskSource::CreateJobHandle(std::move(task_source));
return JobHandle();
}
} // namespace base

@ -0,0 +1,167 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_POST_JOB_H_
#define BASE_TASK_POST_JOB_H_
#include "base/base_export.h"
#include "base/callback.h"
#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/task/task_traits.h"
#include "base/time/time.h"
namespace base {
namespace internal {
class JobTaskSource;
class PooledTaskRunnerDelegate;
}
// Delegate that's passed to Job's worker task, providing an entry point to
// communicate with the scheduler.
class BASE_EXPORT JobDelegate {
public:
// A JobDelegate is instantiated for each worker task that is run.
// |task_source| is the task source whose worker task is running with this
// delegate and |pooled_task_runner_delegate| is used by ShouldYield() to
// check whether the pool wants this worker task to yield (null if this worker
// should never yield -- e.g. when the main thread is a worker).
JobDelegate(internal::JobTaskSource* task_source,
internal::PooledTaskRunnerDelegate* pooled_task_runner_delegate);
~JobDelegate();
// Returns true if the worker task should return from the current thread
// ASAP. Workers should periodically invoke ShouldYield() (or
// YieldIfNeeded()) as often as is reasonable.
bool ShouldYield();
// If ShouldYield(), this will pause the current thread (allowing it to be
// replaced in the pool); no-ops otherwise. If it pauses, it will resume and
// return from this call whenever higher priority work completes.
// Prefer ShouldYield() over this (only use YieldIfNeeded() when unwinding
// the stack is not possible).
void YieldIfNeeded();
// Notifies the scheduler that max concurrency was increased, and the number
// of workers should be adjusted accordingly. See PostJob() for more details.
void NotifyConcurrencyIncrease();
private:
// Verifies that either max concurrency is lower or equal to
// |expected_max_concurrency|, or there is an increase version update
// triggered by NotifyConcurrencyIncrease().
void AssertExpectedConcurrency(size_t expected_max_concurrency);
internal::JobTaskSource* const task_source_;
internal::PooledTaskRunnerDelegate* const pooled_task_runner_delegate_;
#if DCHECK_IS_ON()
// Used in AssertExpectedConcurrency(), see that method's impl for details.
// Value of max concurrency recorded before running the worker task.
size_t recorded_max_concurrency_;
// Value of the increase version recorded before running the worker task.
size_t recorded_increase_version_;
// Value returned by the last call to ShouldYield().
bool last_should_yield_ = false;
#endif
DISALLOW_COPY_AND_ASSIGN(JobDelegate);
};
// Handle returned when posting a Job. Provides methods to control execution of
// the posted Job.
class BASE_EXPORT JobHandle {
public:
JobHandle();
// A job must either be joined, canceled or detached before the JobHandle is
// destroyed.
~JobHandle();
JobHandle(JobHandle&&);
JobHandle& operator=(JobHandle&&);
// Returns true if associated with a Job.
explicit operator bool() const { return task_source_ != nullptr; }
// Update this Job's priority.
void UpdatePriority(TaskPriority new_priority);
// Notifies the scheduler that max concurrency was increased, and the number
// of workers should be adjusted accordingly. See PostJob() for more details.
void NotifyConcurrencyIncrease();
// Contributes to the job on this thread. Doesn't return until all tasks have
// completed and max concurrency becomes 0. This also promotes this Job's
// priority to be at least as high as the calling thread's priority.
void Join();
// Forces all existing workers to yield ASAP. Waits until they have all
// returned from the Job's callback before returning.
void Cancel();
// Forces all existing workers to yield ASAP but doesn't wait for them.
// Warning, this is dangerous if the Job's callback is bound to or has access
// to state which may be deleted after this call.
void CancelAndDetach();
// Can be invoked before ~JobHandle() to avoid waiting on the job completing.
void Detach();
private:
friend class internal::JobTaskSource;
explicit JobHandle(scoped_refptr<internal::JobTaskSource> task_source);
scoped_refptr<internal::JobTaskSource> task_source_;
DISALLOW_COPY_AND_ASSIGN(JobHandle);
};
// Posts a repeating |worker_task| with specific |traits| to run in parallel on
// base::ThreadPool.
// Returns a JobHandle associated with the Job, which can be joined, canceled or
// detached.
// To avoid scheduling overhead, |worker_task| should do as much work as
// possible in a loop when invoked, and JobDelegate::ShouldYield() should be
// periodically invoked to conditionally exit and let the scheduler prioritize
// work.
//
// A canonical implementation of |worker_task| looks like:
// void WorkerTask(JobDelegate* job_delegate) {
// while (!job_delegate->ShouldYield()) {
// auto work_item = worker_queue.TakeWorkItem(); // Smallest unit of work.
// if (!work_item)
// return;
// ProcessWork(work_item);
// }
// }
//
// |max_concurrency_callback| controls the maximum number of threads calling
// |worker_task| concurrently. |worker_task| is only invoked if the number of
// threads previously running |worker_task| was less than the value returned by
// |max_concurrency_callback|. In general, |max_concurrency_callback| should
// return the latest number of incomplete work items (smallest unit of work)
// left to process. JobHandle/JobDelegate::NotifyConcurrencyIncrease() *must*
// be invoked shortly after |max_concurrency_callback| starts returning a value
// larger than previously returned values. This usually happens when new work
// items are added and the API user wants additional threads to invoke
// |worker_task| concurrently. The callbacks may be called concurrently on any
// thread until the job is complete. If the job handle is detached, the
// callbacks may still be called, so they must not access global state that
// could be destroyed.
//
// |traits| requirements:
// - base::ThreadPolicy must be specified if the priority of the task runner
// will ever be increased from BEST_EFFORT.
JobHandle BASE_EXPORT
PostJob(const Location& from_here,
const TaskTraits& traits,
RepeatingCallback<void(JobDelegate*)> worker_task,
RepeatingCallback<size_t()> max_concurrency_callback);
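// A minimal end-to-end sketch (illustrative only; |worker_queue| is the
// hypothetical thread-safe work source from the canonical example above, and
// Size() is an assumed method on it):
//
//   JobHandle handle = PostJob(
//       FROM_HERE, {TaskPriority::USER_VISIBLE},
//       BindRepeating(&WorkerTask),  // The canonical worker task above.
//       BindRepeating([]() -> size_t {
//         return worker_queue.Size();  // Latest number of incomplete items.
//       }));
//   // If new items are later added to |worker_queue|:
//   //   handle.NotifyConcurrencyIncrease();
//   handle.Join();  // Or handle.Cancel() / handle.Detach().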
} // namespace base
#endif // BASE_TASK_POST_JOB_H_

@ -0,0 +1,142 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/post_task.h"
#include <utility>
#include "base/logging.h"
#include "base/task/scoped_set_task_priority_for_current_thread.h"
#include "base/task/task_executor.h"
#include "base/task/thread_pool/thread_pool_impl.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/threading/post_task_and_reply_impl.h"
namespace base {
namespace {
class PostTaskAndReplyWithTraitsTaskRunner
: public internal::PostTaskAndReplyImpl {
public:
explicit PostTaskAndReplyWithTraitsTaskRunner(const TaskTraits& traits)
: traits_(traits) {}
private:
bool PostTask(const Location& from_here, OnceClosure task) override {
::base::PostTask(from_here, traits_, std::move(task));
return true;
}
const TaskTraits traits_;
};
TaskExecutor* GetTaskExecutorForTraits(const TaskTraits& traits) {
const bool has_extension =
traits.extension_id() != TaskTraitsExtensionStorage::kInvalidExtensionId;
DCHECK(has_extension ^ traits.use_thread_pool())
<< "A destination (e.g. ThreadPool or BrowserThread) must be specified "
"to use the post_task.h API. However, you should prefer the direct "
"thread_pool.h or browser_thread.h APIs in new code.";
if (traits.use_thread_pool()) {
DCHECK(ThreadPoolInstance::Get())
<< "Ref. Prerequisite section of post_task.h for base::ThreadPool "
"usage.\n"
"Hint: if this is in a unit test, you're likely merely missing a "
"base::test::TaskEnvironment member in your fixture (or your "
"fixture is using a base::test::SingleThreadTaskEnvironment and now "
"needs a full base::test::TaskEnvironment).\n";
return static_cast<internal::ThreadPoolImpl*>(ThreadPoolInstance::Get());
}
// Assume |has_extension| per above invariant.
TaskExecutor* executor = GetRegisteredTaskExecutorForTraits(traits);
DCHECK(executor)
<< "A TaskExecutor wasn't yet registered for this extension.\n"
"Hint: if this is in a unit test, you're likely missing a "
"content::BrowserTaskEnvironment member in your fixture.";
return executor;
}
} // namespace
bool PostTask(const Location& from_here, OnceClosure task) {
// TODO(skyostil): Make task traits required here too.
return PostDelayedTask(from_here, {ThreadPool()}, std::move(task),
TimeDelta());
}
bool PostTaskAndReply(const Location& from_here,
OnceClosure task,
OnceClosure reply) {
return PostTaskAndReply(from_here, {ThreadPool()}, std::move(task),
std::move(reply));
}
bool PostTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task) {
return PostDelayedTask(from_here, traits, std::move(task), TimeDelta());
}
bool PostDelayedTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
TimeDelta delay) {
return GetTaskExecutorForTraits(traits)->PostDelayedTask(
from_here, traits, std::move(task), delay);
}
bool PostTaskAndReply(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
OnceClosure reply) {
return PostTaskAndReplyWithTraitsTaskRunner(traits).PostTaskAndReply(
from_here, std::move(task), std::move(reply));
}
scoped_refptr<TaskRunner> CreateTaskRunner(const TaskTraits& traits) {
return GetTaskExecutorForTraits(traits)->CreateTaskRunner(traits);
}
scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
const TaskTraits& traits) {
return GetTaskExecutorForTraits(traits)->CreateSequencedTaskRunner(traits);
}
scoped_refptr<UpdateableSequencedTaskRunner>
CreateUpdateableSequencedTaskRunner(const TaskTraits& traits) {
DCHECK(ThreadPoolInstance::Get())
<< "Ref. Prerequisite section of post_task.h.\n\n"
"Hint: if this is in a unit test, you're likely merely missing a "
"base::test::TaskEnvironment member in your fixture.\n";
DCHECK(traits.use_thread_pool())
<< "The base::UseThreadPool() trait is mandatory with "
"CreateUpdateableSequencedTaskRunner().";
CHECK_EQ(traits.extension_id(),
TaskTraitsExtensionStorage::kInvalidExtensionId)
<< "Extension traits cannot be used with "
"CreateUpdateableSequencedTaskRunner().";
return static_cast<internal::ThreadPoolImpl*>(ThreadPoolInstance::Get())
->CreateUpdateableSequencedTaskRunner(traits);
}
scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
return GetTaskExecutorForTraits(traits)->CreateSingleThreadTaskRunner(
traits, thread_mode);
}
#if defined(OS_WIN)
scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
return GetTaskExecutorForTraits(traits)->CreateCOMSTATaskRunner(traits,
thread_mode);
}
#endif // defined(OS_WIN)
} // namespace base

@ -0,0 +1,265 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_POST_TASK_H_
#define BASE_TASK_POST_TASK_H_
#include <memory>
#include <utility>
#include "base/base_export.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/memory/ref_counted.h"
#include "base/post_task_and_reply_with_result_internal.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/task/single_thread_task_runner_thread_mode.h"
#include "base/task/task_traits.h"
#include "base/task_runner.h"
#include "base/time/time.h"
#include "base/updateable_sequenced_task_runner.h"
#include "build/build_config.h"
namespace base {
// This is the interface to post tasks.
//
// Note: A migration is in-progress away from this API and in favor of explicit
// API-as-a-destination. thread_pool.h is now preferred to the
// base::ThreadPool() trait for posting to the thread pool.
//
// To post a simple one-off task with default traits:
// PostTask(FROM_HERE, BindOnce(...));
// modern equivalent:
// ThreadPool::PostTask(FROM_HERE, BindOnce(...));
//
// To post a high priority one-off task to respond to a user interaction:
// PostTask(
// FROM_HERE,
// {ThreadPool(), TaskPriority::USER_BLOCKING},
// BindOnce(...));
// modern equivalent:
// ThreadPool::PostTask(
// FROM_HERE,
// {TaskPriority::USER_BLOCKING},
// BindOnce(...));
//
// To post tasks that must run in sequence with default traits:
// scoped_refptr<SequencedTaskRunner> task_runner =
// CreateSequencedTaskRunner({ThreadPool()});
// task_runner->PostTask(FROM_HERE, BindOnce(...));
// task_runner->PostTask(FROM_HERE, BindOnce(...));
// modern equivalent:
// scoped_refptr<SequencedTaskRunner> task_runner =
// ThreadPool::CreateSequencedTaskRunner({});
// task_runner->PostTask(FROM_HERE, BindOnce(...));
// task_runner->PostTask(FROM_HERE, BindOnce(...));
//
// To post tasks that may block, must run in sequence and can be skipped on
// shutdown:
// scoped_refptr<SequencedTaskRunner> task_runner =
// CreateSequencedTaskRunner({ThreadPool(), MayBlock(),
// TaskShutdownBehavior::SKIP_ON_SHUTDOWN});
// task_runner->PostTask(FROM_HERE, BindOnce(...));
// task_runner->PostTask(FROM_HERE, BindOnce(...));
// modern equivalent:
// scoped_refptr<SequencedTaskRunner> task_runner =
// ThreadPool::CreateSequencedTaskRunner(
// {MayBlock(), TaskShutdownBehavior::SKIP_ON_SHUTDOWN});
// task_runner->PostTask(FROM_HERE, BindOnce(...));
// task_runner->PostTask(FROM_HERE, BindOnce(...));
//
// The default traits apply to tasks that:
// (1) don't block (ref. MayBlock() and WithBaseSyncPrimitives()),
// (2) prefer inheriting the current priority to specifying their own, and
// (3) can either block shutdown or be skipped on shutdown
// (implementation is free to choose a fitting default).
// Explicit traits must be specified for tasks for which these loose
// requirements are not sufficient.
//
// Tasks posted with only traits defined in base/task/task_traits.h run on
// threads owned by the registered ThreadPoolInstance (i.e. not on the main
// thread). An embedder (e.g. Chrome) can define additional traits to make tasks
// run on threads of their choosing.
//
// Tasks posted with the same traits will be scheduled in the order they were
// posted. IMPORTANT: Please note however that, unless the traits imply a
// single thread or sequence, this doesn't guarantee any *execution ordering*
// for tasks posted in a given order (being scheduled first doesn't mean it will
// run first -- could run in parallel or have its physical thread preempted).
//
// Prerequisite: A ThreadPoolInstance must have been registered for the current
// process via ThreadPoolInstance::Set() before the functions below are
// valid. This is typically done during the initialization phase in each
// process. If your code is not running in that phase, you most likely don't
// have to worry about this. You will encounter DCHECKs or nullptr dereferences
// if this is violated. For tests, prefer base::test::TaskEnvironment.
// Equivalent to calling PostTask with default TaskTraits.
BASE_EXPORT bool PostTask(const Location& from_here, OnceClosure task);
inline bool PostTask(OnceClosure task,
const Location& from_here = Location::Current()) {
return PostTask(from_here, std::move(task));
}
// Equivalent to calling PostTaskAndReply with default TaskTraits.
BASE_EXPORT bool PostTaskAndReply(const Location& from_here,
OnceClosure task,
OnceClosure reply);
// Equivalent to calling PostTaskAndReplyWithResult with default TaskTraits.
template <typename TaskReturnType, typename ReplyArgType>
bool PostTaskAndReplyWithResult(const Location& from_here,
OnceCallback<TaskReturnType()> task,
OnceCallback<void(ReplyArgType)> reply) {
return PostTaskAndReplyWithResult(from_here, {ThreadPool()}, std::move(task),
std::move(reply));
}
// Posts |task| with specific |traits|. Returns false if the task definitely
// won't run because of current shutdown state.
BASE_EXPORT bool PostTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task);
// Posts |task| with specific |traits|. |task| will not run before |delay|
// expires. Returns false if the task definitely won't run because of current
// shutdown state.
//
// Specify a BEST_EFFORT priority via |traits| if the task doesn't have to run
// as soon as |delay| expires.
BASE_EXPORT bool PostDelayedTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
TimeDelta delay);
// Posts |task| with specific |traits| and posts |reply| on the caller's
// execution context (i.e. same sequence or thread and same TaskTraits if
// applicable) when |task| completes. Returns false if the task definitely won't
// run because of current shutdown state. Can only be called when
// SequencedTaskRunnerHandle::IsSet().
BASE_EXPORT bool PostTaskAndReply(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
OnceClosure reply);
// Posts |task| with specific |traits| and posts |reply| with the return value
// of |task| as argument on the caller's execution context (i.e. same sequence
// or thread and same TaskTraits if applicable) when |task| completes. Returns
// false if the task definitely won't run because of current shutdown state. Can
// only be called when SequencedTaskRunnerHandle::IsSet().
template <typename TaskReturnType, typename ReplyArgType>
bool PostTaskAndReplyWithResult(const Location& from_here,
const TaskTraits& traits,
OnceCallback<TaskReturnType()> task,
OnceCallback<void(ReplyArgType)> reply) {
auto* result = new std::unique_ptr<TaskReturnType>();
return PostTaskAndReply(
from_here, traits,
BindOnce(&internal::ReturnAsParamAdapter<TaskReturnType>, std::move(task),
result),
BindOnce(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
std::move(reply), Owned(result)));
}
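// A usage sketch for the template above (illustrative; ComputeValue() and
// UseValue() are hypothetical):
//
//   PostTaskAndReplyWithResult(
//       FROM_HERE, {ThreadPool(), MayBlock()},
//       BindOnce(&ComputeValue),  // Runs per |traits|.
//       BindOnce(&UseValue));     // Runs back on the calling sequence.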
// Returns a TaskRunner whose PostTask invocations result in scheduling tasks
// using |traits|. Tasks may run in any order and in parallel.
BASE_EXPORT scoped_refptr<TaskRunner> CreateTaskRunner(
const TaskTraits& traits);
// Returns a SequencedTaskRunner whose PostTask invocations result in scheduling
// tasks using |traits|. Tasks run one at a time in posting order.
BASE_EXPORT scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
const TaskTraits& traits);
// Returns a task runner whose PostTask invocations result in scheduling tasks
// using |traits|. The priority in |traits| can be updated at any time via
// UpdateableSequencedTaskRunner::UpdatePriority(). An update affects all tasks
// posted to the task runner that aren't running yet. Tasks run one at a time in
// posting order.
//
// |traits| requirements:
// - base::ThreadPool() must be specified.
// Note: Prefer the explicit (thread_pool.h) version of this API while we
// migrate this one to it.
// - Extension traits (e.g. BrowserThread) cannot be specified.
// - base::ThreadPolicy must be specified if the priority of the task runner
// will ever be increased from BEST_EFFORT.
BASE_EXPORT scoped_refptr<UpdateableSequencedTaskRunner>
CreateUpdateableSequencedTaskRunner(const TaskTraits& traits);
// Returns a SingleThreadTaskRunner whose PostTask invocations result in
// scheduling tasks using |traits| on a thread determined by |thread_mode|. See
// base/task/single_thread_task_runner_thread_mode.h for |thread_mode| details.
// If |traits| identifies an existing thread,
// SingleThreadTaskRunnerThreadMode::SHARED must be used. Tasks run on a single
// thread in posting order.
//
// If all you need is to make sure that tasks don't run concurrently (e.g.
// because they access a data structure which is not thread-safe), use
// CreateSequencedTaskRunner(). Only use this if you rely on a thread-affine API
// (it might be safer to assume thread-affinity when dealing with
// under-documented third-party APIs, e.g. other OS') or share data across tasks
// using thread-local storage.
BASE_EXPORT scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode =
SingleThreadTaskRunnerThreadMode::SHARED);
#if defined(OS_WIN)
// Returns a SingleThreadTaskRunner whose PostTask invocations result in
// scheduling tasks using |traits| in a COM Single-Threaded Apartment on a
// thread determined by |thread_mode|. See
// base/task/single_thread_task_runner_thread_mode.h for |thread_mode| details.
// If |traits| identifies an existing thread,
// SingleThreadTaskRunnerThreadMode::SHARED must be used. Tasks run in the same
// Single-Threaded Apartment in posting order for the returned
// SingleThreadTaskRunner. There is not necessarily a one-to-one correspondence
// between SingleThreadTaskRunners and Single-Threaded Apartments. The
// implementation is free to share apartments or create new apartments as
// necessary. In either case, care should be taken to make sure COM pointers are
// not smuggled across apartments.
BASE_EXPORT scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode =
SingleThreadTaskRunnerThreadMode::SHARED);
#endif // defined(OS_WIN)
// Helpers to send a Delete/ReleaseSoon to a new SequencedTaskRunner created
// from |traits|. The semantics match base::PostTask in that the deletion is
// guaranteed to be scheduled in order with other tasks using the same |traits|.
//
// Prefer using an existing SequencedTaskRunner's Delete/ReleaseSoon over this
// to encode execution order requirements when possible.
//
// Note: base::ThreadPool is not a valid destination as it'd result in a one-off
// parallel task which is generally ill-suited for deletion. Use an existing
// SequencedTaskRunner's DeleteSoon to post a safely ordered deletion.
template <class T>
bool DeleteSoon(const Location& from_here,
const TaskTraits& traits,
const T* object) {
DCHECK(!traits.use_thread_pool());
return CreateSequencedTaskRunner(traits)->DeleteSoon(from_here, object);
}
template <class T>
bool DeleteSoon(const Location& from_here,
const TaskTraits& traits,
std::unique_ptr<T> object) {
return DeleteSoon(from_here, traits, object.release());
}
template <class T>
void ReleaseSoon(const Location& from_here,
const TaskTraits& traits,
scoped_refptr<T>&& object) {
DCHECK(!traits.use_thread_pool());
CreateSequencedTaskRunner(traits)->ReleaseSoon(from_here, std::move(object));
}
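// A usage sketch for the helpers above (illustrative; |kTraits| is a
// hypothetical extension-trait constant naming a destination thread, since
// base::ThreadPool() is not a valid destination here):
//
//   std::unique_ptr<Foo> foo = TakeFoo();
//   DeleteSoon(FROM_HERE, kTraits, std::move(foo));  // Ordered with kTraits.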
} // namespace base
#endif // BASE_TASK_POST_TASK_H_

@ -0,0 +1,41 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/scoped_set_task_priority_for_current_thread.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/threading/thread_local.h"
namespace base {
namespace internal {
namespace {
LazyInstance<ThreadLocalPointer<const TaskPriority>>::Leaky
tls_task_priority_for_current_thread = LAZY_INSTANCE_INITIALIZER;
} // namespace
ScopedSetTaskPriorityForCurrentThread::ScopedSetTaskPriorityForCurrentThread(
TaskPriority priority)
: priority_(priority) {
DCHECK(!tls_task_priority_for_current_thread.Get().Get());
tls_task_priority_for_current_thread.Get().Set(&priority_);
}
ScopedSetTaskPriorityForCurrentThread::
~ScopedSetTaskPriorityForCurrentThread() {
DCHECK_EQ(&priority_, tls_task_priority_for_current_thread.Get().Get());
tls_task_priority_for_current_thread.Get().Set(nullptr);
}
TaskPriority GetTaskPriorityForCurrentThread() {
const TaskPriority* priority =
tls_task_priority_for_current_thread.Get().Get();
return priority ? *priority : TaskPriority::USER_BLOCKING;
}
} // namespace internal
} // namespace base

@ -0,0 +1,35 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
#define BASE_TASK_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_
#include "base/base_export.h"
#include "base/macros.h"
#include "base/task/task_traits.h"
namespace base {
namespace internal {
class BASE_EXPORT ScopedSetTaskPriorityForCurrentThread {
public:
// Within the scope of this object, GetTaskPriorityForCurrentThread() will
// return |priority|.
ScopedSetTaskPriorityForCurrentThread(TaskPriority priority);
~ScopedSetTaskPriorityForCurrentThread();
private:
const TaskPriority priority_;
DISALLOW_COPY_AND_ASSIGN(ScopedSetTaskPriorityForCurrentThread);
};
// Returns the priority of the task running on the current thread,
// or TaskPriority::USER_BLOCKING by default if none.
BASE_EXPORT TaskPriority GetTaskPriorityForCurrentThread();
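// A usage sketch (illustrative): the scoper overrides the priority reported
// for the current thread for its lifetime.
//
//   {
//     ScopedSetTaskPriorityForCurrentThread scoped_priority(
//         TaskPriority::BEST_EFFORT);
//     DCHECK_EQ(TaskPriority::BEST_EFFORT, GetTaskPriorityForCurrentThread());
//   }
//   // Back to the default.
//   DCHECK_EQ(TaskPriority::USER_BLOCKING, GetTaskPriorityForCurrentThread());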
} // namespace internal
} // namespace base
#endif // BASE_TASK_SCOPED_SET_TASK_PRIORITY_FOR_CURRENT_THREAD_H_

@ -0,0 +1,6 @@
altimin@chromium.org
carlscab@google.com
skyostil@chromium.org
# TEAM: scheduler-dev@chromium.org
# COMPONENT: Internals>SequenceManager

@ -0,0 +1,34 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/associated_thread_id.h"
namespace base {
namespace sequence_manager {
namespace internal {
AssociatedThreadId::AssociatedThreadId() = default;
AssociatedThreadId::~AssociatedThreadId() = default;
void AssociatedThreadId::BindToCurrentThread() {
// TODO(altimin): Remove this after MessageLoopImpl is gone and
// initialisation is simplified.
auto current_thread_id = PlatformThread::CurrentId();
auto prev_thread_id =
thread_id_.exchange(current_thread_id, std::memory_order_release);
ANALYZER_ALLOW_UNUSED(prev_thread_id);
DCHECK(prev_thread_id == current_thread_id ||
prev_thread_id == kInvalidThreadId);
// Rebind the thread and sequence checkers to the current thread/sequence.
DETACH_FROM_THREAD(thread_checker);
DCHECK_CALLED_ON_VALID_THREAD(thread_checker);
DETACH_FROM_SEQUENCE(sequence_checker);
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker);
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

@ -0,0 +1,113 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_ASSOCIATED_THREAD_ID_H_
#define BASE_TASK_SEQUENCE_MANAGER_ASSOCIATED_THREAD_ID_H_
#include <atomic>
#include <memory>
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/optional.h"
#include "base/sequence_checker.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_checker.h"
namespace base {
namespace sequence_manager {
namespace internal {
// TODO(eseckler): Make this owned by SequenceManager once the TaskQueue
// refactor has happened (https://crbug.com/865411).
//
// This class is thread-safe. But see notes about memory ordering guarantees for
// the various methods.
class BASE_EXPORT AssociatedThreadId
: public base::RefCountedThreadSafe<AssociatedThreadId> {
public:
AssociatedThreadId();
// TODO(eseckler): Replace thread_checker with sequence_checker everywhere.
THREAD_CHECKER(thread_checker);
SEQUENCE_CHECKER(sequence_checker);
static scoped_refptr<AssociatedThreadId> CreateUnbound() {
return MakeRefCounted<AssociatedThreadId>();
}
static scoped_refptr<AssociatedThreadId> CreateBound() {
auto associated_thread = MakeRefCounted<AssociatedThreadId>();
associated_thread->BindToCurrentThread();
return associated_thread;
}
// Rebind the associated thread to the current thread. This allows creating
// the SequenceManager and TaskQueues on a different thread/sequence than the
// one it will manage.
//
// Can only be called once.
void BindToCurrentThread();
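// A binding sketch (illustrative):
//
//   auto thread_id = AssociatedThreadId::CreateUnbound();
//   // ... hand |thread_id| to objects built on the creating thread ...
//   // Later, on the thread that will run the SequenceManager:
//   thread_id->BindToCurrentThread();
//   DCHECK(thread_id->IsBoundToCurrentThread());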
// Returns the id of the thread bound to this object via a previous call to
// BindToCurrentThread(), nullopt if no thread was bound yet.
//
// This method guarantees a happens-before ordering with
// BindToCurrentThread(), that is all memory writes that happened-before the
// call to BindToCurrentThread() will become visible side-effects in the
// current thread.
//
// Attention: The result might be stale by the time this method returns.
Optional<PlatformThreadId> GetBoundThreadId() const {
auto thread_id = thread_id_.load(std::memory_order_acquire);
if (thread_id == kInvalidThreadId) {
return nullopt;
} else {
return thread_id;
}
}
// Checks whether this object has already been bound to a thread.
//
// This method guarantees a happens-before ordering with
// BindToCurrentThread(), that is all memory writes that happened-before the
// call to BindToCurrentThread() will become visible side-effects in the
// current thread.
//
// Attention: The result might be stale by the time this method returns.
bool IsBound() const {
return thread_id_.load(std::memory_order_acquire) != kInvalidThreadId;
}
// Checks whether this object is bound to the current thread. Returns false if
// this object is not bound to any thread.
//
// Note that this method provides no memory ordering guarantees but those are
// not really needed. If this method returns true we are on the same thread
// that called BindToCurrentThread(). If the method returns false this object
// could be unbound, so there is no possible ordering.
//
// Attention: The result might be stale by the time this method returns.
bool IsBoundToCurrentThread() const {
return thread_id_.load(std::memory_order_relaxed) ==
PlatformThread::CurrentId();
}
// TODO(eseckler): Add a method that checks that we are either bound already
// or on the thread which created us and use it in any_thread() accessors.
private:
friend class base::RefCountedThreadSafe<AssociatedThreadId>;
~AssociatedThreadId();
// All access to this member can be std::memory_order_relaxed as this class
// provides no ordering guarantees.
std::atomic<PlatformThreadId> thread_id_{kInvalidThreadId};
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_ASSOCIATED_THREAD_ID_H_

@ -0,0 +1,212 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/atomic_flag_set.h"
#include <utility>
#include "base/bits.h"
#include "base/callback.h"
#include "base/logging.h"
namespace base {
namespace sequence_manager {
namespace internal {
AtomicFlagSet::AtomicFlagSet(
scoped_refptr<AssociatedThreadId> associated_thread)
: associated_thread_(std::move(associated_thread)) {}
AtomicFlagSet::~AtomicFlagSet() {
DCHECK(!alloc_list_head_);
DCHECK(!partially_free_list_head_);
}
AtomicFlagSet::AtomicFlag::AtomicFlag() = default;
AtomicFlagSet::AtomicFlag::~AtomicFlag() {
ReleaseAtomicFlag();
}
AtomicFlagSet::AtomicFlag::AtomicFlag(AtomicFlagSet* outer,
Group* element,
size_t flag_bit)
: outer_(outer), group_(element), flag_bit_(flag_bit) {}
AtomicFlagSet::AtomicFlag::AtomicFlag(AtomicFlag&& other)
: outer_(other.outer_), group_(other.group_), flag_bit_(other.flag_bit_) {
other.outer_ = nullptr;
other.group_ = nullptr;
}
void AtomicFlagSet::AtomicFlag::SetActive(bool active) {
DCHECK(group_);
if (active) {
// Release semantics are required to ensure that all memory accesses made on
// this thread happen-before any others done on the thread running the
// active callbacks.
group_->flags.fetch_or(flag_bit_, std::memory_order_release);
} else {
// No operation is being performed based on the bit *not* being set (i.e.
// state of other memory is irrelevant); hence no memory order is required
// when unsetting it.
group_->flags.fetch_and(~flag_bit_, std::memory_order_relaxed);
}
}
void AtomicFlagSet::AtomicFlag::ReleaseAtomicFlag() {
if (!group_)
return;
DCHECK_CALLED_ON_VALID_THREAD(outer_->associated_thread_->thread_checker);
SetActive(false);
// If |group_| was full then add it on the partially free list.
if (group_->IsFull())
outer_->AddToPartiallyFreeList(group_);
int index = Group::IndexOfFirstFlagSet(flag_bit_);
DCHECK(!group_->flag_callbacks[index].is_null());
group_->flag_callbacks[index] = RepeatingClosure();
group_->allocated_flags &= ~flag_bit_;
// If |group_| has become empty delete it.
if (group_->IsEmpty()) {
outer_->RemoveFromPartiallyFreeList(group_);
outer_->RemoveFromAllocList(group_);
}
outer_ = nullptr;
group_ = nullptr;
}
AtomicFlagSet::AtomicFlag AtomicFlagSet::AddFlag(RepeatingClosure callback) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
// Allocate a new Group if needed.
if (!partially_free_list_head_) {
AddToAllocList(std::make_unique<Group>());
AddToPartiallyFreeList(alloc_list_head_.get());
}
DCHECK(partially_free_list_head_);
Group* group = partially_free_list_head_;
size_t first_unoccupied_index =
static_cast<size_t>(group->FindFirstUnallocatedFlag());
DCHECK(!group->flag_callbacks[first_unoccupied_index]);
group->flag_callbacks[first_unoccupied_index] = std::move(callback);
size_t flag_bit = size_t{1} << first_unoccupied_index;
group->allocated_flags |= flag_bit;
if (group->IsFull())
RemoveFromPartiallyFreeList(group);
return AtomicFlag(this, group, flag_bit);
}
void AtomicFlagSet::RunActiveCallbacks() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
for (Group* iter = alloc_list_head_.get(); iter; iter = iter->next.get()) {
// Acquire semantics are required to guarantee that all memory side-effects
// made by other threads that were allowed to perform operations are
// synchronized with this thread before it returns from this method.
size_t active_flags = std::atomic_exchange_explicit(
&iter->flags, size_t{0}, std::memory_order_acquire);
// This is O(number of bits set).
while (active_flags) {
int index = Group::IndexOfFirstFlagSet(active_flags);
// Clear the flag.
active_flags ^= size_t{1} << index;
iter->flag_callbacks[index].Run();
}
}
}
AtomicFlagSet::Group::Group() = default;
AtomicFlagSet::Group::~Group() {
DCHECK_EQ(allocated_flags, 0u);
DCHECK(!partially_free_list_prev);
DCHECK(!partially_free_list_next);
}
bool AtomicFlagSet::Group::IsFull() const {
return (~allocated_flags) == 0u;
}
bool AtomicFlagSet::Group::IsEmpty() const {
return allocated_flags == 0u;
}
int AtomicFlagSet::Group::FindFirstUnallocatedFlag() const {
size_t unallocated_flags = ~allocated_flags;
DCHECK_NE(unallocated_flags, 0u);
int index = IndexOfFirstFlagSet(unallocated_flags);
DCHECK_LT(index, kNumFlags);
return index;
}
// static
int AtomicFlagSet::Group::IndexOfFirstFlagSet(size_t flag) {
DCHECK_NE(flag, 0u);
return bits::CountTrailingZeroBits(flag);
}
void AtomicFlagSet::AddToAllocList(std::unique_ptr<Group> group) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (alloc_list_head_)
alloc_list_head_->prev = group.get();
group->next = std::move(alloc_list_head_);
alloc_list_head_ = std::move(group);
}
void AtomicFlagSet::RemoveFromAllocList(Group* group) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (group->next)
group->next->prev = group->prev;
if (group->prev) {
group->prev->next = std::move(group->next);
} else {
alloc_list_head_ = std::move(group->next);
}
}
void AtomicFlagSet::AddToPartiallyFreeList(Group* group) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_NE(partially_free_list_head_, group);
DCHECK(!group->partially_free_list_prev);
DCHECK(!group->partially_free_list_next);
if (partially_free_list_head_)
partially_free_list_head_->partially_free_list_prev = group;
group->partially_free_list_next = partially_free_list_head_;
partially_free_list_head_ = group;
}
void AtomicFlagSet::RemoveFromPartiallyFreeList(Group* group) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK(partially_free_list_head_);
// Check |group| is in the list.
DCHECK(partially_free_list_head_ == group || group->partially_free_list_prev);
if (group->partially_free_list_next) {
group->partially_free_list_next->partially_free_list_prev =
group->partially_free_list_prev;
}
if (group->partially_free_list_prev) {
group->partially_free_list_prev->partially_free_list_next =
group->partially_free_list_next;
} else {
partially_free_list_head_ = group->partially_free_list_next;
}
group->partially_free_list_prev = nullptr;
group->partially_free_list_next = nullptr;
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

@ -0,0 +1,142 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_
#define BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_
#include <atomic>
#include <memory>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/task/sequence_manager/associated_thread_id.h"
namespace base {
namespace sequence_manager {
namespace internal {
// This class maintains a set of AtomicFlags which can be activated or
// deactivated at any time by any thread. When a flag is created a callback is
// specified and the RunActiveCallbacks method can be invoked to fire callbacks
// for all active flags. Creating, releasing, or destroying an AtomicFlag must be
// done on the associated thread, as must calling RunActiveCallbacks. This
// class is thread-affine.
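//
// A usage sketch (illustrative; DoDeferredWork() is hypothetical):
//
//   AtomicFlagSet set(AssociatedThreadId::CreateBound());
//   AtomicFlagSet::AtomicFlag flag =
//       set.AddFlag(BindRepeating([] { DoDeferredWork(); }));
//   flag.SetActive(true);       // Any thread.
//   set.RunActiveCallbacks();   // Associated thread; runs DoDeferredWork()
//                               // once and resets the flag to inactive.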
class BASE_EXPORT AtomicFlagSet {
protected:
struct Group;
public:
explicit AtomicFlagSet(scoped_refptr<AssociatedThreadId> associated_thread);
// AtomicFlags need to be released (or deleted) before this can be deleted.
~AtomicFlagSet();
// This class is thread-affine; in addition, SetActive() can be called
// concurrently from any thread.
class BASE_EXPORT AtomicFlag {
public:
AtomicFlag();
// Automatically releases the AtomicFlag.
~AtomicFlag();
AtomicFlag(const AtomicFlag&) = delete;
AtomicFlag(AtomicFlag&& other);
// Can be called on any thread. Marks whether the flag is active or not,
// which controls whether RunActiveCallbacks() will fire the associated
// callback or not. In the absence of external synchronization, the value
// set by this call might not immediately be visible to a thread calling
// RunActiveCallbacks(); the only guarantee is that a value set by this will
// eventually be visible to other threads due to cache coherency. Release /
// acquire semantics are used on the underlying atomic operations so if
// RunActiveCallbacks sees the value set by a call to SetActive(), it will
// also see the memory changes that happened prior to that SetActive() call.
void SetActive(bool active);
// Releases the flag. Must be called on the associated thread. SetActive
// can't be called after this.
void ReleaseAtomicFlag();
private:
friend AtomicFlagSet;
AtomicFlag(AtomicFlagSet* outer, Group* element, size_t flag_bit);
AtomicFlagSet* outer_ = nullptr;
Group* group_ = nullptr; // Null when AtomicFlag is invalid.
size_t flag_bit_ = 0; // This is 1 << index of this flag within the group.
};
// Adds a new flag to the set. The |callback| will be fired by
// RunActiveCallbacks if the flag is active. Must be called on the associated
// thread.
AtomicFlag AddFlag(RepeatingClosure callback);
// Runs the registered callback for all flags marked as active and atomically
// resets all flags to inactive. Must be called on the associated thread.
void RunActiveCallbacks() const;
protected:
Group* GetAllocListForTesting() const { return alloc_list_head_.get(); }
Group* GetPartiallyFreeListForTesting() const {
return partially_free_list_head_;
}
// Wraps a single std::atomic<size_t> which is shared by a number of
// AtomicFlag's with one bit per flag.
struct BASE_EXPORT Group {
Group();
~Group();
static constexpr int kNumFlags = sizeof(size_t) * 8;
std::atomic<size_t> flags = {0};
size_t allocated_flags = 0;
RepeatingClosure flag_callbacks[kNumFlags];
Group* prev = nullptr;
std::unique_ptr<Group> next;
Group* partially_free_list_prev = nullptr;
Group* partially_free_list_next = nullptr;
bool IsFull() const;
bool IsEmpty() const;
// Returns the index of the first unallocated flag. Must not be called when
// all flags are set.
int FindFirstUnallocatedFlag() const;
// Computes the index into |flag_callbacks| based on the number of trailing
// zero bits in |flag|.
static int IndexOfFirstFlagSet(size_t flag);
private:
DISALLOW_COPY_AND_ASSIGN(Group);
};
private:
void AddToAllocList(std::unique_ptr<Group> element);
// This deletes |element|.
void RemoveFromAllocList(Group* element);
void AddToPartiallyFreeList(Group* element);
// This does not delete |element|.
void RemoveFromPartiallyFreeList(Group* element);
scoped_refptr<AssociatedThreadId> associated_thread_;
std::unique_ptr<Group> alloc_list_head_;
Group* partially_free_list_head_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(AtomicFlagSet);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_ATOMIC_FLAG_SET_H_

@ -0,0 +1,61 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_
#define BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_
#include <stdint.h>
#include <limits>
namespace base {
namespace sequence_manager {
namespace internal {
class EnqueueOrderGenerator;
}
// 64-bit number which is used to order tasks.
// SequenceManager assumes this number will never overflow.
class EnqueueOrder {
public:
EnqueueOrder() : value_(kNone) {}
~EnqueueOrder() = default;
static EnqueueOrder none() { return EnqueueOrder(kNone); }
static EnqueueOrder blocking_fence() { return EnqueueOrder(kBlockingFence); }
// Returns an EnqueueOrder that compares greater than any other EnqueueOrder.
static EnqueueOrder max() {
return EnqueueOrder(std::numeric_limits<uint64_t>::max());
}
// It's okay to use EnqueueOrder in boolean expressions keeping in mind
// that some non-zero values have a special meaning.
operator uint64_t() const { return value_; }
static EnqueueOrder FromIntForTesting(uint64_t value) {
return EnqueueOrder(value);
}
private:
// EnqueueOrderGenerator is the only class allowed to create an EnqueueOrder
// with a non-default constructor.
friend class internal::EnqueueOrderGenerator;
explicit EnqueueOrder(uint64_t value) : value_(value) {}
enum SpecialValues : uint64_t {
kNone = 0,
kBlockingFence = 1,
kFirst = 2,
};
uint64_t value_;
};
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_H_

View file

@ -0,0 +1,18 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/enqueue_order_generator.h"
namespace base {
namespace sequence_manager {
namespace internal {
EnqueueOrderGenerator::EnqueueOrderGenerator()
: counter_(EnqueueOrder::kFirst) {}
EnqueueOrderGenerator::~EnqueueOrderGenerator() = default;
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,43 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_GENERATOR_H_
#define BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_GENERATOR_H_
#include <stdint.h>
#include <atomic>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/task/sequence_manager/enqueue_order.h"
namespace base {
namespace sequence_manager {
namespace internal {
// EnqueueOrder can't be created from a raw number in non-test code.
// EnqueueOrderGenerator is used to create one with a strictly monotonic
// guarantee.
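// A minimal sketch of that guarantee (illustrative only):
//
//   EnqueueOrderGenerator generator;
//   EnqueueOrder first = generator.GenerateNext();
//   EnqueueOrder second = generator.GenerateNext();
//   DCHECK_LT(first, second);  // Later calls always compare greater.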
class BASE_EXPORT EnqueueOrderGenerator {
public:
EnqueueOrderGenerator();
~EnqueueOrderGenerator();
// Can be called from any thread.
EnqueueOrder GenerateNext() {
return EnqueueOrder(std::atomic_fetch_add_explicit(
&counter_, uint64_t(1), std::memory_order_relaxed));
}
private:
std::atomic<uint64_t> counter_;
DISALLOW_COPY_AND_ASSIGN(EnqueueOrderGenerator);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_ENQUEUE_ORDER_GENERATOR_H_

View file

@ -0,0 +1,380 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
#define BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_
#include <algorithm>
#include <cmath>
#include <memory>
#include <vector>
#include "base/debug/alias.h"
#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/time/time.h"
namespace base {
namespace sequence_manager {
namespace internal {
// A LazilyDeallocatedDeque specialized for the SequenceManager's usage
// patterns. The queue generally grows while tasks are added and then removed
// until empty and the cycle repeats.
//
// The main difference between sequence_manager::LazilyDeallocatedDeque and
// others is memory management. For performance (memory allocation isn't free)
// we don't automatically reclaim memory when the queue becomes empty.
// Instead we rely on the surrounding code periodically calling
// MaybeShrinkQueue, ideally when the queue is empty.
//
// We keep track of the maximum recent queue size and rate limit
// MaybeShrinkQueue to avoid unnecessary churn.
//
// NB: this queue isn't thread-safe by itself.
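// A typical usage cycle looks like this (illustrative sketch; |RunTask| is
// hypothetical):
//
//   LazilyDeallocatedDeque<Task> queue;
//   queue.push_back(std::move(task));  // Grows the backing Ring as needed.
//   while (!queue.empty()) {
//     RunTask(std::move(queue.front()));
//     queue.pop_front();               // Capacity is retained.
//   }
//   queue.MaybeShrinkQueue();          // Periodically give memory back.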
template <typename T, TimeTicks (*now_source)() = TimeTicks::Now>
class LazilyDeallocatedDeque {
public:
enum {
// Minimum allocation for a ring. Note a ring of size 4 will only hold up to
// 3 elements.
kMinimumRingSize = 4,
// Maximum "wasted" capacity allowed when considering if we should resize
// the backing store.
kReclaimThreshold = 16,
// Used to rate limit how frequently MaybeShrinkQueue actually shrinks the
// queue.
kMinimumShrinkIntervalInSeconds = 5
};
LazilyDeallocatedDeque() {}
~LazilyDeallocatedDeque() { clear(); }
bool empty() const { return size_ == 0; }
size_t max_size() const { return max_size_; }
size_t size() const { return size_; }
size_t capacity() const {
size_t capacity = 0;
for (const Ring* iter = head_.get(); iter; iter = iter->next_.get()) {
capacity += iter->capacity();
}
return capacity;
}
void clear() {
while (head_) {
head_ = std::move(head_->next_);
}
tail_ = nullptr;
size_ = 0;
}
// Assumed to be an uncommon operation.
void push_front(T t) {
if (!head_) {
DCHECK(!tail_);
head_ = std::make_unique<Ring>(kMinimumRingSize);
tail_ = head_.get();
}
// Grow if needed, by the minimum amount.
if (!head_->CanPush()) {
// TODO(alexclarke): Remove once we've understood the OOMs.
size_t size = size_;
base::debug::Alias(&size);
std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(kMinimumRingSize);
new_ring->next_ = std::move(head_);
head_ = std::move(new_ring);
}
head_->push_front(std::move(t));
max_size_ = std::max(max_size_, ++size_);
}
// Assumed to be a common operation.
void push_back(T t) {
if (!head_) {
DCHECK(!tail_);
head_ = std::make_unique<Ring>(kMinimumRingSize);
tail_ = head_.get();
}
// Grow if needed.
if (!tail_->CanPush()) {
// TODO(alexclarke): Remove once we've understood the OOMs.
size_t size = size_;
base::debug::Alias(&size);
// Doubling the size is a common strategy, but one which can be wasteful,
// so we use a (somewhat) slower growth curve.
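// With this curve successive ring capacities grow roughly 1.5x, e.g.
// 4 -> 8 -> 14 -> 23 -> 36 -> 56 -> ...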
tail_->next_ = std::make_unique<Ring>(2 + tail_->capacity() +
(tail_->capacity() / 2));
tail_ = tail_->next_.get();
}
tail_->push_back(std::move(t));
max_size_ = std::max(max_size_, ++size_);
}
T& front() {
DCHECK(head_);
return head_->front();
}
const T& front() const {
DCHECK(head_);
return head_->front();
}
T& back() {
DCHECK(tail_);
return tail_->back();
}
const T& back() const {
DCHECK(tail_);
return tail_->back();
}
void pop_front() {
DCHECK(head_);
DCHECK(!head_->empty());
DCHECK(tail_);
DCHECK_GT(size_, 0u);
head_->pop_front();
// If the ring has become empty and we have several rings, remove the head
// one (which we expect to have a lower capacity than the remaining ones).
if (head_->empty() && head_->next_) {
head_ = std::move(head_->next_);
}
--size_;
}
void swap(LazilyDeallocatedDeque& other) {
std::swap(head_, other.head_);
std::swap(tail_, other.tail_);
std::swap(size_, other.size_);
std::swap(max_size_, other.max_size_);
std::swap(next_resize_time_, other.next_resize_time_);
}
void MaybeShrinkQueue() {
if (!tail_)
return;
DCHECK_GE(max_size_, size_);
// Rate limit how often we shrink the queue because it's somewhat expensive.
TimeTicks current_time = now_source();
if (current_time < next_resize_time_)
return;
// Due to the way the Ring works we need 1 more slot than is used.
size_t new_capacity = max_size_ + 1;
if (new_capacity < kMinimumRingSize)
new_capacity = kMinimumRingSize;
// Reset |max_size_| so that, unless usage has spiked up, we will consider
// reclaiming memory next time.
max_size_ = size_;
// Only realloc if the current capacity is sufficiently greater than the
// observed maximum size for the previous period.
if (new_capacity + kReclaimThreshold >= capacity())
return;
SetCapacity(new_capacity);
next_resize_time_ =
current_time + TimeDelta::FromSeconds(kMinimumShrinkIntervalInSeconds);
}
void SetCapacity(size_t new_capacity) {
std::unique_ptr<Ring> new_ring = std::make_unique<Ring>(new_capacity);
DCHECK_GE(new_capacity, size_ + 1);
// Preserve the |size_| which counts down to zero in the while loop.
size_t real_size = size_;
while (!empty()) {
DCHECK(new_ring->CanPush());
new_ring->push_back(std::move(head_->front()));
pop_front();
}
size_ = real_size;
DCHECK_EQ(head_.get(), tail_);
head_ = std::move(new_ring);
tail_ = head_.get();
}
private:
FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushFront);
FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushBack);
FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingCanPush);
FRIEND_TEST_ALL_PREFIXES(LazilyDeallocatedDequeTest, RingPushPopPushPop);
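// A fixed-capacity circular buffer. |front_index_| points at the slot just
// before the first element and |back_index_| points at the last element, so
// the two indices being equal means empty, and a Ring of capacity N holds at
// most N - 1 elements.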
struct Ring {
explicit Ring(size_t capacity)
: capacity_(capacity),
front_index_(0),
back_index_(0),
data_(reinterpret_cast<T*>(new char[sizeof(T) * capacity])),
next_(nullptr) {
DCHECK_GE(capacity_, kMinimumRingSize);
}
~Ring() {
while (!empty()) {
pop_front();
}
delete[] reinterpret_cast<char*>(data_);
}
bool empty() const { return back_index_ == front_index_; }
size_t capacity() const { return capacity_; }
bool CanPush() const {
return front_index_ != CircularIncrement(back_index_);
}
void push_front(T&& t) {
// Mustn't appear to become empty.
DCHECK_NE(CircularDecrement(front_index_), back_index_);
new (&data_[front_index_]) T(std::move(t));
front_index_ = CircularDecrement(front_index_);
}
void push_back(T&& t) {
back_index_ = CircularIncrement(back_index_);
DCHECK(!empty()); // Mustn't appear to become empty.
new (&data_[back_index_]) T(std::move(t));
}
bool CanPop() const { return front_index_ != back_index_; }
void pop_front() {
DCHECK(!empty());
front_index_ = CircularIncrement(front_index_);
data_[front_index_].~T();
}
T& front() {
DCHECK(!empty());
return data_[CircularIncrement(front_index_)];
}
const T& front() const {
DCHECK(!empty());
return data_[CircularIncrement(front_index_)];
}
T& back() {
DCHECK(!empty());
return data_[back_index_];
}
const T& back() const {
DCHECK(!empty());
return data_[back_index_];
}
size_t CircularDecrement(size_t index) const {
if (index == 0)
return capacity_ - 1;
return index - 1;
}
size_t CircularIncrement(size_t index) const {
DCHECK_LT(index, capacity_);
++index;
if (index == capacity_)
return 0;
return index;
}
size_t capacity_;
size_t front_index_;
size_t back_index_;
T* data_;
std::unique_ptr<Ring> next_;
DISALLOW_COPY_AND_ASSIGN(Ring);
};
public:
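// A forward, read-only iterator which walks every element from |head_| to
// |tail_|, following the chain of Rings.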
class Iterator {
public:
using value_type = T;
using pointer = const T*;
using reference = const T&;
const T* operator->() const { return &ring_->data_[index_]; }
const T& operator*() const { return ring_->data_[index_]; }
Iterator& operator++() {
if (index_ == ring_->back_index_) {
ring_ = ring_->next_.get();
index_ = ring_ ? ring_->CircularIncrement(ring_->front_index_) : 0;
} else {
index_ = ring_->CircularIncrement(index_);
}
return *this;
}
operator bool() const { return !!ring_; }
private:
explicit Iterator(const Ring* ring) {
if (!ring || ring->empty()) {
ring_ = nullptr;
index_ = 0;
return;
}
ring_ = ring;
index_ = ring_->CircularIncrement(ring->front_index_);
}
const Ring* ring_;
size_t index_;
friend class LazilyDeallocatedDeque;
};
Iterator begin() const { return Iterator(head_.get()); }
Iterator end() const { return Iterator(nullptr); }
private:
// We maintain a list of Ring buffers, to enable us to grow without copying,
// but most of the time we aim to have only one active Ring.
std::unique_ptr<Ring> head_;
Ring* tail_ = nullptr;
size_t size_ = 0;
size_t max_size_ = 0;
TimeTicks next_resize_time_;
DISALLOW_COPY_AND_ASSIGN(LazilyDeallocatedDeque);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_LAZILY_DEALLOCATED_DEQUE_H_

View file

@ -0,0 +1,36 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/lazy_now.h"
#include "base/time/tick_clock.h"
namespace base {
namespace sequence_manager {
LazyNow::LazyNow(TimeTicks now) : tick_clock_(nullptr), now_(now) {}
LazyNow::LazyNow(const TickClock* tick_clock)
: tick_clock_(tick_clock), now_() {
DCHECK(tick_clock);
}
LazyNow::LazyNow(LazyNow&& move_from) noexcept
: tick_clock_(move_from.tick_clock_), now_(move_from.now_) {
move_from.tick_clock_ = nullptr;
move_from.now_ = nullopt;
}
TimeTicks LazyNow::Now() {
// It looks tempting to avoid using Optional and to rely on is_null() instead,
// but in some test environments the clock intentionally starts at zero.
if (!now_) {
DCHECK(tick_clock_); // It can fire only on use after std::move.
now_ = tick_clock_->NowTicks();
}
return *now_;
}
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,43 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_LAZY_NOW_H_
#define BASE_TASK_SEQUENCE_MANAGER_LAZY_NOW_H_
#include "base/base_export.h"
#include "base/optional.h"
#include "base/time/time.h"
namespace base {
class TickClock;
namespace sequence_manager {
// Now() is somewhat expensive, so it makes sense not to call Now() unless we
// really need to, and to avoid subsequent calls if it has already been called
// once. LazyNow objects are expected to be short-lived so that they represent
// an accurate time.
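// For example (a sketch; |deadline| and DoWork() are assumptions):
//
//   LazyNow lazy_now(tick_clock);
//   if (lazy_now.Now() < deadline)  // First call samples the clock.
//     DoWork(lazy_now.Now());       // Second call reuses the sample.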
class BASE_EXPORT LazyNow {
public:
explicit LazyNow(TimeTicks now);
explicit LazyNow(const TickClock* tick_clock);
LazyNow(LazyNow&& move_from) noexcept;
// Result will not be updated on any subsequent calls.
TimeTicks Now();
bool has_value() const { return !!now_; }
private:
const TickClock* tick_clock_; // Not owned.
Optional<TimeTicks> now_;
DISALLOW_COPY_AND_ASSIGN(LazyNow);
};
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_LAZY_NOW_H_

View file

@ -0,0 +1,58 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/real_time_domain.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
namespace base {
namespace sequence_manager {
namespace internal {
RealTimeDomain::RealTimeDomain() {}
RealTimeDomain::~RealTimeDomain() = default;
void RealTimeDomain::OnRegisterWithSequenceManager(
SequenceManagerImpl* sequence_manager) {
TimeDomain::OnRegisterWithSequenceManager(sequence_manager);
tick_clock_ = sequence_manager->GetTickClock();
}
LazyNow RealTimeDomain::CreateLazyNow() const {
return LazyNow(tick_clock_);
}
TimeTicks RealTimeDomain::Now() const {
return tick_clock_->NowTicks();
}
Optional<TimeDelta> RealTimeDomain::DelayTillNextTask(LazyNow* lazy_now) {
Optional<TimeTicks> next_run_time = NextScheduledRunTime();
if (!next_run_time)
return nullopt;
TimeTicks now = lazy_now->Now();
if (now >= next_run_time) {
// Overdue work needs to be run immediately.
return TimeDelta();
}
TimeDelta delay = *next_run_time - now;
TRACE_EVENT1("sequence_manager", "RealTimeDomain::DelayTillNextTask",
"delay_ms", delay.InMillisecondsF());
return delay;
}
bool RealTimeDomain::MaybeFastForwardToNextTask(bool quit_when_idle_requested) {
return false;
}
const char* RealTimeDomain::GetName() const {
return "RealTimeDomain";
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,42 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_
#define BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_
#include "base/base_export.h"
#include "base/macros.h"
#include "base/task/sequence_manager/time_domain.h"
namespace base {
namespace sequence_manager {
namespace internal {
class BASE_EXPORT RealTimeDomain : public TimeDomain {
public:
RealTimeDomain();
~RealTimeDomain() override;
// TimeDomain implementation:
LazyNow CreateLazyNow() const override;
TimeTicks Now() const override;
Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) override;
bool MaybeFastForwardToNextTask(bool quit_when_idle_requested) override;
protected:
void OnRegisterWithSequenceManager(
SequenceManagerImpl* sequence_manager) override;
const char* GetName() const override;
private:
const TickClock* tick_clock_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(RealTimeDomain);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_REAL_TIME_DOMAIN_H_

View file

@ -0,0 +1,115 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/sequence_manager.h"
namespace base {
namespace sequence_manager {
NativeWorkHandle::~NativeWorkHandle() = default;
SequenceManager::MetricRecordingSettings::MetricRecordingSettings(
double task_thread_time_sampling_rate)
: task_sampling_rate_for_recording_cpu_time(
base::ThreadTicks::IsSupported() ? task_thread_time_sampling_rate
: 0) {}
SequenceManager::Settings::Settings() = default;
SequenceManager::Settings::Settings(Settings&& move_from) noexcept = default;
SequenceManager::Settings::Builder::Builder() = default;
SequenceManager::Settings::Builder::~Builder() = default;
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetMessagePumpType(
MessagePumpType message_loop_type_val) {
settings_.message_loop_type = message_loop_type_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetRandomisedSamplingEnabled(
bool randomised_sampling_enabled_val) {
settings_.randomised_sampling_enabled = randomised_sampling_enabled_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetTickClock(const TickClock* clock_val) {
settings_.clock = clock_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetAddQueueTimeToTasks(
bool add_queue_time_to_tasks_val) {
settings_.add_queue_time_to_tasks = add_queue_time_to_tasks_val;
return *this;
}
#if DCHECK_IS_ON()
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetTaskLogging(
TaskLogging task_execution_logging_val) {
settings_.task_execution_logging = task_execution_logging_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetLogPostTask(bool log_post_task_val) {
settings_.log_post_task = log_post_task_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetLogTaskDelayExpiry(
bool log_task_delay_expiry_val) {
settings_.log_task_delay_expiry = log_task_delay_expiry_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetLogRunloopQuitAndQuitWhenIdle(
bool log_runloop_quit_and_quit_when_idle_val) {
settings_.log_runloop_quit_and_quit_when_idle =
log_runloop_quit_and_quit_when_idle_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetPerPriorityCrossThreadTaskDelay(
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_cross_thread_task_delay_val) {
settings_.per_priority_cross_thread_task_delay =
per_priority_cross_thread_task_delay_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetPerPrioritySameThreadTaskDelay(
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_same_thread_task_delay_val) {
settings_.per_priority_same_thread_task_delay =
per_priority_same_thread_task_delay_val;
return *this;
}
SequenceManager::Settings::Builder&
SequenceManager::Settings::Builder::SetRandomTaskSelectionSeed(
int random_task_selection_seed_val) {
settings_.random_task_selection_seed = random_task_selection_seed_val;
return *this;
}
#endif // DCHECK_IS_ON()
SequenceManager::Settings SequenceManager::Settings::Builder::Build() {
return std::move(settings_);
}
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,339 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_
#define BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_
#include <memory>
#include <utility>
#include "base/macros.h"
#include "base/message_loop/message_pump_type.h"
#include "base/message_loop/timer_slack.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/task_time_observer.h"
#include "base/time/default_tick_clock.h"
namespace base {
class MessagePump;
class TaskObserver;
namespace sequence_manager {
class TimeDomain;
// Represents outstanding work the sequence underlying a SequenceManager (e.g.,
// a native system task for drawing the UI). As long as this handle is alive,
// the work is considered to be pending.
class NativeWorkHandle {
public:
virtual ~NativeWorkHandle();
NativeWorkHandle(const NativeWorkHandle&) = delete;
protected:
NativeWorkHandle() = default;
};
// SequenceManager manages TaskQueues which have different properties
// (e.g. priority, common task type), multiplexing all posted tasks into
// a single backing sequence (currently bound to a single thread, which is
// referred to as the *main thread* in the comments below). The SequenceManager
// implementation can be used in various ways to apply scheduling logic.
class BASE_EXPORT SequenceManager {
public:
class Observer {
public:
virtual ~Observer() = default;
// Called back on the main thread.
virtual void OnBeginNestedRunLoop() = 0;
virtual void OnExitNestedRunLoop() = 0;
};
struct MetricRecordingSettings {
// This parameter will be updated for consistency on creation (setting the
// value to 0 when ThreadTicks is not supported).
MetricRecordingSettings(double task_sampling_rate_for_recording_cpu_time);
// The proportion of tasks for which the CPU time will be sampled, or 0 if
// this is not enabled.
// Since randomised sampling requires the use of Rand(), it is enabled only
// on platforms which support it.
// If it is 1, then CPU time is measured for each task, so the integral
// metrics (as opposed to per-task metrics) can be recorded.
double task_sampling_rate_for_recording_cpu_time = 0;
bool records_cpu_time_for_some_tasks() const {
return task_sampling_rate_for_recording_cpu_time > 0.0;
}
bool records_cpu_time_for_all_tasks() const {
return task_sampling_rate_for_recording_cpu_time == 1.0;
}
};
// Settings defining the desired SequenceManager behaviour: the type of the
// MessageLoop and whether randomised sampling should be enabled.
struct BASE_EXPORT Settings {
class Builder;
Settings();
// In the future MessagePump (which is move-only) will also be a setting,
// so we are making Settings move-only in preparation.
Settings(Settings&& move_from) noexcept;
MessagePumpType message_loop_type = MessagePumpType::DEFAULT;
bool randomised_sampling_enabled = false;
const TickClock* clock = DefaultTickClock::GetInstance();
// If true, add the timestamp the task got queued to the task.
bool add_queue_time_to_tasks = false;
#if DCHECK_IS_ON()
// TODO(alexclarke): Consider adding command line flags to control these.
enum class TaskLogging {
kNone,
kEnabled,
kEnabledWithBacktrace,
// Logs high priority tasks and the lower priority tasks they skipped
// past. Useful for debugging test failures caused by scheduler policy
// changes.
kReorderedOnly,
};
TaskLogging task_execution_logging = TaskLogging::kNone;
// If true PostTask will emit a debug log.
bool log_post_task = false;
// If true debug logs will be emitted when a delayed task becomes eligible
// to run.
bool log_task_delay_expiry = false;
// If true usages of the RunLoop API will be logged.
bool log_runloop_quit_and_quit_when_idle = false;
// Scheduler policy induced raciness is an area of concern. This lets us
// apply an extra delay per priority for cross thread posting.
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_cross_thread_task_delay;
// Like the above but for same thread posting.
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_same_thread_task_delay;
// If not zero this seeds a PRNG used by the task selection logic to choose
// a random TaskQueue for a given priority rather than the TaskQueue with
// the oldest EnqueueOrder.
int random_task_selection_seed = 0;
#endif // DCHECK_IS_ON()
DISALLOW_COPY_AND_ASSIGN(Settings);
};
virtual ~SequenceManager() = default;
// Binds the SequenceManager and its TaskQueues to the current thread. Should
// only be called once. Note that CreateSequenceManagerOnCurrentThread()
// performs this initialization automatically.
virtual void BindToCurrentThread() = 0;
// Returns the task runner the current task was posted on. Returns null if no
// task is currently running. Must be called on the bound thread.
virtual scoped_refptr<SequencedTaskRunner> GetTaskRunnerForCurrentTask() = 0;
// Finishes the initialization for a SequenceManager created via
// CreateUnboundSequenceManager(). Must not be called in any other
// circumstances. The ownership of the pump is transferred to SequenceManager.
virtual void BindToMessagePump(std::unique_ptr<MessagePump> message_pump) = 0;
// Must be called on the main thread.
// Can be called only once, before creating TaskQueues.
// Observer must outlive the SequenceManager.
virtual void SetObserver(Observer* observer) = 0;
// Must be called on the main thread.
virtual void AddTaskTimeObserver(TaskTimeObserver* task_time_observer) = 0;
virtual void RemoveTaskTimeObserver(TaskTimeObserver* task_time_observer) = 0;
// Registers a TimeDomain with SequenceManager.
// TaskQueues must only be created with a registered TimeDomain.
// Conversely, any TimeDomain must remain registered until no
// TaskQueues (using that TimeDomain) remain.
virtual void RegisterTimeDomain(TimeDomain* time_domain) = 0;
virtual void UnregisterTimeDomain(TimeDomain* time_domain) = 0;
virtual TimeDomain* GetRealTimeDomain() const = 0;
virtual const TickClock* GetTickClock() const = 0;
virtual TimeTicks NowTicks() const = 0;
// Sets the SingleThreadTaskRunner that will be returned by
// ThreadTaskRunnerHandle::Get on the main thread.
virtual void SetDefaultTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) = 0;
// Removes all canceled delayed tasks, and considers resizing to fit all
// internal queues.
virtual void ReclaimMemory() = 0;
// Returns true if no tasks were executed in TaskQueues that monitor
// quiescence since the last call to this method.
virtual bool GetAndClearSystemIsQuiescentBit() = 0;
// Sets the number of tasks executed in a single SequenceManager invocation.
// Increasing this number reduces the overhead of the task dispatching logic
// at the cost of potentially worse latency. 1 by default.
virtual void SetWorkBatchSize(int work_batch_size) = 0;
// Requests desired timer precision from the OS.
// Has no effect on some platforms.
virtual void SetTimerSlack(TimerSlack timer_slack) = 0;
// Enables crash keys that can be set in the scope of a task which help
// to identify the culprit if upcoming work results in a crash.
// Key names must be thread-specific to avoid races and corrupted crash dumps.
virtual void EnableCrashKeys(const char* async_stack_crash_key) = 0;
// Returns the metric recording configuration for the current SequenceManager.
virtual const MetricRecordingSettings& GetMetricRecordingSettings() const = 0;
// Creates a task queue with the given type, |spec| and args.
// Must be called on the main thread.
// TODO(scheduler-dev): SequenceManager should not create TaskQueues.
template <typename TaskQueueType, typename... Args>
scoped_refptr<TaskQueueType> CreateTaskQueueWithType(
const TaskQueue::Spec& spec,
Args&&... args) {
return WrapRefCounted(new TaskQueueType(CreateTaskQueueImpl(spec), spec,
std::forward<Args>(args)...));
}
// Creates a vanilla TaskQueue rather than a user type derived from it. This
// should be used if you don't wish to subclass TaskQueue.
// Must be called on the main thread.
virtual scoped_refptr<TaskQueue> CreateTaskQueue(
const TaskQueue::Spec& spec) = 0;
// Returns true iff this SequenceManager has no immediate work to do. I.e.
// there are no pending non-delayed tasks or delayed tasks that are due to
// run. This method ignores any pending delayed tasks that might have become
// eligible to run since the last task was executed. This is important
// because otherwise tests would become flaky depending on the exact timing
// of this call. This is moderately expensive.
virtual bool IsIdleForTesting() = 0;
// The total number of posted tasks that haven't executed yet.
virtual size_t GetPendingTaskCountForTesting() const = 0;
// Returns a JSON string which describes all pending tasks.
virtual std::string DescribeAllPendingTasks() const = 0;
// Indicates that the underlying sequence (e.g., the message pump) has pending
// work at priority |priority|. If the priority of the work in this
// SequenceManager is lower, it will yield to let the native work run. The
// native work is assumed to remain pending while the returned handle is
// valid.
//
// Must be called on the main thread, and the returned handle must also be
// deleted on the main thread.
virtual std::unique_ptr<NativeWorkHandle> OnNativeWorkPending(
TaskQueue::QueuePriority priority) = 0;
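// For example (an illustrative sketch): while the returned handle is alive,
// work below kNormalPriority yields to the native work.
//
//   std::unique_ptr<NativeWorkHandle> handle =
//       sequence_manager->OnNativeWorkPending(
//           TaskQueue::QueuePriority::kNormalPriority);
//   // ... the message pump runs the native work ...
//   handle.reset();  // Native work done; normal selection resumes.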
// Adds an observer which reports task execution. Can only be called on the
// same thread that |this| is running on.
virtual void AddTaskObserver(TaskObserver* task_observer) = 0;
// Removes an observer which reports task execution. Can only be called on the
// same thread that |this| is running on.
virtual void RemoveTaskObserver(TaskObserver* task_observer) = 0;
protected:
virtual std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
const TaskQueue::Spec& spec) = 0;
};
class BASE_EXPORT SequenceManager::Settings::Builder {
public:
Builder();
~Builder();
// Sets the MessagePumpType which is used to create a MessagePump.
Builder& SetMessagePumpType(MessagePumpType message_loop_type);
Builder& SetRandomisedSamplingEnabled(bool randomised_sampling_enabled);
// Sets the TickClock the SequenceManager uses to obtain Now.
Builder& SetTickClock(const TickClock* clock);
// Whether or not a queueing timestamp will be added to tasks.
Builder& SetAddQueueTimeToTasks(bool add_queue_time_to_tasks);
#if DCHECK_IS_ON()
// Controls task execution logging.
Builder& SetTaskLogging(TaskLogging task_execution_logging);
// Whether or not PostTask will emit a debug log.
Builder& SetLogPostTask(bool log_post_task);
// Whether or not debug logs will be emitted when a delayed task becomes
// eligible to run.
Builder& SetLogTaskDelayExpiry(bool log_task_delay_expiry);
// Whether or not usages of the RunLoop API will be logged.
Builder& SetLogRunloopQuitAndQuitWhenIdle(
bool log_runloop_quit_and_quit_when_idle);
// Scheduler policy induced raciness is an area of concern. This lets us
// apply an extra delay per priority for cross thread posting.
Builder& SetPerPriorityCrossThreadTaskDelay(
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_cross_thread_task_delay);
// Scheduler policy induced raciness is an area of concern. This lets us
// apply an extra delay per priority for same thread posting.
Builder& SetPerPrioritySameThreadTaskDelay(
std::array<TimeDelta, TaskQueue::kQueuePriorityCount>
per_priority_same_thread_task_delay);
// If not zero this seeds a PRNG used by the task selection logic to choose a
// random TaskQueue for a given priority rather than the TaskQueue with the
// oldest EnqueueOrder.
Builder& SetRandomTaskSelectionSeed(int random_task_selection_seed);
#endif // DCHECK_IS_ON()
Settings Build();
private:
Settings settings_;
};
// Create SequenceManager using MessageLoop on the current thread.
// Implementation is located in sequence_manager_impl.cc.
// TODO(scheduler-dev): Remove after every thread has a SequenceManager.
BASE_EXPORT std::unique_ptr<SequenceManager>
CreateSequenceManagerOnCurrentThread(SequenceManager::Settings settings);
// Create a SequenceManager using the given MessagePump on the current thread.
// MessagePump instances can be created with
// MessagePump::CreateMessagePumpForType().
BASE_EXPORT std::unique_ptr<SequenceManager>
CreateSequenceManagerOnCurrentThreadWithPump(
std::unique_ptr<MessagePump> message_pump,
SequenceManager::Settings settings = SequenceManager::Settings());
// Create an unbound SequenceManager (typically for a future thread or because
// additional setup is required before binding). The SequenceManager can be
// initialized on the current thread and then needs to be bound and initialized
// on the target thread by calling one of the Bind*() methods.
BASE_EXPORT std::unique_ptr<SequenceManager> CreateUnboundSequenceManager(
SequenceManager::Settings settings = SequenceManager::Settings());
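// For example, a thread might set itself up as follows (illustrative sketch;
// the queue name and chosen settings are assumptions):
//
//   auto sequence_manager = CreateSequenceManagerOnCurrentThread(
//       SequenceManager::Settings::Builder()
//           .SetMessagePumpType(MessagePumpType::IO)
//           .SetAddQueueTimeToTasks(true)
//           .Build());
//   auto task_queue =
//       sequence_manager->CreateTaskQueue(TaskQueue::Spec("example_queue"));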
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_H_

File diff suppressed because it is too large

View file

@ -0,0 +1,437 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_
#define BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_
#include <list>
#include <map>
#include <memory>
#include <random>
#include <set>
#include <unordered_map>
#include <utility>
#include <vector>
#include "base/atomic_sequence_num.h"
#include "base/cancelable_callback.h"
#include "base/containers/circular_deque.h"
#include "base/debug/crash_logging.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/memory/weak_ptr.h"
#include "base/message_loop/message_loop_current.h"
#include "base/message_loop/message_pump_type.h"
#include "base/pending_task.h"
#include "base/run_loop.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/synchronization/lock.h"
#include "base/task/common/task_annotator.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/enqueue_order.h"
#include "base/task/sequence_manager/enqueue_order_generator.h"
#include "base/task/sequence_manager/sequence_manager.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/task_queue_selector.h"
#include "base/task/sequence_manager/thread_controller.h"
#include "base/threading/thread_checker.h"
#include "base/time/default_tick_clock.h"
#include "build/build_config.h"
namespace base {
namespace trace_event {
class ConvertableToTraceFormat;
} // namespace trace_event
namespace sequence_manager {
class SequenceManagerForTest;
class TaskQueue;
class TaskTimeObserver;
class TimeDomain;
namespace internal {
class RealTimeDomain;
class TaskQueueImpl;
class ThreadControllerImpl;
// The task queue manager provides N task queues and a selector interface for
// choosing which task queue to service next. Each task queue consists of two
// sub queues:
//
// 1. Incoming task queue. Tasks that are posted get immediately appended here.
// When a task is appended into an empty incoming queue, the task manager
// work function (DoWork()) is scheduled to run on the main task runner.
//
// 2. Work queue. If a work queue is empty when DoWork() is entered, tasks from
// the incoming task queue (if any) are moved here. The work queues are
// registered with the selector as input to the scheduling decision.
//
class BASE_EXPORT SequenceManagerImpl
: public SequenceManager,
public internal::SequencedTaskSource,
public internal::TaskQueueSelector::Observer,
public RunLoop::NestingObserver {
public:
using Observer = SequenceManager::Observer;
~SequenceManagerImpl() override;
// Assume direct control over current thread and create a SequenceManager.
// This function should be called only once per thread.
// This function assumes that a MessageLoop is initialized for
// the current thread.
static std::unique_ptr<SequenceManagerImpl> CreateOnCurrentThread(
SequenceManager::Settings settings = SequenceManager::Settings());
// Create an unbound SequenceManager (typically for a future thread). The
// SequenceManager can be initialized on the current thread and then needs to
// be bound and initialized on the target thread by calling one of the Bind*()
// methods.
static std::unique_ptr<SequenceManagerImpl> CreateUnbound(
SequenceManager::Settings settings);
// SequenceManager implementation:
void BindToCurrentThread() override;
scoped_refptr<SequencedTaskRunner> GetTaskRunnerForCurrentTask() override;
void BindToMessagePump(std::unique_ptr<MessagePump> message_pump) override;
void SetObserver(Observer* observer) override;
void AddTaskTimeObserver(TaskTimeObserver* task_time_observer) override;
void RemoveTaskTimeObserver(TaskTimeObserver* task_time_observer) override;
void RegisterTimeDomain(TimeDomain* time_domain) override;
void UnregisterTimeDomain(TimeDomain* time_domain) override;
TimeDomain* GetRealTimeDomain() const override;
const TickClock* GetTickClock() const override;
TimeTicks NowTicks() const override;
void SetDefaultTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) override;
void ReclaimMemory() override;
bool GetAndClearSystemIsQuiescentBit() override;
void SetWorkBatchSize(int work_batch_size) override;
void SetTimerSlack(TimerSlack timer_slack) override;
void EnableCrashKeys(const char* async_stack_crash_key) override;
const MetricRecordingSettings& GetMetricRecordingSettings() const override;
size_t GetPendingTaskCountForTesting() const override;
scoped_refptr<TaskQueue> CreateTaskQueue(
const TaskQueue::Spec& spec) override;
std::string DescribeAllPendingTasks() const override;
std::unique_ptr<NativeWorkHandle> OnNativeWorkPending(
TaskQueue::QueuePriority priority) override;
void AddTaskObserver(TaskObserver* task_observer) override;
void RemoveTaskObserver(TaskObserver* task_observer) override;
// SequencedTaskSource implementation:
Task* SelectNextTask() override;
void DidRunTask() override;
TimeDelta DelayTillNextTask(LazyNow* lazy_now) const override;
bool HasPendingHighResolutionTasks() override;
bool OnSystemIdle() override;
void AddDestructionObserver(
MessageLoopCurrent::DestructionObserver* destruction_observer);
void RemoveDestructionObserver(
MessageLoopCurrent::DestructionObserver* destruction_observer);
// TODO(alexclarke): Remove this as part of https://crbug.com/825327.
void SetTaskRunner(scoped_refptr<SingleThreadTaskRunner> task_runner);
// TODO(alexclarke): Remove this as part of https://crbug.com/825327.
scoped_refptr<SingleThreadTaskRunner> GetTaskRunner();
bool IsBoundToCurrentThread() const;
MessagePump* GetMessagePump() const;
bool IsType(MessagePumpType type) const;
void SetAddQueueTimeToTasks(bool enable);
void SetTaskExecutionAllowed(bool allowed);
bool IsTaskExecutionAllowed() const;
#if defined(OS_IOS)
void AttachToMessagePump();
#endif
bool IsIdleForTesting() override;
void BindToCurrentThread(std::unique_ptr<MessagePump> pump);
void DeletePendingTasks();
bool HasTasks();
MessagePumpType GetType() const;
// Requests that a task to process work is scheduled.
void ScheduleWork();
// Requests that a delayed task to process work is posted on the main task
// runner. These delayed tasks are de-duplicated. Must be called on the thread
// this class was created on.
// Schedules next wake-up at the given time, cancels any previous requests.
// Use TimeTicks::Max() to cancel a wake-up.
// Must be called from a TimeDomain only.
void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time);
// Returns the currently executing TaskQueue if any. Must be called on the
// thread this class was created on.
internal::TaskQueueImpl* currently_executing_task_queue() const;
// Unregisters a TaskQueue previously created by |CreateTaskQueue()|.
// No tasks will run on this queue after this call.
void UnregisterTaskQueueImpl(
std::unique_ptr<internal::TaskQueueImpl> task_queue);
// Schedule a call to UnregisterTaskQueueImpl as soon as it's safe to do so.
void ShutdownTaskQueueGracefully(
std::unique_ptr<internal::TaskQueueImpl> task_queue);
const scoped_refptr<AssociatedThreadId>& associated_thread() const {
return associated_thread_;
}
const Settings& settings() const { return settings_; }
WeakPtr<SequenceManagerImpl> GetWeakPtr();
// How frequently to perform housekeeping tasks (sweeping canceled tasks etc).
static constexpr TimeDelta kReclaimMemoryInterval =
TimeDelta::FromSeconds(30);
protected:
static std::unique_ptr<ThreadControllerImpl>
CreateThreadControllerImplForCurrentThread(const TickClock* clock);
// Create a task queue manager where |controller| controls the thread
// on which the tasks are eventually run.
SequenceManagerImpl(std::unique_ptr<internal::ThreadController> controller,
SequenceManager::Settings settings = Settings());
friend class internal::TaskQueueImpl;
friend class ::base::sequence_manager::SequenceManagerForTest;
private:
class NativeWorkHandleImpl;
// Returns the SequenceManager running on the current thread. It must only be
// used on the thread it was obtained on.
// Only to be used by MessageLoopCurrent for the moment.
static SequenceManagerImpl* GetCurrent();
friend class ::base::MessageLoopCurrent;
enum class ProcessTaskResult {
kDeferred,
kExecuted,
kSequenceManagerDeleted,
};
// SequenceManager maintains a queue of non-nestable tasks since they're
// uncommon and allocating an extra deque per TaskQueue would waste memory.
using NonNestableTaskDeque =
circular_deque<internal::TaskQueueImpl::DeferredNonNestableTask>;
// We have to track reentrancy because we support nested run loops but the
// selector interface is unaware of those. This struct keeps track of all
// task-related state needed to make pairs of SelectNextTask() / DidRunTask()
// work.
struct ExecutingTask {
ExecutingTask(Task&& task,
internal::TaskQueueImpl* task_queue,
TaskQueue::TaskTiming task_timing)
: pending_task(std::move(task)),
task_queue(task_queue),
task_queue_name(task_queue->GetName()),
task_timing(task_timing),
priority(task_queue->GetQueuePriority()),
task_type(pending_task.task_type) {}
Task pending_task;
internal::TaskQueueImpl* task_queue = nullptr;
// Save task_queue_name as the task queue can be deleted within the task.
const char* task_queue_name;
TaskQueue::TaskTiming task_timing;
// Save priority as it might change after running a task.
TaskQueue::QueuePriority priority;
// Save task metadata to use after running a task, as |pending_task|
// won't be available then.
int task_type;
};
struct MainThreadOnly {
explicit MainThreadOnly(
const scoped_refptr<AssociatedThreadId>& associated_thread,
const SequenceManager::Settings& settings);
~MainThreadOnly();
int nesting_depth = 0;
NonNestableTaskDeque non_nestable_task_queue;
// TODO(altimin): Switch to instruction pointer crash key when it's
// available.
debug::CrashKeyString* file_name_crash_key = nullptr;
debug::CrashKeyString* function_name_crash_key = nullptr;
debug::CrashKeyString* async_stack_crash_key = nullptr;
std::array<char, static_cast<size_t>(debug::CrashKeySize::Size64)>
async_stack_buffer = {};
std::mt19937_64 random_generator;
std::uniform_real_distribution<double> uniform_distribution;
internal::TaskQueueSelector selector;
ObserverList<TaskObserver>::Unchecked task_observers;
ObserverList<TaskTimeObserver>::Unchecked task_time_observers;
std::set<TimeDomain*> time_domains;
std::unique_ptr<internal::RealTimeDomain> real_time_domain;
// If true MaybeReclaimMemory will attempt to reclaim memory.
bool memory_reclaim_scheduled = false;
// Used to ensure we don't perform expensive housekeeping too frequently.
TimeTicks next_time_to_reclaim_memory;
// List of task queues managed by this SequenceManager.
// - active_queues contains queues that are still running tasks.
// Most often they are owned by relevant TaskQueues, but
// queues_to_gracefully_shutdown_ are included here too.
// - queues_to_gracefully_shutdown contains queues which should be deleted
// when they become empty.
// - queues_to_delete contains soon-to-be-deleted queues, because some
// internal scheduling code does not expect queues to be pulled out
// from underneath it.
std::set<internal::TaskQueueImpl*> active_queues;
std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
queues_to_gracefully_shutdown;
std::map<internal::TaskQueueImpl*, std::unique_ptr<internal::TaskQueueImpl>>
queues_to_delete;
bool task_was_run_on_quiescence_monitored_queue = false;
bool nesting_observer_registered_ = false;
// Due to nested runloops more than one task can be executing concurrently.
std::vector<ExecutingTask> task_execution_stack;
Observer* observer = nullptr; // NOT OWNED
ObserverList<MessageLoopCurrent::DestructionObserver>::Unchecked
destruction_observers;
// By default native work is not prioritized at all.
std::multiset<TaskQueue::QueuePriority> pending_native_work{
TaskQueue::kBestEffortPriority};
};
void CompleteInitializationOnBoundThread();
// TaskQueueSelector::Observer:
void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) override;
// RunLoop::NestingObserver:
void OnBeginNestedRunLoop() override;
void OnExitNestedRunLoop() override;
// Called by the task queue to inform this SequenceManager of a task that's
// about to be queued. This SequenceManager may use this opportunity to add
// metadata to |pending_task| before it is moved into the queue.
void WillQueueTask(Task* pending_task, const char* task_queue_name);
// Moves delayed tasks whose run_time is <= Now() onto the work queues and
// reloads any empty work queues.
void MoveReadyDelayedTasksToWorkQueues(LazyNow* lazy_now);
void NotifyWillProcessTask(ExecutingTask* task, LazyNow* time_before_task);
void NotifyDidProcessTask(ExecutingTask* task, LazyNow* time_after_task);
EnqueueOrder GetNextSequenceNumber();
bool GetAddQueueTimeToTasks();
std::unique_ptr<trace_event::ConvertableToTraceFormat>
AsValueWithSelectorResult(internal::WorkQueue* selected_work_queue,
bool force_verbose) const;
void AsValueWithSelectorResultInto(trace_event::TracedValue*,
internal::WorkQueue* selected_work_queue,
bool force_verbose) const;
// Used in construction of TaskQueueImpl to obtain an AtomicFlag which it can
// use to request reload by ReloadEmptyWorkQueues. The lifetime of
// TaskQueueImpl is managed by this class and the handle will be released by
// TaskQueueImpl::UnregisterTaskQueue which is always called before the
// queue's destruction.
AtomicFlagSet::AtomicFlag GetFlagToRequestReloadForEmptyQueue(
TaskQueueImpl* task_queue);
// Calls |TakeImmediateIncomingQueueTasks| on all queues with their reload
// flag set in |empty_queues_to_reload_|.
void ReloadEmptyWorkQueues() const;
std::unique_ptr<internal::TaskQueueImpl> CreateTaskQueueImpl(
const TaskQueue::Spec& spec) override;
// Periodically reclaims memory by sweeping away canceled tasks and shrinking
// buffers.
void MaybeReclaimMemory();
// Deletes queues marked for deletion and empty queues marked for shutdown.
void CleanUpQueues();
void RemoveAllCanceledTasksFromFrontOfWorkQueues();
TaskQueue::TaskTiming::TimeRecordingPolicy ShouldRecordTaskTiming(
const internal::TaskQueueImpl* task_queue);
bool ShouldRecordCPUTimeForTask();
void RecordCrashKeys(const PendingTask&);
// Helper to terminate all scoped trace events to allow starting new ones
// in SelectNextTask().
Task* SelectNextTaskImpl();
// Check if a task of priority |priority| should run given the pending set of
// native work.
bool ShouldRunTaskOfPriority(TaskQueue::QueuePriority priority) const;
// Ignores any immediate work.
TimeDelta GetDelayTillNextDelayedTask(LazyNow* lazy_now) const;
#if DCHECK_IS_ON()
void LogTaskDebugInfo(const internal::WorkQueue* work_queue) const;
#endif
// Determines if wall time or thread time should be recorded for the next
// task.
TaskQueue::TaskTiming InitializeTaskTiming(
internal::TaskQueueImpl* task_queue);
scoped_refptr<AssociatedThreadId> associated_thread_;
EnqueueOrderGenerator enqueue_order_generator_;
const std::unique_ptr<internal::ThreadController> controller_;
const Settings settings_;
const MetricRecordingSettings metric_recording_settings_;
// Whether to add the queue time to tasks.
base::subtle::Atomic32 add_queue_time_to_tasks_;
AtomicFlagSet empty_queues_to_reload_;
// A check to bail out early during memory corruption.
// https://crbug.com/757940
bool Validate();
volatile int32_t memory_corruption_sentinel_;
MainThreadOnly main_thread_only_;
MainThreadOnly& main_thread_only() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
const MainThreadOnly& main_thread_only() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
WeakPtrFactory<SequenceManagerImpl> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(SequenceManagerImpl);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_SEQUENCE_MANAGER_IMPL_H_

View file

@ -0,0 +1,49 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_SEQUENCED_TASK_SOURCE_H_
#define BASE_TASK_SEQUENCE_MANAGER_SEQUENCED_TASK_SOURCE_H_
#include "base/optional.h"
#include "base/pending_task.h"
#include "base/task/sequence_manager/lazy_now.h"
#include "base/task/sequence_manager/tasks.h"
namespace base {
namespace sequence_manager {
namespace internal {
// Interface to pass tasks to ThreadController.
class SequencedTaskSource {
public:
virtual ~SequencedTaskSource() = default;
// Returns the next task to run from this source or nullptr if
// there are no more tasks ready to run. If a task is returned,
// DidRunTask() must be invoked before the next call to SelectNextTask().
virtual Task* SelectNextTask() = 0;
// Notifies this source that the task previously obtained
// from SelectNextTask() has been completed.
virtual void DidRunTask() = 0;
// Returns the delay till the next task or TimeDelta::Max()
// if there are no tasks left.
virtual TimeDelta DelayTillNextTask(LazyNow* lazy_now) const = 0;
// Returns true if there are any pending tasks in the task source which
// require high resolution timing.
virtual bool HasPendingHighResolutionTasks() = 0;
// Called when we have run out of immediate work. If more immediate work
// becomes available as a result of any processing done by this callback,
// return true to schedule a future DoWork.
virtual bool OnSystemIdle() = 0;
};
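// A ThreadController work loop is expected to drive this interface roughly as
// follows (an illustrative sketch, not the actual implementation):
//
//   LazyNow lazy_now(tick_clock);
//   while (Task* task = source->SelectNextTask()) {
//     std::move(task->task).Run();
//     source->DidRunTask();
//   }
//   TimeDelta delay = source->DelayTillNextTask(&lazy_now);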
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_SEQUENCED_TASK_SOURCE_H_

View file

@ -0,0 +1,359 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/task_queue.h"
#include <utility>
#include "base/bind.h"
#include "base/memory/ptr_util.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/threading/thread_checker.h"
#include "base/threading/thread_checker_impl.h"
#include "base/time/time.h"
namespace base {
namespace sequence_manager {
namespace {
class NullTaskRunner final : public SingleThreadTaskRunner {
public:
NullTaskRunner() {}
bool PostDelayedTask(const Location& location,
OnceClosure callback,
TimeDelta delay) override {
return false;
}
bool PostNonNestableDelayedTask(const Location& location,
OnceClosure callback,
TimeDelta delay) override {
return false;
}
bool RunsTasksInCurrentSequence() const override {
return thread_checker_.CalledOnValidThread();
}
private:
// Ref-counted
~NullTaskRunner() override = default;
ThreadCheckerImpl thread_checker_;
};
// TODO(kraynov): Move NullTaskRunner from //base/test to //base.
scoped_refptr<SingleThreadTaskRunner> CreateNullTaskRunner() {
return MakeRefCounted<NullTaskRunner>();
}
} // namespace
TaskQueue::QueueEnabledVoter::QueueEnabledVoter(
scoped_refptr<TaskQueue> task_queue)
: task_queue_(std::move(task_queue)), enabled_(true) {
task_queue_->AddQueueEnabledVoter(enabled_);
}
TaskQueue::QueueEnabledVoter::~QueueEnabledVoter() {
task_queue_->RemoveQueueEnabledVoter(enabled_);
}
void TaskQueue::QueueEnabledVoter::SetVoteToEnable(bool enabled) {
if (enabled == enabled_)
return;
enabled_ = enabled;
task_queue_->OnQueueEnabledVoteChanged(enabled_);
}
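// A queue is considered enabled iff every live voter votes to enable it,
// i.e. |enabled_voter_count_| == |voter_count_|. The helpers below keep the
// counts in sync and only notify |impl_| when the aggregate vote flips.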
void TaskQueue::AddQueueEnabledVoter(bool voter_is_enabled) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
++voter_count_;
if (voter_is_enabled)
++enabled_voter_count_;
}
void TaskQueue::RemoveQueueEnabledVoter(bool voter_is_enabled) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
bool was_enabled = AreAllQueueEnabledVotersEnabled();
if (voter_is_enabled) {
--enabled_voter_count_;
DCHECK_GE(enabled_voter_count_, 0);
}
--voter_count_;
DCHECK_GE(voter_count_, 0);
bool is_enabled = AreAllQueueEnabledVotersEnabled();
if (was_enabled != is_enabled)
impl_->SetQueueEnabled(is_enabled);
}
bool TaskQueue::AreAllQueueEnabledVotersEnabled() const {
return enabled_voter_count_ == voter_count_;
}
void TaskQueue::OnQueueEnabledVoteChanged(bool enabled) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
bool was_enabled = AreAllQueueEnabledVotersEnabled();
if (enabled) {
++enabled_voter_count_;
DCHECK_LE(enabled_voter_count_, voter_count_);
} else {
--enabled_voter_count_;
DCHECK_GE(enabled_voter_count_, 0);
}
bool is_enabled = AreAllQueueEnabledVotersEnabled();
if (was_enabled != is_enabled)
impl_->SetQueueEnabled(is_enabled);
}
TaskQueue::TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
const TaskQueue::Spec& spec)
: impl_(std::move(impl)),
sequence_manager_(impl_ ? impl_->GetSequenceManagerWeakPtr() : nullptr),
associated_thread_((impl_ && impl_->sequence_manager())
? impl_->sequence_manager()->associated_thread()
: MakeRefCounted<internal::AssociatedThreadId>()),
default_task_runner_(impl_ ? impl_->CreateTaskRunner(kTaskTypeNone)
: CreateNullTaskRunner()),
name_(impl_ ? impl_->GetName() : "") {}
TaskQueue::~TaskQueue() {
ShutdownTaskQueueGracefully();
}
void TaskQueue::ShutdownTaskQueueGracefully() {
// scoped_refptr semantics guarantee that this object is no longer in use.
if (!impl_)
return;
if (impl_->IsUnregistered())
return;
// If we've not been unregistered then this must occur on the main thread.
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
impl_->SetObserver(nullptr);
impl_->sequence_manager()->ShutdownTaskQueueGracefully(TakeTaskQueueImpl());
}
TaskQueue::TaskTiming::TaskTiming(bool has_wall_time, bool has_thread_time)
: has_wall_time_(has_wall_time), has_thread_time_(has_thread_time) {}
void TaskQueue::TaskTiming::RecordTaskStart(LazyNow* now) {
DCHECK_EQ(State::NotStarted, state_);
state_ = State::Running;
if (has_wall_time())
start_time_ = now->Now();
if (has_thread_time())
start_thread_time_ = base::ThreadTicks::Now();
}
void TaskQueue::TaskTiming::RecordTaskEnd(LazyNow* now) {
DCHECK(state_ == State::Running || state_ == State::Finished);
if (state_ == State::Finished)
return;
state_ = State::Finished;
if (has_wall_time())
end_time_ = now->Now();
if (has_thread_time())
end_thread_time_ = base::ThreadTicks::Now();
}
void TaskQueue::ShutdownTaskQueue() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
if (!sequence_manager_) {
TakeTaskQueueImpl().reset();
return;
}
impl_->SetBlameContext(nullptr);
impl_->SetOnTaskStartedHandler(
internal::TaskQueueImpl::OnTaskStartedHandler());
impl_->SetOnTaskCompletedHandler(
internal::TaskQueueImpl::OnTaskCompletedHandler());
sequence_manager_->UnregisterTaskQueueImpl(TakeTaskQueueImpl());
}
scoped_refptr<SingleThreadTaskRunner> TaskQueue::CreateTaskRunner(
TaskType task_type) {
// We only need to lock if we're not on the main thread.
base::internal::CheckedAutoLockMaybe lock(!IsOnMainThread() ? &impl_lock_
: nullptr);
if (!impl_)
return CreateNullTaskRunner();
return impl_->CreateTaskRunner(task_type);
}
std::unique_ptr<TaskQueue::QueueEnabledVoter>
TaskQueue::CreateQueueEnabledVoter() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return nullptr;
return WrapUnique(new QueueEnabledVoter(this));
}
bool TaskQueue::IsQueueEnabled() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return false;
return impl_->IsQueueEnabled();
}
bool TaskQueue::IsEmpty() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return true;
return impl_->IsEmpty();
}
size_t TaskQueue::GetNumberOfPendingTasks() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return 0;
return impl_->GetNumberOfPendingTasks();
}
bool TaskQueue::HasTaskToRunImmediately() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return false;
return impl_->HasTaskToRunImmediately();
}
Optional<TimeTicks> TaskQueue::GetNextScheduledWakeUp() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return nullopt;
return impl_->GetNextScheduledWakeUp();
}
void TaskQueue::SetQueuePriority(TaskQueue::QueuePriority priority) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->SetQueuePriority(priority);
}
TaskQueue::QueuePriority TaskQueue::GetQueuePriority() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return TaskQueue::QueuePriority::kLowPriority;
return impl_->GetQueuePriority();
}
void TaskQueue::AddTaskObserver(TaskObserver* task_observer) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->AddTaskObserver(task_observer);
}
void TaskQueue::RemoveTaskObserver(TaskObserver* task_observer) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->RemoveTaskObserver(task_observer);
}
void TaskQueue::SetTimeDomain(TimeDomain* time_domain) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->SetTimeDomain(time_domain);
}
TimeDomain* TaskQueue::GetTimeDomain() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return nullptr;
return impl_->GetTimeDomain();
}
void TaskQueue::SetBlameContext(trace_event::BlameContext* blame_context) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->SetBlameContext(blame_context);
}
void TaskQueue::InsertFence(InsertFencePosition position) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->InsertFence(position);
}
void TaskQueue::InsertFenceAt(TimeTicks time) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->InsertFenceAt(time);
}
void TaskQueue::RemoveFence() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->RemoveFence();
}
bool TaskQueue::HasActiveFence() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return false;
return impl_->HasActiveFence();
}
bool TaskQueue::BlockedByFence() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return false;
return impl_->BlockedByFence();
}
EnqueueOrder TaskQueue::GetEnqueueOrderAtWhichWeBecameUnblocked() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return EnqueueOrder();
return impl_->GetEnqueueOrderAtWhichWeBecameUnblocked();
}
const char* TaskQueue::GetName() const {
return name_;
}
void TaskQueue::SetObserver(Observer* observer) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
// The observer is guaranteed to outlive the TaskQueue, and the TaskQueueImpl
// lifecycle is controlled by |this|.
impl_->SetObserver(observer);
}
void TaskQueue::SetShouldReportPostedTasksWhenDisabled(bool should_report) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!impl_)
return;
impl_->SetShouldReportPostedTasksWhenDisabled(should_report);
}
bool TaskQueue::IsOnMainThread() const {
return associated_thread_->IsBoundToCurrentThread();
}
std::unique_ptr<internal::TaskQueueImpl> TaskQueue::TakeTaskQueueImpl() {
base::internal::CheckedAutoLock lock(impl_lock_);
DCHECK(impl_);
return std::move(impl_);
}
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,397 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_
#include <stdint.h>
#include <memory>
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/optional.h"
#include "base/single_thread_task_runner.h"
#include "base/task/common/checked_lock.h"
#include "base/task/sequence_manager/lazy_now.h"
#include "base/task/sequence_manager/tasks.h"
#include "base/task/task_observer.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
namespace base {
class TaskObserver;
namespace trace_event {
class BlameContext;
}
namespace sequence_manager {
namespace internal {
class AssociatedThreadId;
class SequenceManagerImpl;
class TaskQueueImpl;
} // namespace internal
class TimeDomain;
// TODO(kraynov): Make TaskQueue actually be an interface for TaskQueueImpl
// and stop using ref-counting because we're no longer tied to task runner
// lifecycle and there's no other need for ref-counting either.
// NOTE: When TaskQueue gets automatically deleted on zero ref-count,
// TaskQueueImpl gets gracefully shut down. It doesn't get unregistered
// immediately and might accept some last-minute tasks until the
// SequenceManager unregisters it at some point. This is done to ensure that
// a task queue always gets unregistered on the main thread.
class BASE_EXPORT TaskQueue : public RefCountedThreadSafe<TaskQueue> {
public:
class Observer {
public:
virtual ~Observer() = default;
// Notifies the observer that the time at which this queue wants to run
// the next task has changed. |next_wake_up| can be in the past
// (e.g. TimeTicks() can be used to notify about immediate work).
// Can be called on any thread.
// All methods but SetObserver, SetTimeDomain and GetTimeDomain may be
// called on |queue| from within this notification.
//
// TODO(altimin): Make it Optional<TimeTicks> to tell
// observer about cancellations.
virtual void OnQueueNextWakeUpChanged(TimeTicks next_wake_up) = 0;
};
// Shuts down the queue. All tasks currently queued will be discarded.
virtual void ShutdownTaskQueue();
// Shuts down the queue when there are no more tasks queued.
void ShutdownTaskQueueGracefully();
// TODO(scheduler-dev): Could we define a more clear list of priorities?
// See https://crbug.com/847858.
enum QueuePriority : uint8_t {
// Queues with control priority will run before any other queue, and will
// explicitly starve other queues. Typically this should only be used for
// private queues which perform control operations.
kControlPriority = 0,
// The selector will prioritize these in descending order: highest over very
// high, high, normal and low, and so on down the list. However it ensures
// that none of the lower priority queues can be completely starved by
// higher priority tasks. All of these queues will always take priority
// over, and can starve, the best effort queue.
kHighestPriority = 1,
kVeryHighPriority = 2,
kHighPriority = 3,
// Queues with normal priority are the default.
kNormalPriority = 4,
kLowPriority = 5,
// Queues with best effort priority will only be run if all other queues are
// empty. They can be starved by the other queues.
kBestEffortPriority = 6,
// Must be the last entry.
kQueuePriorityCount = 7,
kFirstQueuePriority = kControlPriority,
};
// Can be called on any thread.
static const char* PriorityToString(QueuePriority priority);
// Options for constructing a TaskQueue.
struct Spec {
explicit Spec(const char* name) : name(name) {}
Spec SetShouldMonitorQuiescence(bool should_monitor) {
should_monitor_quiescence = should_monitor;
return *this;
}
Spec SetShouldNotifyObservers(bool run_observers) {
should_notify_observers = run_observers;
return *this;
}
// Delayed fences require Now() to be sampled when posting immediate tasks
// which is not free.
Spec SetDelayedFencesAllowed(bool allow_delayed_fences) {
delayed_fence_allowed = allow_delayed_fences;
return *this;
}
Spec SetTimeDomain(TimeDomain* domain) {
time_domain = domain;
return *this;
}
const char* name;
bool should_monitor_quiescence = false;
TimeDomain* time_domain = nullptr;
bool should_notify_observers = true;
bool delayed_fence_allowed = false;
};
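// A minimal construction sketch ("my_queue" is a hypothetical name; the
// resulting Spec would typically be handed to a SequenceManager when
// creating the queue). The setters return the Spec by value, so they chain:
//
//   TaskQueue::Spec spec = TaskQueue::Spec("my_queue")
//                              .SetShouldMonitorQuiescence(true)
//                              .SetDelayedFencesAllowed(true);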
// TODO(altimin): Make this private after TaskQueue/TaskQueueImpl refactoring.
TaskQueue(std::unique_ptr<internal::TaskQueueImpl> impl,
const TaskQueue::Spec& spec);
// Information about task execution.
//
// Wall-time related methods (start_time, end_time, wall_duration) can be
// called only when |has_wall_time()| is true.
// Thread-time related methods (start_thread_time, end_thread_time,
// thread_duration) can be called only when |has_thread_time()| is true.
//
// start_* should be called after RecordTaskStart.
// end_* and *_duration should be called after RecordTaskEnd.
class BASE_EXPORT TaskTiming {
public:
enum class State { NotStarted, Running, Finished };
enum class TimeRecordingPolicy { DoRecord, DoNotRecord };
TaskTiming(bool has_wall_time, bool has_thread_time);
bool has_wall_time() const { return has_wall_time_; }
bool has_thread_time() const { return has_thread_time_; }
base::TimeTicks start_time() const {
DCHECK(has_wall_time());
return start_time_;
}
base::TimeTicks end_time() const {
DCHECK(has_wall_time());
return end_time_;
}
base::TimeDelta wall_duration() const {
DCHECK(has_wall_time());
return end_time_ - start_time_;
}
base::ThreadTicks start_thread_time() const {
DCHECK(has_thread_time());
return start_thread_time_;
}
base::ThreadTicks end_thread_time() const {
DCHECK(has_thread_time());
return end_thread_time_;
}
base::TimeDelta thread_duration() const {
DCHECK(has_thread_time());
return end_thread_time_ - start_thread_time_;
}
State state() const { return state_; }
void RecordTaskStart(LazyNow* now);
void RecordTaskEnd(LazyNow* now);
// Protected for tests.
protected:
State state_ = State::NotStarted;
bool has_wall_time_;
bool has_thread_time_;
base::TimeTicks start_time_;
base::TimeTicks end_time_;
base::ThreadTicks start_thread_time_;
base::ThreadTicks end_thread_time_;
};
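// A lifecycle sketch (assumes |clock| is a caller-supplied TickClock*; two
// LazyNow instances are used because a LazyNow caches the first time it
// samples):
//
//   TaskQueue::TaskTiming timing(/*has_wall_time=*/true,
//                                /*has_thread_time=*/false);
//   LazyNow start(clock);
//   timing.RecordTaskStart(&start);
//   // ... run the task ...
//   LazyNow end(clock);
//   timing.RecordTaskEnd(&end);
//   TimeDelta elapsed = timing.wall_duration();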
// An interface that lets the owner vote on whether or not the associated
// TaskQueue should be enabled.
class BASE_EXPORT QueueEnabledVoter {
public:
~QueueEnabledVoter();
QueueEnabledVoter(const QueueEnabledVoter&) = delete;
const QueueEnabledVoter& operator=(const QueueEnabledVoter&) = delete;
// Votes to enable or disable the associated TaskQueue. The TaskQueue will
// only be enabled if all the voters agree it should be enabled, or if there
// are no voters.
// NOTE this must be called on the thread the associated TaskQueue was
// created on.
void SetVoteToEnable(bool enabled);
bool IsVotingToEnable() const { return enabled_; }
private:
friend class TaskQueue;
explicit QueueEnabledVoter(scoped_refptr<TaskQueue> task_queue);
scoped_refptr<TaskQueue> const task_queue_;
bool enabled_;
};
// Returns an interface that allows the caller to vote on whether or not this
// TaskQueue is enabled. The TaskQueue will be enabled if there are no voters
// or if all agree it should be enabled.
// NOTE this must be called on the thread this TaskQueue was created by.
std::unique_ptr<QueueEnabledVoter> CreateQueueEnabledVoter();
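// Usage sketch (|queue| stands for a hypothetical scoped_refptr<TaskQueue>).
// Because the queue is only enabled while every voter agrees, destroying the
// voter withdraws its vote:
//
//   std::unique_ptr<TaskQueue::QueueEnabledVoter> voter =
//       queue->CreateQueueEnabledVoter();
//   voter->SetVoteToEnable(false);  // Queue becomes disabled.
//   voter.reset();                  // Vote withdrawn; queue may re-enable.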
// NOTE this must be called on the thread this TaskQueue was created by.
bool IsQueueEnabled() const;
// Returns true if the queue is completely empty.
bool IsEmpty() const;
// Returns the number of pending tasks in the queue.
size_t GetNumberOfPendingTasks() const;
// Returns true if the queue has work that's ready to execute now.
// NOTE: this must be called on the thread this TaskQueue was created by.
bool HasTaskToRunImmediately() const;
// Returns requested run time of next scheduled wake-up for a delayed task
// which is not ready to run. If there are no such tasks (immediate tasks
// don't count) or the queue is disabled it returns nullopt.
// NOTE: this must be called on the thread this TaskQueue was created by.
Optional<TimeTicks> GetNextScheduledWakeUp();
// Can be called on any thread.
virtual const char* GetName() const;
// Set the priority of the queue to |priority|. NOTE this must be called on
// the thread this TaskQueue was created by.
void SetQueuePriority(QueuePriority priority);
// Returns the current queue priority.
QueuePriority GetQueuePriority() const;
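// e.g. (a sketch; |queue| is a hypothetical scoped_refptr<TaskQueue>):
//
//   queue->SetQueuePriority(TaskQueue::kBestEffortPriority);
//   DCHECK_EQ(TaskQueue::kBestEffortPriority, queue->GetQueuePriority());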
// These functions can only be called on the same thread that the sequence
// manager executes its tasks on.
void AddTaskObserver(TaskObserver* task_observer);
void RemoveTaskObserver(TaskObserver* task_observer);
// Set the blame context which is entered and left while executing tasks from
// this task queue. |blame_context| must be null or outlive this task queue.
// Must be called on the thread this TaskQueue was created by.
void SetBlameContext(trace_event::BlameContext* blame_context);
// Removes the task queue from the previous TimeDomain and adds it to
// |domain|. This is a moderately expensive operation.
void SetTimeDomain(TimeDomain* domain);
// Returns the queue's current TimeDomain. Can be called from any thread.
TimeDomain* GetTimeDomain() const;
enum class InsertFencePosition {
kNow,  // Tasks posted on the queue up to this point may run.
       // All further tasks are blocked.
kBeginningOfTime, // No tasks posted on this queue may run.
};
// Inserts a barrier into the task queue which prevents tasks with an enqueue
// order greater than the fence from running until either the fence has been
// removed or a subsequent fence has unblocked some tasks within the queue.
// Note: delayed tasks get their enqueue order set once their delay has
// expired, and non-delayed tasks get their enqueue order set when posted.
//
// Fences come in three flavours:
// - Regular (InsertFence(NOW)) - all tasks posted after this moment
// are blocked.
// - Fully blocking (InsertFence(kBeginningOfTime)) - all tasks including
// already posted are blocked.
// - Delayed (InsertFenceAt(timestamp)) - blocks all tasks posted after given
// point in time (must be in the future).
//
// Only one fence can be scheduled at a time. Inserting a new fence
// will automatically remove the previous one, regardless of fence type.
void InsertFence(InsertFencePosition position);
// Delayed fences are only allowed for queues created with
// SetDelayedFencesAllowed(true) because this feature implies sampling Now()
// (which isn't free) for every PostTask, even those with zero delay.
void InsertFenceAt(TimeTicks time);
// Removes any previously added fence and unblocks execution of any tasks
// blocked by it.
void RemoveFence();
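// Fence usage sketch (|queue| is hypothetical):
//
//   queue->InsertFence(TaskQueue::InsertFencePosition::kNow);
//   // Tasks posted from this point on are blocked...
//   queue->RemoveFence();  // ...until the fence is removed.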
// Returns true if the queue has a fence but it isn't necessarily blocking
// execution of tasks (it may be the case if tasks enqueue order hasn't
// reached the number set for a fence).
bool HasActiveFence();
// Returns true if the queue has a fence which is blocking execution of tasks.
bool BlockedByFence() const;
// Returns an EnqueueOrder generated at the last transition to unblocked. A
// queue is unblocked when it is enabled and no fence prevents the front task
// from running. If the EnqueueOrder of a task is greater than this when it
// starts running, it means that it was never blocked.
EnqueueOrder GetEnqueueOrderAtWhichWeBecameUnblocked() const;
void SetObserver(Observer* observer);
// Controls whether or not the queue will emit traces events when tasks are
// posted to it while disabled. This only applies for the current or next
// period during which the queue is disabled. When the queue is re-enabled
// this will revert back to the default value of false.
void SetShouldReportPostedTasksWhenDisabled(bool should_report);
// Create a task runner for this TaskQueue which will annotate all
// posted tasks with the given task type.
// May be called on any thread.
// NOTE: Task runners don't hold a reference to a TaskQueue, hence the caller
// must retain a reference to prevent automatic graceful shutdown. Unique
// ownership of task queues will fix this issue soon.
scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(TaskType task_type);
// Default task runner which doesn't annotate tasks with a task type.
scoped_refptr<SingleThreadTaskRunner> task_runner() const {
return default_task_runner_;
}
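// Posting sketch, per the lifetime note above (|queue| is a hypothetical
// scoped_refptr<TaskQueue> that must be kept alive; DoWork is a hypothetical
// function):
//
//   scoped_refptr<SingleThreadTaskRunner> runner =
//       queue->CreateTaskRunner(kTaskTypeNone);
//   runner->PostTask(FROM_HERE, BindOnce(&DoWork));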
protected:
virtual ~TaskQueue();
internal::TaskQueueImpl* GetTaskQueueImpl() const { return impl_.get(); }
private:
friend class RefCountedThreadSafe<TaskQueue>;
friend class internal::SequenceManagerImpl;
friend class internal::TaskQueueImpl;
void AddQueueEnabledVoter(bool voter_is_enabled);
void RemoveQueueEnabledVoter(bool voter_is_enabled);
bool AreAllQueueEnabledVotersEnabled() const;
void OnQueueEnabledVoteChanged(bool enabled);
bool IsOnMainThread() const;
// TaskQueue has ownership of an underlying implementation but in certain
// cases (e.g. detached frames) their lifetime may diverge.
// This method should be used to take away the impl for graceful shutdown.
// TaskQueue will disregard any calls or posting tasks thereafter.
std::unique_ptr<internal::TaskQueueImpl> TakeTaskQueueImpl();
// |impl_| can be written to on the main thread but can be read from
// any thread.
// |impl_lock_| must be acquired when writing to |impl_| or when accessing
// it from non-main thread. Reading from the main thread does not require
// a lock.
mutable base::internal::CheckedLock impl_lock_{
base::internal::UniversalPredecessor{}};
std::unique_ptr<internal::TaskQueueImpl> impl_;
const WeakPtr<internal::SequenceManagerImpl> sequence_manager_;
scoped_refptr<internal::AssociatedThreadId> associated_thread_;
scoped_refptr<SingleThreadTaskRunner> default_task_runner_;
int enabled_voter_count_ = 0;
int voter_count_ = 0;
const char* name_;
DISALLOW_COPY_AND_ASSIGN(TaskQueue);
};
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_H_

File diff suppressed because it is too large

View file

@ -0,0 +1,558 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_
#include <stddef.h>
#include <memory>
#include <queue>
#include <set>
#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/pending_task.h"
#include "base/task/common/checked_lock.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/common/operations_controller.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/atomic_flag_set.h"
#include "base/task/sequence_manager/enqueue_order.h"
#include "base/task/sequence_manager/lazily_deallocated_deque.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/task_queue.h"
#include "base/threading/thread_checker.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
namespace base {
namespace sequence_manager {
class LazyNow;
class TimeDomain;
namespace internal {
class SequenceManagerImpl;
class WorkQueue;
class WorkQueueSets;
// TaskQueueImpl has four main queues:
//
// Immediate (non-delayed) tasks:
// |immediate_incoming_queue| - PostTask enqueues tasks here.
// |immediate_work_queue| - SequenceManager takes immediate tasks here.
//
// Delayed tasks
// |delayed_incoming_queue| - PostDelayedTask enqueues tasks here.
// |delayed_work_queue| - SequenceManager takes delayed tasks here.
//
// The |immediate_incoming_queue| can be accessed from any thread, the other
// queues are main-thread only. To reduce the overhead of locking,
// |immediate_work_queue| is swapped with |immediate_incoming_queue| when
// |immediate_work_queue| becomes empty.
//
// Delayed tasks are initially posted to |delayed_incoming_queue| and a wake-up
// is scheduled with the TimeDomain. When the delay has elapsed, the TimeDomain
// calls MoveReadyDelayedTasksToWorkQueue() and ready delayed tasks are moved into the
// |delayed_work_queue|. Note the EnqueueOrder (used for ordering) for a delayed
// task is not set until it's moved into the |delayed_work_queue|.
//
// TaskQueueImpl uses the WorkQueueSets and the TaskQueueSelector to implement
// prioritization. Task selection is done by the TaskQueueSelector and when a
// queue is selected, it round-robins between the |immediate_work_queue| and
// |delayed_work_queue|. The reason for this is we want to make sure delayed
// tasks (normally the most common type) don't starve out immediate work.
class BASE_EXPORT TaskQueueImpl {
public:
TaskQueueImpl(SequenceManagerImpl* sequence_manager,
TimeDomain* time_domain,
const TaskQueue::Spec& spec);
~TaskQueueImpl();
// Types of queues TaskQueueImpl is maintaining internally.
enum class WorkQueueType { kImmediate, kDelayed };
// Some methods have fast paths when on the main thread.
enum class CurrentThread { kMainThread, kNotMainThread };
// Non-nestable tasks may get deferred, but the deferral queue is maintained
// on the SequenceManager side, so we need to keep the information required to
// requeue them.
struct DeferredNonNestableTask {
Task task;
internal::TaskQueueImpl* task_queue;
WorkQueueType work_queue_type;
};
using OnNextWakeUpChangedCallback = RepeatingCallback<void(TimeTicks)>;
using OnTaskReadyHandler = RepeatingCallback<void(const Task&, LazyNow*)>;
using OnTaskStartedHandler =
RepeatingCallback<void(const Task&, const TaskQueue::TaskTiming&)>;
using OnTaskCompletedHandler =
RepeatingCallback<void(const Task&, TaskQueue::TaskTiming*, LazyNow*)>;
// May be called from any thread.
scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner(
TaskType task_type) const;
// TaskQueue implementation.
const char* GetName() const;
bool IsQueueEnabled() const;
void SetQueueEnabled(bool enabled);
void SetShouldReportPostedTasksWhenDisabled(bool should_report);
bool IsEmpty() const;
size_t GetNumberOfPendingTasks() const;
bool HasTaskToRunImmediately() const;
Optional<TimeTicks> GetNextScheduledWakeUp();
Optional<DelayedWakeUp> GetNextScheduledWakeUpImpl();
void SetQueuePriority(TaskQueue::QueuePriority priority);
TaskQueue::QueuePriority GetQueuePriority() const;
void AddTaskObserver(TaskObserver* task_observer);
void RemoveTaskObserver(TaskObserver* task_observer);
void SetTimeDomain(TimeDomain* time_domain);
TimeDomain* GetTimeDomain() const;
void SetBlameContext(trace_event::BlameContext* blame_context);
void InsertFence(TaskQueue::InsertFencePosition position);
void InsertFenceAt(TimeTicks time);
void RemoveFence();
bool HasActiveFence();
bool BlockedByFence() const;
EnqueueOrder GetEnqueueOrderAtWhichWeBecameUnblocked() const;
// Implementation of TaskQueue::SetObserver.
void SetObserver(TaskQueue::Observer* observer);
void UnregisterTaskQueue();
// Returns true if a (potentially hypothetical) task with the specified
// |enqueue_order| could run on the queue. Must be called from the main
// thread.
bool CouldTaskRun(EnqueueOrder enqueue_order) const;
// Returns true if a task with |enqueue_order| obtained from this queue was
// ever in the queue while it was disabled, blocked by a fence, or less
// important than kNormalPriority.
bool WasBlockedOrLowPriority(EnqueueOrder enqueue_order) const;
// Must only be called from the thread this task queue was created on.
void ReloadEmptyImmediateWorkQueue();
void AsValueInto(TimeTicks now,
trace_event::TracedValue* state,
bool force_verbose) const;
bool GetQuiescenceMonitored() const { return should_monitor_quiescence_; }
bool GetShouldNotifyObservers() const { return should_notify_observers_; }
void NotifyWillProcessTask(const Task& task,
bool was_blocked_or_low_priority);
void NotifyDidProcessTask(const Task& task);
// Check for available tasks in immediate work queues.
// Used to check if we need to generate notifications about delayed work.
bool HasPendingImmediateWork();
bool HasPendingImmediateWorkLocked()
EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
bool has_pending_high_resolution_tasks() const {
return main_thread_only()
.delayed_incoming_queue.has_pending_high_resolution_tasks();
}
WorkQueue* delayed_work_queue() {
return main_thread_only().delayed_work_queue.get();
}
const WorkQueue* delayed_work_queue() const {
return main_thread_only().delayed_work_queue.get();
}
WorkQueue* immediate_work_queue() {
return main_thread_only().immediate_work_queue.get();
}
const WorkQueue* immediate_work_queue() const {
return main_thread_only().immediate_work_queue.get();
}
// Enqueues any delayed tasks which should be run now on the
// |delayed_work_queue|. Must be called from the main thread.
void MoveReadyDelayedTasksToWorkQueue(LazyNow* lazy_now);
base::internal::HeapHandle heap_handle() const {
return main_thread_only().heap_handle;
}
void set_heap_handle(base::internal::HeapHandle heap_handle) {
main_thread_only().heap_handle = heap_handle;
}
// Pushes |task| onto the front of the specified work queue. Caution must be
// taken with this API because you could easily starve out other work.
// TODO(kraynov): Simplify non-nestable task logic https://crbug.com/845437.
void RequeueDeferredNonNestableTask(DeferredNonNestableTask task);
void PushImmediateIncomingTaskForTest(Task&& task);
// Iterates over |delayed_incoming_queue| removing canceled tasks. In
// addition MaybeShrinkQueue is called on all internal queues.
void ReclaimMemory(TimeTicks now);
// Registers a handler to invoke when a task posted to this TaskQueueImpl is
// ready. For a non-delayed task, this is when the task is posted. For a
// delayed task, this is when the delay expires.
void SetOnTaskReadyHandler(OnTaskReadyHandler handler);
// Allows wrapping TaskQueue to set a handler to subscribe for notifications
// about started and completed tasks.
void SetOnTaskStartedHandler(OnTaskStartedHandler handler);
void OnTaskStarted(const Task& task,
const TaskQueue::TaskTiming& task_timing);
// |task_timing| may be passed in the Running state, without an end time, so
// that the handler can run an additional task that is counted as part of the
// main task.
// The handler may optionally call TaskTiming::RecordTaskEnd to finalize the
// task and use the resulting timing.
void SetOnTaskCompletedHandler(OnTaskCompletedHandler handler);
void OnTaskCompleted(const Task& task,
TaskQueue::TaskTiming* task_timing,
LazyNow* lazy_now);
bool RequiresTaskTiming() const;
WeakPtr<SequenceManagerImpl> GetSequenceManagerWeakPtr();
SequenceManagerImpl* sequence_manager() const { return sequence_manager_; }
// Returns true if this queue is unregistered or the sequence manager has been
// deleted, in which case this queue can be safely deleted on any thread.
bool IsUnregistered() const;
// Delete all tasks within this TaskQueue.
void DeletePendingTasks();
// Whether this task queue owns any tasks. Task queue being disabled doesn't
// affect this.
bool HasTasks() const;
protected:
void SetDelayedWakeUpForTesting(Optional<DelayedWakeUp> wake_up);
private:
friend class WorkQueue;
friend class WorkQueueTest;
// A TaskQueueImpl instance can be destroyed or unregistered before all its
// associated TaskRunner instances are (they are refcounted). Thus we need a
// way to prevent TaskRunner instances from posting further tasks. This class
// guards PostTask calls using an OperationsController.
// This class is ref-counted as both the TaskQueueImpl instance and all
// associated TaskRunner instances share the same GuardedTaskPoster instance.
// When TaskQueueImpl shuts down it calls ShutdownAndWaitForZeroOperations(),
// preventing further PostTask calls being made to the underlying
// TaskQueueImpl.
class GuardedTaskPoster : public RefCountedThreadSafe<GuardedTaskPoster> {
public:
explicit GuardedTaskPoster(TaskQueueImpl* outer);
bool PostTask(PostedTask task);
void StartAcceptingOperations() {
operations_controller_.StartAcceptingOperations();
}
void ShutdownAndWaitForZeroOperations() {
operations_controller_.ShutdownAndWaitForZeroOperations();
}
private:
friend class RefCountedThreadSafe<GuardedTaskPoster>;
~GuardedTaskPoster();
base::internal::OperationsController operations_controller_;
// Pointer might be stale; access is guarded by |operations_controller_|.
TaskQueueImpl* const outer_;
};
class TaskRunner : public SingleThreadTaskRunner {
public:
explicit TaskRunner(scoped_refptr<GuardedTaskPoster> task_poster,
scoped_refptr<AssociatedThreadId> associated_thread,
TaskType task_type);
bool PostDelayedTask(const Location& location,
OnceClosure callback,
TimeDelta delay) final;
bool PostNonNestableDelayedTask(const Location& location,
OnceClosure callback,
TimeDelta delay) final;
bool RunsTasksInCurrentSequence() const final;
private:
~TaskRunner() final;
bool PostTask(PostedTask task) const;
const scoped_refptr<GuardedTaskPoster> task_poster_;
const scoped_refptr<AssociatedThreadId> associated_thread_;
const TaskType task_type_;
};
// A queue for holding delayed tasks before their delay has expired.
struct DelayedIncomingQueue {
public:
DelayedIncomingQueue();
~DelayedIncomingQueue();
void push(Task&& task);
void pop();
bool empty() const { return queue_.empty(); }
size_t size() const { return queue_.size(); }
const Task& top() const { return queue_.top(); }
void swap(DelayedIncomingQueue* other);
bool has_pending_high_resolution_tasks() const {
return pending_high_res_tasks_;
}
void SweepCancelledTasks();
std::priority_queue<Task> TakeTasks() { return std::move(queue_); }
void AsValueInto(TimeTicks now, trace_event::TracedValue* state) const;
private:
struct PQueue : public std::priority_queue<Task> {
// Expose the container and comparator.
using std::priority_queue<Task>::c;
using std::priority_queue<Task>::comp;
};
PQueue queue_;
// Number of pending tasks in the queue that need high resolution timing.
int pending_high_res_tasks_ = 0;
DISALLOW_COPY_AND_ASSIGN(DelayedIncomingQueue);
};
struct MainThreadOnly {
MainThreadOnly(TaskQueueImpl* task_queue, TimeDomain* time_domain);
~MainThreadOnly();
// Another copy of TimeDomain for lock-free access from the main thread.
// See description inside struct AnyThread for details.
TimeDomain* time_domain;
TaskQueue::Observer* task_queue_observer = nullptr;
std::unique_ptr<WorkQueue> delayed_work_queue;
std::unique_ptr<WorkQueue> immediate_work_queue;
DelayedIncomingQueue delayed_incoming_queue;
ObserverList<TaskObserver>::Unchecked task_observers;
base::internal::HeapHandle heap_handle;
bool is_enabled = true;
trace_event::BlameContext* blame_context = nullptr; // Not owned.
EnqueueOrder current_fence;
Optional<TimeTicks> delayed_fence;
// Snapshots the next sequence number when the queue is unblocked, otherwise
// it contains EnqueueOrder::none(). If the EnqueueOrder of a task just
// popped from this queue is greater than this, it means that the queue was
// never disabled or blocked by a fence while the task was queued.
EnqueueOrder enqueue_order_at_which_we_became_unblocked;
// If the EnqueueOrder of a task just popped from this queue is greater than
// this, it means that the queue was never disabled, blocked by a fence or
// less important than kNormalPriority while the task was queued.
//
// Implementation details:
// 1) When the queue is made less important than kNormalPriority, this is
// set to EnqueueOrder::max(). The EnqueueOrder of any task will compare
// less than this.
// 2) When the queue is made at least as important as kNormalPriority, this
// snapshots the next sequence number. If the queue is blocked, the value
// is irrelevant because no task should be popped. If the queue is not
// blocked, the EnqueueOrder of any already queued task will compare less
// than this.
// 3) When the queue is unblocked while at least as important as
// kNormalPriority, this snapshots the next sequence number. The
// EnqueueOrder of any already queued task will compare less than this.
EnqueueOrder
enqueue_order_at_which_we_became_unblocked_with_normal_priority;
OnTaskReadyHandler on_task_ready_handler;
OnTaskStartedHandler on_task_started_handler;
OnTaskCompletedHandler on_task_completed_handler;
// Last reported wake up, used only in UpdateWakeUp to avoid
// excessive calls.
Optional<DelayedWakeUp> scheduled_wake_up;
// If false, queue will be disabled. Used only for tests.
bool is_enabled_for_test = true;
// The time at which the task queue was disabled, if it is currently
// disabled.
Optional<TimeTicks> disabled_time;
// Whether or not the task queue should emit tracing events for tasks
// posted to this queue when it is disabled.
bool should_report_posted_tasks_when_disabled = false;
};
void PostTask(PostedTask task);
void PostImmediateTaskImpl(PostedTask task, CurrentThread current_thread);
void PostDelayedTaskImpl(PostedTask task, CurrentThread current_thread);
// Push the task onto the |delayed_incoming_queue|. Lock-free main thread
// only fast path.
void PushOntoDelayedIncomingQueueFromMainThread(Task pending_task,
TimeTicks now,
bool notify_task_annotator);
// Push the task onto the |delayed_incoming_queue|. Slow path from other
// threads.
void PushOntoDelayedIncomingQueue(Task pending_task);
void ScheduleDelayedWorkTask(Task pending_task);
void MoveReadyImmediateTasksToImmediateWorkQueueLocked()
EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
// LazilyDeallocatedDeque uses TimeTicks to figure out when to resize, so we
// should always use real time here.
using TaskDeque =
LazilyDeallocatedDeque<Task, subtle::TimeTicksNowIgnoringOverride>;
// Extracts all the tasks from the immediate incoming queue and swaps it with
// |queue| which must be empty.
// Can be called from any thread.
void TakeImmediateIncomingQueueTasks(TaskDeque* queue);
void TraceQueueSize() const;
static void QueueAsValueInto(const TaskDeque& queue,
TimeTicks now,
trace_event::TracedValue* state);
static void QueueAsValueInto(const std::priority_queue<Task>& queue,
TimeTicks now,
trace_event::TracedValue* state);
static void TaskAsValueInto(const Task& task,
TimeTicks now,
trace_event::TracedValue* state);
// Schedules delayed work on time domain and calls the observer.
void UpdateDelayedWakeUp(LazyNow* lazy_now);
void UpdateDelayedWakeUpImpl(LazyNow* lazy_now,
Optional<DelayedWakeUp> wake_up);
// Activate a delayed fence if a time has come.
void ActivateDelayedFenceIfNeeded(TimeTicks now);
// Updates state protected by any_thread_lock_.
void UpdateCrossThreadQueueStateLocked()
EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
void MaybeLogPostTask(PostedTask* task);
void MaybeAdjustTaskDelay(PostedTask* task, CurrentThread current_thread);
// Reports the task if it was due to IPC and was posted to a disabled queue.
// This should be called after WillQueueTask has been called for the task.
void MaybeReportIpcTaskQueuedFromMainThread(Task* pending_task,
const char* task_queue_name);
bool ShouldReportIpcTaskQueuedFromAnyThreadLocked(
base::TimeDelta* time_since_disabled)
EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
void MaybeReportIpcTaskQueuedFromAnyThreadLocked(Task* pending_task,
const char* task_queue_name)
EXCLUSIVE_LOCKS_REQUIRED(any_thread_lock_);
void MaybeReportIpcTaskQueuedFromAnyThreadUnlocked(
Task* pending_task,
const char* task_queue_name);
void ReportIpcTaskQueued(Task* pending_task,
const char* task_queue_name,
const base::TimeDelta& time_since_disabled);
// Invoked when the queue becomes enabled and not blocked by a fence.
void OnQueueUnblocked();
const char* name_;
SequenceManagerImpl* const sequence_manager_;
scoped_refptr<AssociatedThreadId> associated_thread_;
const scoped_refptr<GuardedTaskPoster> task_poster_;
mutable base::internal::CheckedLock any_thread_lock_;
struct AnyThread {
// Mirrored from MainThreadOnly. These are only used for tracing.
struct TracingOnly {
TracingOnly();
~TracingOnly();
bool is_enabled = true;
Optional<TimeTicks> disabled_time;
bool should_report_posted_tasks_when_disabled = false;
};
explicit AnyThread(TimeDomain* time_domain);
~AnyThread();
// TimeDomain is maintained in two copies: inside AnyThread and inside
// MainThreadOnly. It can be changed only from main thread, so it should be
// locked before accessing from other threads.
TimeDomain* time_domain;
TaskQueue::Observer* task_queue_observer = nullptr;
TaskDeque immediate_incoming_queue;
// True if main_thread_only().immediate_work_queue is empty.
bool immediate_work_queue_empty = true;
bool post_immediate_task_should_schedule_work = true;
bool unregistered = false;
OnTaskReadyHandler on_task_ready_handler;
#if DCHECK_IS_ON()
// A cache of |immediate_work_queue->work_queue_set_index()| which is used
// to index into
// SequenceManager::Settings::per_priority_cross_thread_task_delay to apply
// a priority specific delay for debugging purposes.
int queue_set_index = 0;
#endif
TracingOnly tracing_only;
};
AnyThread any_thread_ GUARDED_BY(any_thread_lock_);
MainThreadOnly main_thread_only_;
MainThreadOnly& main_thread_only() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
const MainThreadOnly& main_thread_only() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
// Handle to our entry within the SequenceManager's |empty_queues_to_reload_|
// atomic flag set. Used to signal that this queue needs to be reloaded.
// If you call SetActive(false) you should do so inside |any_thread_lock_|
// because there is a danger a cross thread PostTask might reset it before we
// make |immediate_work_queue| non-empty.
AtomicFlagSet::AtomicFlag empty_queues_to_reload_handle_;
const bool should_monitor_quiescence_;
const bool should_notify_observers_;
const bool delayed_fence_allowed_;
DISALLOW_COPY_AND_ASSIGN(TaskQueueImpl);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_IMPL_H_

View file

@ -0,0 +1,252 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/task_queue_selector.h"
#include <utility>
#include "base/bits.h"
#include "base/logging.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/work_queue.h"
#include "base/threading/thread_checker.h"
#include "base/trace_event/traced_value.h"
namespace base {
namespace sequence_manager {
namespace internal {
TaskQueueSelector::TaskQueueSelector(
scoped_refptr<AssociatedThreadId> associated_thread,
const SequenceManager::Settings& settings)
: associated_thread_(std::move(associated_thread)),
#if DCHECK_IS_ON()
random_task_selection_(settings.random_task_selection_seed != 0),
#endif
delayed_work_queue_sets_("delayed", this, settings),
immediate_work_queue_sets_("immediate", this, settings) {
}
TaskQueueSelector::~TaskQueueSelector() = default;
void TaskQueueSelector::AddQueue(internal::TaskQueueImpl* queue) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK(queue->IsQueueEnabled());
AddQueueImpl(queue, TaskQueue::kNormalPriority);
}
void TaskQueueSelector::RemoveQueue(internal::TaskQueueImpl* queue) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (queue->IsQueueEnabled()) {
RemoveQueueImpl(queue);
}
}
void TaskQueueSelector::EnableQueue(internal::TaskQueueImpl* queue) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK(queue->IsQueueEnabled());
AddQueueImpl(queue, queue->GetQueuePriority());
if (task_queue_selector_observer_)
task_queue_selector_observer_->OnTaskQueueEnabled(queue);
}
void TaskQueueSelector::DisableQueue(internal::TaskQueueImpl* queue) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK(!queue->IsQueueEnabled());
RemoveQueueImpl(queue);
}
void TaskQueueSelector::SetQueuePriority(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority) {
DCHECK_LT(priority, TaskQueue::kQueuePriorityCount);
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (queue->IsQueueEnabled()) {
ChangeSetIndex(queue, priority);
} else {
// A disabled queue is not in any set, so we can't use ChangeSetIndex here
// and have to assign the priority to the queue itself.
queue->delayed_work_queue()->AssignSetIndex(priority);
queue->immediate_work_queue()->AssignSetIndex(priority);
}
DCHECK_EQ(priority, queue->GetQueuePriority());
}
TaskQueue::QueuePriority TaskQueueSelector::NextPriority(
TaskQueue::QueuePriority priority) {
DCHECK(priority < TaskQueue::kQueuePriorityCount);
return static_cast<TaskQueue::QueuePriority>(static_cast<int>(priority) + 1);
}
void TaskQueueSelector::AddQueueImpl(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority) {
#if DCHECK_IS_ON()
DCHECK(!CheckContainsQueueForTest(queue));
#endif
delayed_work_queue_sets_.AddQueue(queue->delayed_work_queue(), priority);
immediate_work_queue_sets_.AddQueue(queue->immediate_work_queue(), priority);
#if DCHECK_IS_ON()
DCHECK(CheckContainsQueueForTest(queue));
#endif
}
void TaskQueueSelector::ChangeSetIndex(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority) {
#if DCHECK_IS_ON()
DCHECK(CheckContainsQueueForTest(queue));
#endif
delayed_work_queue_sets_.ChangeSetIndex(queue->delayed_work_queue(),
priority);
immediate_work_queue_sets_.ChangeSetIndex(queue->immediate_work_queue(),
priority);
#if DCHECK_IS_ON()
DCHECK(CheckContainsQueueForTest(queue));
#endif
}
void TaskQueueSelector::RemoveQueueImpl(internal::TaskQueueImpl* queue) {
#if DCHECK_IS_ON()
DCHECK(CheckContainsQueueForTest(queue));
#endif
delayed_work_queue_sets_.RemoveQueue(queue->delayed_work_queue());
immediate_work_queue_sets_.RemoveQueue(queue->immediate_work_queue());
#if DCHECK_IS_ON()
DCHECK(!CheckContainsQueueForTest(queue));
#endif
}
void TaskQueueSelector::WorkQueueSetBecameEmpty(size_t set_index) {
non_empty_set_counts_[set_index]--;
DCHECK_GE(non_empty_set_counts_[set_index], 0);
// There are no delayed or immediate tasks for |set_index| so remove from
// |active_priority_tracker_|.
if (non_empty_set_counts_[set_index] == 0) {
active_priority_tracker_.SetActive(
static_cast<TaskQueue::QueuePriority>(set_index), false);
}
}
void TaskQueueSelector::WorkQueueSetBecameNonEmpty(size_t set_index) {
non_empty_set_counts_[set_index]++;
DCHECK_LE(non_empty_set_counts_[set_index], kMaxNonEmptySetCount);
// There is now a delayed or an immediate task for |set_index|, so add to
// |active_priority_tracker_|.
if (non_empty_set_counts_[set_index] == 1) {
TaskQueue::QueuePriority priority =
static_cast<TaskQueue::QueuePriority>(set_index);
active_priority_tracker_.SetActive(priority, true);
}
}
void TaskQueueSelector::CollectSkippedOverLowerPriorityTasks(
const internal::WorkQueue* selected_work_queue,
std::vector<const Task*>* result) const {
delayed_work_queue_sets_.CollectSkippedOverLowerPriorityTasks(
selected_work_queue, result);
immediate_work_queue_sets_.CollectSkippedOverLowerPriorityTasks(
selected_work_queue, result);
}
#if DCHECK_IS_ON() || !defined(NDEBUG)
bool TaskQueueSelector::CheckContainsQueueForTest(
const internal::TaskQueueImpl* queue) const {
bool contains_delayed_work_queue =
delayed_work_queue_sets_.ContainsWorkQueueForTest(
queue->delayed_work_queue());
bool contains_immediate_work_queue =
immediate_work_queue_sets_.ContainsWorkQueueForTest(
queue->immediate_work_queue());
DCHECK_EQ(contains_delayed_work_queue, contains_immediate_work_queue);
return contains_delayed_work_queue;
}
#endif
WorkQueue* TaskQueueSelector::SelectWorkQueueToService() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!active_priority_tracker_.HasActivePriority())
return nullptr;
// Select the priority from which we will select a task. Usually this is
// the highest priority for which we have work, unless we are starving a lower
// priority.
TaskQueue::QueuePriority priority =
active_priority_tracker_.HighestActivePriority();
WorkQueue* queue =
#if DCHECK_IS_ON()
random_task_selection_ ? ChooseWithPriority<SetOperationRandom>(priority)
:
#endif
ChooseWithPriority<SetOperationOldest>(priority);
// If we have selected a delayed task while having an immediate task of the
// same priority, increase the starvation count.
if (queue->queue_type() == WorkQueue::QueueType::kDelayed &&
!immediate_work_queue_sets_.IsSetEmpty(priority)) {
immediate_starvation_count_++;
} else {
immediate_starvation_count_ = 0;
}
return queue;
}
void TaskQueueSelector::AsValueInto(trace_event::TracedValue* state) const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
state->SetInteger("immediate_starvation_count", immediate_starvation_count_);
}
void TaskQueueSelector::SetTaskQueueSelectorObserver(Observer* observer) {
task_queue_selector_observer_ = observer;
}
Optional<TaskQueue::QueuePriority>
TaskQueueSelector::GetHighestPendingPriority() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (!active_priority_tracker_.HasActivePriority())
return nullopt;
return active_priority_tracker_.HighestActivePriority();
}
void TaskQueueSelector::SetImmediateStarvationCountForTest(
size_t immediate_starvation_count) {
immediate_starvation_count_ = immediate_starvation_count;
}
bool TaskQueueSelector::HasTasksWithPriority(
TaskQueue::QueuePriority priority) {
return !delayed_work_queue_sets_.IsSetEmpty(priority) ||
!immediate_work_queue_sets_.IsSetEmpty(priority);
}
TaskQueueSelector::ActivePriorityTracker::ActivePriorityTracker() = default;
void TaskQueueSelector::ActivePriorityTracker::SetActive(
TaskQueue::QueuePriority priority,
bool is_active) {
DCHECK_LT(priority, TaskQueue::QueuePriority::kQueuePriorityCount);
DCHECK_NE(IsActive(priority), is_active);
if (is_active) {
active_priorities_ |= (1u << static_cast<size_t>(priority));
} else {
active_priorities_ &= ~(1u << static_cast<size_t>(priority));
}
}
TaskQueue::QueuePriority
TaskQueueSelector::ActivePriorityTracker::HighestActivePriority() const {
DCHECK_NE(active_priorities_, 0u)
<< "CountTrailingZeroBits(0) has undefined behavior";
return static_cast<TaskQueue::QueuePriority>(
bits::CountTrailingZeroBits(active_priorities_));
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,251 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_
#include <stddef.h>
#include <array>
#include <vector>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/pending_task.h"
#include "base/task/sequence_manager/sequence_manager.h"
#include "base/task/sequence_manager/task_queue_selector_logic.h"
#include "base/task/sequence_manager/work_queue_sets.h"
namespace base {
namespace sequence_manager {
namespace internal {
class AssociatedThreadId;
// TaskQueueSelector is used by the SchedulerHelper to enable prioritization
// of particular task queues.
class BASE_EXPORT TaskQueueSelector : public WorkQueueSets::Observer {
public:
TaskQueueSelector(scoped_refptr<AssociatedThreadId> associated_thread,
const SequenceManager::Settings& settings);
~TaskQueueSelector() override;
// Called to register a queue that can be selected. This function is called
// on the main thread.
void AddQueue(internal::TaskQueueImpl* queue);
// The specified work will no longer be considered for selection. This
// function is called on the main thread.
void RemoveQueue(internal::TaskQueueImpl* queue);
// Make |queue| eligible for selection. This function is called on the main
// thread. Must only be called if |queue| is disabled.
void EnableQueue(internal::TaskQueueImpl* queue);
// Disable selection from |queue|. Must only be called if |queue| is enabled.
void DisableQueue(internal::TaskQueueImpl* queue);
// Called to set the priority of |queue|.
void SetQueuePriority(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority);
// Called to choose the work queue from which the next task should be taken
// and run. Return the queue to service if there is one or null otherwise.
// This function is called on the main thread.
WorkQueue* SelectWorkQueueToService();
// Serialize the selector state for tracing.
void AsValueInto(trace_event::TracedValue* state) const;
class BASE_EXPORT Observer {
public:
virtual ~Observer() = default;
// Called when |queue| transitions from disabled to enabled.
virtual void OnTaskQueueEnabled(internal::TaskQueueImpl* queue) = 0;
};
// Called once to set the Observer. This function is called
// on the main thread. If |observer| is null, then no callbacks will occur.
void SetTaskQueueSelectorObserver(Observer* observer);
// Returns the priority of the most important pending task if one exists.
// O(1).
Optional<TaskQueue::QueuePriority> GetHighestPendingPriority() const;
// WorkQueueSets::Observer implementation:
void WorkQueueSetBecameEmpty(size_t set_index) override;
void WorkQueueSetBecameNonEmpty(size_t set_index) override;
// Populates |result| with tasks with lower priority than the first task from
// |selected_work_queue| which could otherwise run now.
void CollectSkippedOverLowerPriorityTasks(
const internal::WorkQueue* selected_work_queue,
std::vector<const Task*>* result) const;
protected:
WorkQueueSets* delayed_work_queue_sets() { return &delayed_work_queue_sets_; }
WorkQueueSets* immediate_work_queue_sets() {
return &immediate_work_queue_sets_;
}
// This method will force select an immediate task if those are being
// starved by delayed tasks.
void SetImmediateStarvationCountForTest(size_t immediate_starvation_count);
// Maximum number of delayed tasks which can be run while there's a waiting
// non-delayed task.
static const size_t kMaxDelayedStarvationTasks = 3;
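// e.g. with the default of 3, at most three delayed tasks run back-to-back
// while an immediate task of the same priority is waiting; the next
// selection then prefers the immediate work queue set (see
// ChooseWithPriority below).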
// Tracks which priorities are currently active, meaning there are pending
// runnable tasks with that priority. Because there are only a handful of
// priorities, and because we always run tasks in order from highest to lowest
// priority, we can use a single integer to represent enabled priorities,
// using a bit per priority.
class BASE_EXPORT ActivePriorityTracker {
public:
ActivePriorityTracker();
bool HasActivePriority() const { return active_priorities_ != 0; }
bool IsActive(TaskQueue::QueuePriority priority) const {
return active_priorities_ & (1u << static_cast<size_t>(priority));
}
void SetActive(TaskQueue::QueuePriority priority, bool is_active);
TaskQueue::QueuePriority HighestActivePriority() const;
private:
static_assert(TaskQueue::QueuePriority::kQueuePriorityCount <
sizeof(size_t) * 8,
"The number of priorities must be strictly less than the "
"number of bits of |active_priorities_|!");
size_t active_priorities_ = 0;
};
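// Worked example: with kControlPriority (0) and kNormalPriority (4) active,
// |active_priorities_| == 0b10001. CountTrailingZeroBits(0b10001) == 0, so
// HighestActivePriority() returns kControlPriority; once that bit is
// cleared, 0b10000 yields 4, i.e. kNormalPriority.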
/*
* SetOperation is used to configure ChooseWithPriority() and must have:
*
* static WorkQueue* GetWithPriority(const WorkQueueSets& sets,
* TaskQueue::QueuePriority priority);
*
* static WorkQueue* GetWithPriorityAndEnqueueOrder(
* const WorkQueueSets& sets,
*     TaskQueue::QueuePriority priority,
* EnqueueOrder* enqueue_order);
*/
// The default SetOperation: selects the oldest task in the set.
struct SetOperationOldest {
static WorkQueue* GetWithPriority(const WorkQueueSets& sets,
TaskQueue::QueuePriority priority) {
return sets.GetOldestQueueInSet(priority);
}
static WorkQueue* GetWithPriorityAndEnqueueOrder(
const WorkQueueSets& sets,
TaskQueue::QueuePriority priority,
EnqueueOrder* enqueue_order) {
return sets.GetOldestQueueAndEnqueueOrderInSet(priority, enqueue_order);
}
};
#if DCHECK_IS_ON()
struct SetOperationRandom {
static WorkQueue* GetWithPriority(const WorkQueueSets& sets,
TaskQueue::QueuePriority priority) {
return sets.GetRandomQueueInSet(priority);
}
static WorkQueue* GetWithPriorityAndEnqueueOrder(
const WorkQueueSets& sets,
TaskQueue::QueuePriority priority,
EnqueueOrder* enqueue_order) {
return sets.GetRandomQueueAndEnqueueOrderInSet(priority, enqueue_order);
}
};
#endif // DCHECK_IS_ON()
template <typename SetOperation>
WorkQueue* ChooseWithPriority(TaskQueue::QueuePriority priority) const {
// Select an immediate work queue if we are starving immediate tasks.
if (immediate_starvation_count_ >= kMaxDelayedStarvationTasks) {
WorkQueue* queue =
SetOperation::GetWithPriority(immediate_work_queue_sets_, priority);
if (queue)
return queue;
return SetOperation::GetWithPriority(delayed_work_queue_sets_, priority);
}
return ChooseImmediateOrDelayedTaskWithPriority<SetOperation>(priority);
}
private:
void ChangeSetIndex(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority);
void AddQueueImpl(internal::TaskQueueImpl* queue,
TaskQueue::QueuePriority priority);
void RemoveQueueImpl(internal::TaskQueueImpl* queue);
#if DCHECK_IS_ON() || !defined(NDEBUG)
bool CheckContainsQueueForTest(const internal::TaskQueueImpl* queue) const;
#endif
template <typename SetOperation>
WorkQueue* ChooseImmediateOrDelayedTaskWithPriority(
TaskQueue::QueuePriority priority) const {
EnqueueOrder immediate_enqueue_order;
WorkQueue* immediate_queue = SetOperation::GetWithPriorityAndEnqueueOrder(
immediate_work_queue_sets_, priority, &immediate_enqueue_order);
if (immediate_queue) {
EnqueueOrder delayed_enqueue_order;
WorkQueue* delayed_queue = SetOperation::GetWithPriorityAndEnqueueOrder(
delayed_work_queue_sets_, priority, &delayed_enqueue_order);
if (!delayed_queue)
return immediate_queue;
if (immediate_enqueue_order < delayed_enqueue_order) {
return immediate_queue;
} else {
return delayed_queue;
}
}
return SetOperation::GetWithPriority(delayed_work_queue_sets_, priority);
}
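// e.g. if the oldest immediate task at this priority has EnqueueOrder 7 and
// the oldest ready delayed task has EnqueueOrder 5, the delayed queue is
// returned, preserving ordering by enqueue order across queue types.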
// Returns the priority which is next after |priority|.
static TaskQueue::QueuePriority NextPriority(
TaskQueue::QueuePriority priority);
// Returns true if there are pending tasks with priority |priority|.
bool HasTasksWithPriority(TaskQueue::QueuePriority priority);
scoped_refptr<AssociatedThreadId> associated_thread_;
#if DCHECK_IS_ON()
const bool random_task_selection_ = false;
#endif
// Count of the number of sets (delayed or immediate) for each priority.
// Should only contain 0, 1 or 2.
std::array<int, TaskQueue::kQueuePriorityCount> non_empty_set_counts_ = {{0}};
static constexpr const int kMaxNonEmptySetCount = 2;
// List of active priorities, which is used to work out which priority to run
// next.
ActivePriorityTracker active_priority_tracker_;
WorkQueueSets delayed_work_queue_sets_;
WorkQueueSets immediate_work_queue_sets_;
size_t immediate_starvation_count_ = 0;
Observer* task_queue_selector_observer_ = nullptr; // Not owned.
DISALLOW_COPY_AND_ASSIGN(TaskQueueSelector);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_H_

View file

@ -0,0 +1,37 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_
namespace base {
namespace sequence_manager {
namespace internal {
// Used to describe the logic triggered when a task queue is selected for
// servicing.
// This enum is used for histograms and should not be renumbered.
enum class TaskQueueSelectorLogic {
// Selected due to priority rules.
kControlPriorityLogic = 0,
kHighestPriorityLogic = 1,
kHighPriorityLogic = 2,
kNormalPriorityLogic = 3,
kLowPriorityLogic = 4,
kBestEffortPriorityLogic = 5,
// Selected due to starvation logic.
kHighPriorityStarvationLogic = 6,
kNormalPriorityStarvationLogic = 7,
kLowPriorityStarvationLogic = 8,
kCount = 9,
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_QUEUE_SELECTOR_LOGIC_H_

View file

@ -0,0 +1,32 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_
#include "base/time/time.h"
namespace base {
namespace sequence_manager {
// TaskTimeObserver provides an API for observing completion of tasks.
class TaskTimeObserver {
public:
TaskTimeObserver() = default;
virtual ~TaskTimeObserver() = default;
// To be called when task is about to start.
virtual void WillProcessTask(TimeTicks start_time) = 0;
// To be called when task is completed.
virtual void DidProcessTask(TimeTicks start_time, TimeTicks end_time) = 0;
private:
DISALLOW_COPY_AND_ASSIGN(TaskTimeObserver);
};
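// A minimal observer sketch (hypothetical subclass that logs the wall-clock
// duration of each completed task):
//
//   class LoggingTaskTimeObserver : public TaskTimeObserver {
//    public:
//     void WillProcessTask(TimeTicks start_time) override {}
//     void DidProcessTask(TimeTicks start_time, TimeTicks end_time) override {
//       LOG(INFO) << "Task took " << (end_time - start_time);
//     }
//   };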
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASK_TIME_OBSERVER_H_

View file

@ -0,0 +1,65 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/tasks.h"
namespace base {
namespace sequence_manager {
Task::Task(internal::PostedTask posted_task,
TimeTicks delayed_run_time,
EnqueueOrder sequence_order,
EnqueueOrder enqueue_order,
internal::WakeUpResolution resolution)
: PendingTask(posted_task.location,
std::move(posted_task.callback),
delayed_run_time,
posted_task.nestable),
task_type(posted_task.task_type),
task_runner(std::move(posted_task.task_runner)),
enqueue_order_(enqueue_order) {
// We use |sequence_num| in DelayedWakeUp for ordering purposes and it
// may wrap around to a negative number during the static cast, hence,
// the relevant code is especially sensitive to a potential change of
// |PendingTask::sequence_num|'s type.
static_assert(std::is_same<decltype(sequence_num), int>::value, "");
sequence_num = static_cast<int>(sequence_order);
this->is_high_res = resolution == internal::WakeUpResolution::kHigh;
queue_time = posted_task.queue_time;
}
Task::Task(Task&& move_from) = default;
Task::~Task() = default;
Task& Task::operator=(Task&& other) = default;
namespace internal {
PostedTask::PostedTask(scoped_refptr<SequencedTaskRunner> task_runner,
OnceClosure callback,
Location location,
TimeDelta delay,
Nestable nestable,
TaskType task_type)
: callback(std::move(callback)),
location(location),
delay(delay),
nestable(nestable),
task_type(task_type),
task_runner(std::move(task_runner)) {}
PostedTask::PostedTask(PostedTask&& move_from) noexcept
: callback(std::move(move_from.callback)),
location(move_from.location),
delay(move_from.delay),
nestable(move_from.nestable),
task_type(move_from.task_type),
task_runner(std::move(move_from.task_runner)),
queue_time(move_from.queue_time) {}
PostedTask::~PostedTask() = default;
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,128 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TASKS_H_
#define BASE_TASK_SEQUENCE_MANAGER_TASKS_H_
#include "base/pending_task.h"
#include "base/sequenced_task_runner.h"
#include "base/task/sequence_manager/enqueue_order.h"
namespace base {
namespace sequence_manager {
using TaskType = uint8_t;
constexpr TaskType kTaskTypeNone = 0;
namespace internal {
enum class WakeUpResolution { kLow, kHigh };
// Wrapper around PostTask method arguments and the assigned task type.
// Eventually it becomes a PendingTask once accepted by a TaskQueueImpl.
struct BASE_EXPORT PostedTask {
explicit PostedTask(scoped_refptr<SequencedTaskRunner> task_runner,
OnceClosure callback = OnceClosure(),
Location location = Location(),
TimeDelta delay = TimeDelta(),
Nestable nestable = Nestable::kNestable,
TaskType task_type = kTaskTypeNone);
PostedTask(PostedTask&& move_from) noexcept;
~PostedTask();
OnceClosure callback;
Location location;
TimeDelta delay;
Nestable nestable;
TaskType task_type;
// The task runner this task is running on. Can be used by task runners that
// support posting back to the "current sequence".
scoped_refptr<SequencedTaskRunner> task_runner;
// The time at which the task was queued.
TimeTicks queue_time;
DISALLOW_COPY_AND_ASSIGN(PostedTask);
};
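// A hedged usage sketch (names are hypothetical): an immediate task posted to
// |runner| might be wrapped as
//   PostedTask posted(runner, BindOnce(&DoSomething), FROM_HERE);
// and a delayed variant would additionally pass a non-zero |delay|.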
// Represents a time at which a task wants to run. Tasks scheduled for the
// same point in time will be ordered by their sequence numbers.
struct DelayedWakeUp {
TimeTicks time;
int sequence_num;
bool operator!=(const DelayedWakeUp& other) const {
return time != other.time || other.sequence_num != sequence_num;
}
bool operator==(const DelayedWakeUp& other) const {
return !(*this != other);
}
bool operator<=(const DelayedWakeUp& other) const {
if (time == other.time) {
// Debug gcc builds can compare an element against itself.
DCHECK(sequence_num != other.sequence_num || this == &other);
// |sequence_num| is int and might wrap around to a negative number when
// cast from EnqueueOrder. This comparison handles that wrap-around correctly.
return (sequence_num - other.sequence_num) <= 0;
}
return time < other.time;
}
};
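// Illustrative (hypothetical values): for two wake-ups at the same |time|,
// ordering follows the difference of sequence numbers even across wrap-around:
//   DelayedWakeUp a{t, 2147483647};   // Posted first; sequence_num == INT_MAX.
//   DelayedWakeUp b{t, -2147483648};  // Posted next; wrapped to INT_MIN.
//   // (a.sequence_num - b.sequence_num) wraps to -1, so a <= b still holds.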
} // namespace internal
// PendingTask with extra metadata for SequenceManager.
struct BASE_EXPORT Task : public PendingTask {
Task(internal::PostedTask posted_task,
TimeTicks delayed_run_time,
EnqueueOrder sequence_order,
EnqueueOrder enqueue_order = EnqueueOrder(),
internal::WakeUpResolution wake_up_resolution =
internal::WakeUpResolution::kLow);
Task(Task&& move_from);
~Task();
Task& operator=(Task&& other);
internal::DelayedWakeUp delayed_wake_up() const {
return internal::DelayedWakeUp{delayed_run_time, sequence_num};
}
// SequenceManager is particularly sensitive to enqueue order,
// so we have accessors for safety.
EnqueueOrder enqueue_order() const {
DCHECK(enqueue_order_);
return enqueue_order_;
}
void set_enqueue_order(EnqueueOrder enqueue_order) {
DCHECK(!enqueue_order_);
enqueue_order_ = enqueue_order;
}
bool enqueue_order_set() const { return enqueue_order_; }
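// Illustrative lifecycle (hypothetical values): a delayed task starts
// without an enqueue order and is assigned one once it becomes ready:
//   Task task(std::move(posted), run_time, sequence_order);
//   DCHECK(!task.enqueue_order_set());
//   task.set_enqueue_order(assigned_order);  // |assigned_order| is made up.
//   DCHECK(task.enqueue_order_set());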
TaskType task_type;
// The task runner this task is running on. Can be used by task runners that
// support posting back to the "current sequence".
scoped_refptr<SequencedTaskRunner> task_runner;
#if DCHECK_IS_ON()
bool cross_thread_;
#endif
private:
// Similar to |sequence_num|, but ultimately the |enqueue_order| is what
// the scheduler uses for task ordering. For immediate tasks |enqueue_order|
// is set when posted, but for delayed tasks it's not defined until they are
// enqueued. This is because otherwise delayed tasks could run before
// an immediate task posted after the delayed task.
EnqueueOrder enqueue_order_;
};
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TASKS_H_

View file

@ -0,0 +1,124 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_H_
#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_H_
#include "base/message_loop/message_pump.h"
#include "base/run_loop.h"
#include "base/single_thread_task_runner.h"
#include "base/task/sequence_manager/lazy_now.h"
#include "base/time/time.h"
#include "build/build_config.h"
namespace base {
class MessageLoopBase;
class TickClock;
struct PendingTask;
namespace sequence_manager {
namespace internal {
class AssociatedThreadId;
class SequencedTaskSource;
// Implementations of this interface are used by SequenceManager to schedule
// actual work to be run. Hopefully we can stop using MessageLoop and this
// interface will become more concise.
class ThreadController {
public:
virtual ~ThreadController() = default;
// Sets the number of tasks executed in a single invocation of DoWork.
// Increasing the batch size can reduce the overhead of yielding back to the
// main message loop.
virtual void SetWorkBatchSize(int work_batch_size = 1) = 0;
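// For example (hypothetical embedder code), to run up to four tasks per
// DoWork invocation:
//   thread_controller->SetWorkBatchSize(4);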
// Notifies that |pending_task| is about to be enqueued. Needed for tracing
// purposes. The impl may use this opportunity to add metadata to
// |pending_task|
// before it is moved into the queue.
virtual void WillQueueTask(PendingTask* pending_task,
const char* task_queue_name) = 0;
// Notify the controller that its associated sequence has immediate work
// to run. Shortly after this is called, the thread associated with this
// controller will run a task returned by sequence->TakeTask(). Can be called
// from any sequence.
//
// TODO(altimin): Change this to "the thread associated with this
// controller will run tasks returned by sequence->TakeTask() until it
// returns null or sequence->DidRunTask() returns false" once the
// code is changed to work that way.
virtual void ScheduleWork() = 0;
// Notify the controller that SequencedTaskSource will have delayed work
// ready to be run at |run_time|. This call cancels any previously
// scheduled delayed work. Can only be called from the main sequence.
// NOTE: DelayTillNextTask might return a different value as it also takes
// immediate work into account.
// TODO(kraynov): Remove |lazy_now| parameter.
virtual void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) = 0;
// Sets the sequenced task source from which to take tasks after
// a Schedule*Work() call is made.
// Must be called before the first call to Schedule*Work().
virtual void SetSequencedTaskSource(SequencedTaskSource*) = 0;
// Requests desired timer precision from the OS.
// Has no effect on some platforms.
virtual void SetTimerSlack(TimerSlack timer_slack) = 0;
// Completes delayed initialization of unbound ThreadControllers.
// BindToCurrentThread(MessageLoopBase*) or BindToCurrentThread(MessagePump*)
// may only be called once.
virtual void BindToCurrentThread(
std::unique_ptr<MessagePump> message_pump) = 0;
// Explicitly allow or disallow task execution. Implicitly disallowed when
// entering a nested runloop.
virtual void SetTaskExecutionAllowed(bool allowed) = 0;
// Whether task execution is allowed or not.
virtual bool IsTaskExecutionAllowed() const = 0;
// Returns the MessagePump we're bound to if any.
virtual MessagePump* GetBoundMessagePump() const = 0;
// Returns true if the current run loop should quit when idle.
virtual bool ShouldQuitRunLoopWhenIdle() = 0;
#if defined(OS_IOS) || defined(OS_ANDROID)
// On iOS, the main message loop cannot be Run(). Instead call
// AttachToMessagePump(), which connects this ThreadController to the
// UI thread's CFRunLoop and allows PostTask() to work.
virtual void AttachToMessagePump() = 0;
#endif
#if defined(OS_IOS)
// Detaches this ThreadController from the message pump, allowing the
// controller to be shut down cleanly.
virtual void DetachFromMessagePump() = 0;
#endif
// TODO(altimin): Get rid of the methods below.
// These methods exist due to current integration of SequenceManager
// with MessageLoop.
virtual bool RunsTasksInCurrentSequence() = 0;
virtual const TickClock* GetClock() = 0;
virtual void SetDefaultTaskRunner(scoped_refptr<SingleThreadTaskRunner>) = 0;
virtual scoped_refptr<SingleThreadTaskRunner> GetDefaultTaskRunner() = 0;
virtual void RestoreDefaultTaskRunner() = 0;
virtual void AddNestingObserver(RunLoop::NestingObserver* observer) = 0;
virtual void RemoveNestingObserver(RunLoop::NestingObserver* observer) = 0;
virtual const scoped_refptr<AssociatedThreadId>& GetAssociatedThread()
const = 0;
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_H_

View file

@ -0,0 +1,327 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/thread_controller_impl.h"
#include <algorithm>
#include "base/bind.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump.h"
#include "base/run_loop.h"
#include "base/task/sequence_manager/lazy_now.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace sequence_manager {
namespace internal {
using ShouldScheduleWork = WorkDeduplicator::ShouldScheduleWork;
ThreadControllerImpl::ThreadControllerImpl(
SequenceManagerImpl* funneled_sequence_manager,
scoped_refptr<SingleThreadTaskRunner> task_runner,
const TickClock* time_source)
: funneled_sequence_manager_(funneled_sequence_manager),
task_runner_(task_runner),
associated_thread_(AssociatedThreadId::CreateUnbound()),
message_loop_task_runner_(funneled_sequence_manager
? funneled_sequence_manager->GetTaskRunner()
: nullptr),
time_source_(time_source),
work_deduplicator_(associated_thread_) {
if (task_runner_ || funneled_sequence_manager_)
work_deduplicator_.BindToCurrentThread();
immediate_do_work_closure_ =
BindRepeating(&ThreadControllerImpl::DoWork, weak_factory_.GetWeakPtr(),
WorkType::kImmediate);
delayed_do_work_closure_ =
BindRepeating(&ThreadControllerImpl::DoWork, weak_factory_.GetWeakPtr(),
WorkType::kDelayed);
}
ThreadControllerImpl::~ThreadControllerImpl() = default;
ThreadControllerImpl::MainSequenceOnly::MainSequenceOnly() = default;
ThreadControllerImpl::MainSequenceOnly::~MainSequenceOnly() = default;
std::unique_ptr<ThreadControllerImpl> ThreadControllerImpl::Create(
SequenceManagerImpl* funneled_sequence_manager,
const TickClock* time_source) {
return WrapUnique(new ThreadControllerImpl(
funneled_sequence_manager,
funneled_sequence_manager ? funneled_sequence_manager->GetTaskRunner()
: nullptr,
time_source));
}
void ThreadControllerImpl::SetSequencedTaskSource(
SequencedTaskSource* sequence) {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
DCHECK(sequence);
DCHECK(!sequence_);
sequence_ = sequence;
}
void ThreadControllerImpl::SetTimerSlack(TimerSlack timer_slack) {
if (!funneled_sequence_manager_)
return;
funneled_sequence_manager_->SetTimerSlack(timer_slack);
}
void ThreadControllerImpl::ScheduleWork() {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
"ThreadControllerImpl::ScheduleWork::PostTask");
if (work_deduplicator_.OnWorkRequested() ==
ShouldScheduleWork::kScheduleImmediate)
task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
}
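// Illustrative behaviour of the deduplication above (assuming no DoWork is
// already pending): the first ScheduleWork() returns kScheduleImmediate and
// posts |immediate_do_work_closure_|; further calls before that DoWork starts
// return kNotNeeded and post nothing.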
void ThreadControllerImpl::SetNextDelayedDoWork(LazyNow* lazy_now,
TimeTicks run_time) {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
DCHECK(sequence_);
if (main_sequence_only().next_delayed_do_work == run_time)
return;
// Cancel DoWork if it was scheduled and we set an "infinite" delay now.
if (run_time == TimeTicks::Max()) {
cancelable_delayed_do_work_closure_.Cancel();
main_sequence_only().next_delayed_do_work = TimeTicks::Max();
return;
}
if (work_deduplicator_.OnDelayedWorkRequested() ==
ShouldScheduleWork::kNotNeeded) {
return;
}
base::TimeDelta delay = std::max(TimeDelta(), run_time - lazy_now->Now());
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
"ThreadControllerImpl::SetNextDelayedDoWork::PostDelayedTask",
"delay_ms", delay.InMillisecondsF());
main_sequence_only().next_delayed_do_work = run_time;
// Reset also causes cancellation of the previous DoWork task.
cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
task_runner_->PostDelayedTask(
FROM_HERE, cancelable_delayed_do_work_closure_.callback(), delay);
}
bool ThreadControllerImpl::RunsTasksInCurrentSequence() {
return task_runner_->RunsTasksInCurrentSequence();
}
const TickClock* ThreadControllerImpl::GetClock() {
return time_source_;
}
void ThreadControllerImpl::SetDefaultTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) {
#if DCHECK_IS_ON()
default_task_runner_set_ = true;
#endif
if (!funneled_sequence_manager_)
return;
funneled_sequence_manager_->SetTaskRunner(task_runner);
}
scoped_refptr<SingleThreadTaskRunner>
ThreadControllerImpl::GetDefaultTaskRunner() {
return funneled_sequence_manager_->GetTaskRunner();
}
void ThreadControllerImpl::RestoreDefaultTaskRunner() {
if (!funneled_sequence_manager_)
return;
funneled_sequence_manager_->SetTaskRunner(message_loop_task_runner_);
}
void ThreadControllerImpl::BindToCurrentThread(
std::unique_ptr<MessagePump> message_pump) {
NOTREACHED();
}
void ThreadControllerImpl::WillQueueTask(PendingTask* pending_task,
const char* task_queue_name) {
task_annotator_.WillQueueTask("SequenceManager PostTask", pending_task,
task_queue_name);
}
void ThreadControllerImpl::DoWork(WorkType work_type) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
"ThreadControllerImpl::DoWork");
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
DCHECK(sequence_);
work_deduplicator_.OnWorkStarted();
WeakPtr<ThreadControllerImpl> weak_ptr = weak_factory_.GetWeakPtr();
// TODO(scheduler-dev): Consider moving to a time based work batch instead.
for (int i = 0; i < main_sequence_only().work_batch_size_; i++) {
Task* task = sequence_->SelectNextTask();
if (!task)
break;
// Trace-parsing tools (DevTools, Lighthouse, etc) consume this event
// to determine long tasks.
// The event scope must span across DidRunTask call below to make sure
// it covers RunMicrotasks event.
// See https://crbug.com/681863 and https://crbug.com/874982
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "RunTask");
{
// Trace events should finish before we call DidRunTask to ensure that
// SequenceManager trace events do not interfere with them.
TRACE_TASK_EXECUTION("ThreadControllerImpl::RunTask", *task);
task_annotator_.RunTask("SequenceManager RunTask", task);
}
if (!weak_ptr)
return;
sequence_->DidRunTask();
// NOTE: https://crbug.com/828835.
// When we're running inside a nested RunLoop it may quit anytime, so any
// outstanding pending tasks must run in the outer RunLoop
// (see SequenceManagerTestWithMessageLoop.QuitWhileNested test).
// Unfortunately, it's the MessageLoop that receives that signal and we
// can't know about it before we return from DoWork; hence OnExitNestedRunLoop
// will be called later. Since we must implement ThreadController and
// SequenceManager in conformance with MessageLoop task runners, we need
// to disable this batching optimization while nested.
// Implementing MessagePump::Delegate ourselves will help to resolve this
// issue.
if (main_sequence_only().nesting_depth > 0)
break;
}
work_deduplicator_.WillCheckForMoreWork();
LazyNow lazy_now(time_source_);
TimeDelta delay_till_next_task = sequence_->DelayTillNextTask(&lazy_now);
// The OnSystemIdle callback allows the TimeDomains to advance virtual time
// in which case we now have immediate work to do.
if (delay_till_next_task <= TimeDelta() || sequence_->OnSystemIdle()) {
// The next task needs to run immediately, post a continuation if
// another thread didn't get there first.
if (work_deduplicator_.DidCheckForMoreWork(
WorkDeduplicator::NextTask::kIsImmediate) ==
ShouldScheduleWork::kScheduleImmediate) {
task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
}
return;
}
// It looks like we have a non-zero delay; however, another thread may have
// posted an immediate task while we computed the delay.
if (work_deduplicator_.DidCheckForMoreWork(
WorkDeduplicator::NextTask::kIsDelayed) ==
ShouldScheduleWork::kScheduleImmediate) {
task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
return;
}
// Check if there's no future work.
if (delay_till_next_task == TimeDelta::Max()) {
main_sequence_only().next_delayed_do_work = TimeTicks::Max();
cancelable_delayed_do_work_closure_.Cancel();
return;
}
// Check if we've already requested the required delay.
TimeTicks next_task_at = lazy_now.Now() + delay_till_next_task;
if (next_task_at == main_sequence_only().next_delayed_do_work)
return;
// Schedule a callback after |delay_till_next_task| and cancel any previous
// callback.
main_sequence_only().next_delayed_do_work = next_task_at;
cancelable_delayed_do_work_closure_.Reset(delayed_do_work_closure_);
task_runner_->PostDelayedTask(FROM_HERE,
cancelable_delayed_do_work_closure_.callback(),
delay_till_next_task);
}
void ThreadControllerImpl::AddNestingObserver(
RunLoop::NestingObserver* observer) {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
nesting_observer_ = observer;
RunLoop::AddNestingObserverOnCurrentThread(this);
}
void ThreadControllerImpl::RemoveNestingObserver(
RunLoop::NestingObserver* observer) {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
DCHECK_EQ(observer, nesting_observer_);
nesting_observer_ = nullptr;
RunLoop::RemoveNestingObserverOnCurrentThread(this);
}
const scoped_refptr<AssociatedThreadId>&
ThreadControllerImpl::GetAssociatedThread() const {
return associated_thread_;
}
void ThreadControllerImpl::OnBeginNestedRunLoop() {
main_sequence_only().nesting_depth++;
// Just assume we have a pending task and post a DoWork to make sure we don't
// grind to a halt while nested.
work_deduplicator_.OnWorkRequested(); // Set the pending DoWork flag.
task_runner_->PostTask(FROM_HERE, immediate_do_work_closure_);
if (nesting_observer_)
nesting_observer_->OnBeginNestedRunLoop();
}
void ThreadControllerImpl::OnExitNestedRunLoop() {
main_sequence_only().nesting_depth--;
if (nesting_observer_)
nesting_observer_->OnExitNestedRunLoop();
}
void ThreadControllerImpl::SetWorkBatchSize(int work_batch_size) {
main_sequence_only().work_batch_size_ = work_batch_size;
}
void ThreadControllerImpl::SetTaskExecutionAllowed(bool allowed) {
NOTREACHED();
}
bool ThreadControllerImpl::IsTaskExecutionAllowed() const {
return true;
}
bool ThreadControllerImpl::ShouldQuitRunLoopWhenIdle() {
// The MessageLoop does not expose the API needed to support this query.
return false;
}
MessagePump* ThreadControllerImpl::GetBoundMessagePump() const {
return nullptr;
}
#if defined(OS_IOS) || defined(OS_ANDROID)
void ThreadControllerImpl::AttachToMessagePump() {
NOTREACHED();
}
#endif // OS_IOS || OS_ANDROID
#if defined(OS_IOS)
void ThreadControllerImpl::DetachFromMessagePump() {
NOTREACHED();
}
#endif // OS_IOS
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,138 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_
#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_
#include <memory>
#include "base/cancelable_callback.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/run_loop.h"
#include "base/sequence_checker.h"
#include "base/single_thread_task_runner.h"
#include "base/task/common/task_annotator.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/thread_controller.h"
#include "base/task/sequence_manager/work_deduplicator.h"
#include "build/build_config.h"
namespace base {
namespace sequence_manager {
namespace internal {
class SequenceManagerImpl;
// This is the ThreadController implementation used when a SequenceManager
// sits on top of an underlying SequenceManagerImpl or SingleThreadTaskRunner.
// Currently it's only used for workers in Blink, although we intend to migrate
// those to ThreadControllerWithMessagePumpImpl (https://crbug.com/948051).
// Long term we intend to use this for sequence funneling.
class BASE_EXPORT ThreadControllerImpl : public ThreadController,
public RunLoop::NestingObserver {
public:
~ThreadControllerImpl() override;
// TODO(https://crbug.com/948051): replace |funneled_sequence_manager| with
// |funneled_task_runner| when we sort out the workers
static std::unique_ptr<ThreadControllerImpl> Create(
SequenceManagerImpl* funneled_sequence_manager,
const TickClock* time_source);
// ThreadController:
void SetWorkBatchSize(int work_batch_size) override;
void WillQueueTask(PendingTask* pending_task,
const char* task_queue_name) override;
void ScheduleWork() override;
void BindToCurrentThread(std::unique_ptr<MessagePump> message_pump) override;
void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override;
void SetSequencedTaskSource(SequencedTaskSource* sequence) override;
void SetTimerSlack(TimerSlack timer_slack) override;
bool RunsTasksInCurrentSequence() override;
const TickClock* GetClock() override;
void SetDefaultTaskRunner(scoped_refptr<SingleThreadTaskRunner>) override;
scoped_refptr<SingleThreadTaskRunner> GetDefaultTaskRunner() override;
void RestoreDefaultTaskRunner() override;
void AddNestingObserver(RunLoop::NestingObserver* observer) override;
void RemoveNestingObserver(RunLoop::NestingObserver* observer) override;
const scoped_refptr<AssociatedThreadId>& GetAssociatedThread() const override;
void SetTaskExecutionAllowed(bool allowed) override;
bool IsTaskExecutionAllowed() const override;
MessagePump* GetBoundMessagePump() const override;
#if defined(OS_IOS) || defined(OS_ANDROID)
void AttachToMessagePump() override;
#endif
#if defined(OS_IOS)
void DetachFromMessagePump() override;
#endif
bool ShouldQuitRunLoopWhenIdle() override;
// RunLoop::NestingObserver:
void OnBeginNestedRunLoop() override;
void OnExitNestedRunLoop() override;
protected:
ThreadControllerImpl(SequenceManagerImpl* sequence_manager,
scoped_refptr<SingleThreadTaskRunner> task_runner,
const TickClock* time_source);
// TODO(altimin): Make these const. Blocked on removing
// lazy initialization support.
SequenceManagerImpl* funneled_sequence_manager_;
scoped_refptr<SingleThreadTaskRunner> task_runner_;
RunLoop::NestingObserver* nesting_observer_ = nullptr;
private:
enum class WorkType { kImmediate, kDelayed };
void DoWork(WorkType work_type);
// TODO(scheduler-dev): Maybe fold this into the main class and use
// thread annotations.
struct MainSequenceOnly {
MainSequenceOnly();
~MainSequenceOnly();
int nesting_depth = 0;
int work_batch_size_ = 1;
TimeTicks next_delayed_do_work = TimeTicks::Max();
};
scoped_refptr<AssociatedThreadId> associated_thread_;
MainSequenceOnly main_sequence_only_;
MainSequenceOnly& main_sequence_only() {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
return main_sequence_only_;
}
const MainSequenceOnly& main_sequence_only() const {
DCHECK_CALLED_ON_VALID_SEQUENCE(associated_thread_->sequence_checker);
return main_sequence_only_;
}
scoped_refptr<SingleThreadTaskRunner> message_loop_task_runner_;
const TickClock* time_source_;
RepeatingClosure immediate_do_work_closure_;
RepeatingClosure delayed_do_work_closure_;
CancelableClosure cancelable_delayed_do_work_closure_;
SequencedTaskSource* sequence_ = nullptr; // Not owned.
TaskAnnotator task_annotator_;
WorkDeduplicator work_deduplicator_;
#if DCHECK_IS_ON()
bool default_task_runner_set_ = false;
#endif
WeakPtrFactory<ThreadControllerImpl> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(ThreadControllerImpl);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_IMPL_H_

View file

@ -0,0 +1,522 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/thread_controller_with_message_pump_impl.h"
#include "base/auto_reset.h"
#include "base/memory/ptr_util.h"
#include "base/message_loop/message_pump.h"
#include "base/threading/hang_watcher.h"
#include "base/time/tick_clock.h"
#include "base/trace_event/trace_event.h"
#include "build/build_config.h"
#if defined(OS_IOS)
#include "base/message_loop/message_pump_mac.h"
#elif defined(OS_ANDROID)
#include "base/message_loop/message_pump_android.h"
#endif
namespace base {
namespace sequence_manager {
namespace internal {
namespace {
// Returns |next_run_time| capped at 1 day from |lazy_now|. This is used to
// mitigate https://crbug.com/850450 where some platforms are unhappy with
// delays > 100,000,000 seconds. In practice, a diagnosis metric showed that no
// sleep > 1 hour ever completes (always interrupted by an earlier MessageLoop
// event) and 99% of completed sleeps are the ones scheduled for <= 1 second.
// Details @ https://crrev.com/c/1142589.
TimeTicks CapAtOneDay(TimeTicks next_run_time, LazyNow* lazy_now) {
return std::min(next_run_time, lazy_now->Now() + TimeDelta::FromDays(1));
}
} // namespace
ThreadControllerWithMessagePumpImpl::ThreadControllerWithMessagePumpImpl(
const SequenceManager::Settings& settings)
: associated_thread_(AssociatedThreadId::CreateUnbound()),
work_deduplicator_(associated_thread_),
#if DCHECK_IS_ON()
log_runloop_quit_and_quit_when_idle_(
settings.log_runloop_quit_and_quit_when_idle),
#endif
time_source_(settings.clock) {
}
ThreadControllerWithMessagePumpImpl::ThreadControllerWithMessagePumpImpl(
std::unique_ptr<MessagePump> message_pump,
const SequenceManager::Settings& settings)
: ThreadControllerWithMessagePumpImpl(settings) {
BindToCurrentThread(std::move(message_pump));
}
ThreadControllerWithMessagePumpImpl::~ThreadControllerWithMessagePumpImpl() {
// Destructors of MessagePump::Delegate and ThreadTaskRunnerHandle
// will do all the clean-up.
// ScopedSetSequenceLocalStorageMapForCurrentThread destructor will
// de-register the current thread as a sequence.
}
// static
std::unique_ptr<ThreadControllerWithMessagePumpImpl>
ThreadControllerWithMessagePumpImpl::CreateUnbound(
const SequenceManager::Settings& settings) {
return base::WrapUnique(new ThreadControllerWithMessagePumpImpl(settings));
}
ThreadControllerWithMessagePumpImpl::MainThreadOnly::MainThreadOnly() = default;
ThreadControllerWithMessagePumpImpl::MainThreadOnly::~MainThreadOnly() =
default;
void ThreadControllerWithMessagePumpImpl::SetSequencedTaskSource(
SequencedTaskSource* task_source) {
DCHECK(task_source);
DCHECK(!main_thread_only().task_source);
main_thread_only().task_source = task_source;
}
void ThreadControllerWithMessagePumpImpl::BindToCurrentThread(
std::unique_ptr<MessagePump> message_pump) {
associated_thread_->BindToCurrentThread();
pump_ = std::move(message_pump);
work_id_provider_ = WorkIdProvider::GetForCurrentThread();
RunLoop::RegisterDelegateForCurrentThread(this);
scoped_set_sequence_local_storage_map_for_current_thread_ = std::make_unique<
base::internal::ScopedSetSequenceLocalStorageMapForCurrentThread>(
&sequence_local_storage_map_);
{
base::internal::CheckedAutoLock task_runner_lock(task_runner_lock_);
if (task_runner_)
InitializeThreadTaskRunnerHandle();
}
if (work_deduplicator_.BindToCurrentThread() ==
ShouldScheduleWork::kScheduleImmediate) {
pump_->ScheduleWork();
}
}
void ThreadControllerWithMessagePumpImpl::SetWorkBatchSize(
int work_batch_size) {
DCHECK_GE(work_batch_size, 1);
main_thread_only().work_batch_size = work_batch_size;
}
void ThreadControllerWithMessagePumpImpl::SetTimerSlack(
TimerSlack timer_slack) {
DCHECK(RunsTasksInCurrentSequence());
pump_->SetTimerSlack(timer_slack);
}
void ThreadControllerWithMessagePumpImpl::WillQueueTask(
PendingTask* pending_task,
const char* task_queue_name) {
task_annotator_.WillQueueTask("SequenceManager PostTask", pending_task,
task_queue_name);
}
void ThreadControllerWithMessagePumpImpl::ScheduleWork() {
base::internal::CheckedLock::AssertNoLockHeldOnCurrentThread();
if (work_deduplicator_.OnWorkRequested() ==
ShouldScheduleWork::kScheduleImmediate) {
pump_->ScheduleWork();
}
}
void ThreadControllerWithMessagePumpImpl::SetNextDelayedDoWork(
LazyNow* lazy_now,
TimeTicks run_time) {
DCHECK_LT(lazy_now->Now(), run_time);
if (main_thread_only().next_delayed_do_work == run_time)
return;
// Cap at one day but remember the exact time for the above equality check on
// the next round.
main_thread_only().next_delayed_do_work = run_time;
run_time = CapAtOneDay(run_time, lazy_now);
// It's very rare for PostDelayedTask to be called outside of a Do(Some)Work
// in production, so most of the time this does nothing.
if (work_deduplicator_.OnDelayedWorkRequested() ==
ShouldScheduleWork::kScheduleImmediate) {
// |pump_| can't be null as all PostTasks are cross-thread before binding,
// and delayed cross-thread PostTasks do the thread hop through an immediate
// task.
pump_->ScheduleDelayedWork(run_time);
}
}
const TickClock* ThreadControllerWithMessagePumpImpl::GetClock() {
return time_source_;
}
bool ThreadControllerWithMessagePumpImpl::RunsTasksInCurrentSequence() {
return associated_thread_->IsBoundToCurrentThread();
}
void ThreadControllerWithMessagePumpImpl::SetDefaultTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) {
base::internal::CheckedAutoLock lock(task_runner_lock_);
task_runner_ = task_runner;
if (associated_thread_->IsBound()) {
DCHECK(associated_thread_->IsBoundToCurrentThread());
// The thread is already bound, so create the ThreadTaskRunnerHandle now;
// unbound controllers create it later in BindToCurrentThread().
InitializeThreadTaskRunnerHandle();
}
}
void ThreadControllerWithMessagePumpImpl::InitializeThreadTaskRunnerHandle() {
// Only one ThreadTaskRunnerHandle can exist at any time,
// so reset the old one.
main_thread_only().thread_task_runner_handle.reset();
main_thread_only().thread_task_runner_handle =
std::make_unique<ThreadTaskRunnerHandle>(task_runner_);
}
scoped_refptr<SingleThreadTaskRunner>
ThreadControllerWithMessagePumpImpl::GetDefaultTaskRunner() {
base::internal::CheckedAutoLock lock(task_runner_lock_);
return task_runner_;
}
void ThreadControllerWithMessagePumpImpl::RestoreDefaultTaskRunner() {
// Unlike with the MessageLoop, there's no default task runner to restore.
main_thread_only().thread_task_runner_handle.reset();
}
void ThreadControllerWithMessagePumpImpl::AddNestingObserver(
RunLoop::NestingObserver* observer) {
DCHECK(!main_thread_only().nesting_observer);
DCHECK(observer);
main_thread_only().nesting_observer = observer;
RunLoop::AddNestingObserverOnCurrentThread(this);
}
void ThreadControllerWithMessagePumpImpl::RemoveNestingObserver(
RunLoop::NestingObserver* observer) {
DCHECK_EQ(main_thread_only().nesting_observer, observer);
main_thread_only().nesting_observer = nullptr;
RunLoop::RemoveNestingObserverOnCurrentThread(this);
}
const scoped_refptr<AssociatedThreadId>&
ThreadControllerWithMessagePumpImpl::GetAssociatedThread() const {
return associated_thread_;
}
void ThreadControllerWithMessagePumpImpl::BeforeDoInternalWork() {
// Nested runloops are covered by the parent loop hang watch scope.
// TODO(crbug/1034046): Provide more granular scoping that reuses the parent
// scope deadline.
if (main_thread_only().runloop_count == 1) {
hang_watch_scope_.emplace(base::HangWatchScope::kDefaultHangWatchTime);
}
work_id_provider_->IncrementWorkId();
}
void ThreadControllerWithMessagePumpImpl::BeforeWait() {
// Nested runloops are covered by the parent loop hang watch scope.
// TODO(crbug/1034046): Provide more granular scoping that reuses the parent
// scope deadline.
if (main_thread_only().runloop_count == 1) {
// Waiting for work cannot be covered by a hang watch scope because that
// means the thread can be idle for unbounded time.
hang_watch_scope_.reset();
}
work_id_provider_->IncrementWorkId();
}
MessagePump::Delegate::NextWorkInfo
ThreadControllerWithMessagePumpImpl::DoSomeWork() {
// Nested runloops are covered by the parent loop hang watch scope.
// TODO(crbug/1034046): Provide more granular scoping that reuses the parent
// scope deadline.
if (main_thread_only().runloop_count == 1) {
hang_watch_scope_.emplace(base::HangWatchScope::kDefaultHangWatchTime);
}
work_deduplicator_.OnWorkStarted();
bool ran_task = false; // Unused.
LazyNow continuation_lazy_now(time_source_);
TimeDelta delay_till_next_task =
DoWorkImpl(&continuation_lazy_now, &ran_task);
// Schedule a continuation.
WorkDeduplicator::NextTask next_task =
delay_till_next_task.is_zero() ? WorkDeduplicator::NextTask::kIsImmediate
: WorkDeduplicator::NextTask::kIsDelayed;
if (work_deduplicator_.DidCheckForMoreWork(next_task) ==
ShouldScheduleWork::kScheduleImmediate) {
// Need to run new work immediately, but due to the contract of DoSomeWork
// we only need to return a null TimeTicks to ensure that happens.
return MessagePump::Delegate::NextWorkInfo();
}
// While the math below would saturate when |delay_till_next_task.is_max()|,
// special-casing here avoids unnecessarily sampling Now() when out of work.
if (delay_till_next_task.is_max()) {
main_thread_only().next_delayed_do_work = TimeTicks::Max();
return {TimeTicks::Max()};
}
// The MessagePump will schedule the delay on our behalf, so we need to update
// |main_thread_only().next_delayed_do_work|.
// TODO(gab, alexclarke): Replace DelayTillNextTask() with NextTaskTime() to
// avoid converting back-and-forth between TimeTicks and TimeDelta.
main_thread_only().next_delayed_do_work =
continuation_lazy_now.Now() + delay_till_next_task;
// Don't request a run time past |main_thread_only().quit_runloop_after|.
if (main_thread_only().next_delayed_do_work >
main_thread_only().quit_runloop_after) {
main_thread_only().next_delayed_do_work =
main_thread_only().quit_runloop_after;
// If we've passed |quit_runloop_after| there's no more work to do.
if (continuation_lazy_now.Now() >= main_thread_only().quit_runloop_after)
return {TimeTicks::Max()};
}
return {CapAtOneDay(main_thread_only().next_delayed_do_work,
&continuation_lazy_now),
continuation_lazy_now.Now()};
}
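// Illustrative contract notes (an assumption drawn from the code above, not
// an authoritative spec): returning a default-constructed NextWorkInfo (null
// |delayed_run_time|) asks the pump to call DoSomeWork() again immediately,
// while {TimeTicks::Max()} means "sleep until ScheduleWork()".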
TimeDelta ThreadControllerWithMessagePumpImpl::DoWorkImpl(
LazyNow* continuation_lazy_now,
bool* ran_task) {
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("sequence_manager"),
"ThreadControllerImpl::DoWork");
if (!main_thread_only().task_execution_allowed) {
if (main_thread_only().quit_runloop_after == TimeTicks::Max())
return TimeDelta::Max();
return main_thread_only().quit_runloop_after - continuation_lazy_now->Now();
}
DCHECK(main_thread_only().task_source);
for (int i = 0; i < main_thread_only().work_batch_size; i++) {
Task* task = main_thread_only().task_source->SelectNextTask();
if (!task)
break;
// Execute the task and assume the worst: it is probably not reentrant.
main_thread_only().task_execution_allowed = false;
work_id_provider_->IncrementWorkId();
// Trace-parsing tools (DevTools, Lighthouse, etc) consume this event
// to determine long tasks.
// The event scope must span across DidRunTask call below to make sure
// it covers RunMicrotasks event.
// See https://crbug.com/681863 and https://crbug.com/874982
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("devtools.timeline"), "RunTask");
{
// Trace events should finish before we call DidRunTask to ensure that
// SequenceManager trace events do not interfere with them.
TRACE_TASK_EXECUTION("ThreadControllerImpl::RunTask", *task);
task_annotator_.RunTask("SequenceManager RunTask", task);
}
#if DCHECK_IS_ON()
if (log_runloop_quit_and_quit_when_idle_ && !quit_when_idle_requested_ &&
ShouldQuitWhenIdle()) {
DVLOG(1) << "ThreadControllerWithMessagePumpImpl::QuitWhenIdle";
quit_when_idle_requested_ = true;
}
#endif
*ran_task = true;
main_thread_only().task_execution_allowed = true;
main_thread_only().task_source->DidRunTask();
// When Quit() is called we must stop running the batch because the caller
// expects per-task granularity.
if (main_thread_only().quit_pending)
break;
}
if (main_thread_only().quit_pending)
return TimeDelta::Max();
work_deduplicator_.WillCheckForMoreWork();
TimeDelta do_work_delay =
main_thread_only().task_source->DelayTillNextTask(continuation_lazy_now);
DCHECK_GE(do_work_delay, TimeDelta());
return do_work_delay;
}
bool ThreadControllerWithMessagePumpImpl::DoIdleWork() {
TRACE_EVENT0("sequence_manager", "SequenceManager::DoIdleWork");
// Nested runloops are covered by the parent loop hang watch scope.
// TODO(crbug/1034046): Provide more granular scoping that reuses the parent
// scope deadline.
if (main_thread_only().runloop_count == 1) {
hang_watch_scope_.emplace(base::HangWatchScope::kDefaultHangWatchTime);
}
work_id_provider_->IncrementWorkId();
#if defined(OS_WIN)
bool need_high_res_mode =
main_thread_only().task_source->HasPendingHighResolutionTasks();
if (main_thread_only().in_high_res_mode != need_high_res_mode) {
// On Windows we activate the high resolution timer so that the wait
// _if_ triggered by the timer happens with good resolution. If we don't
// do this the default resolution is 15ms which might not be acceptable
// for some tasks.
main_thread_only().in_high_res_mode = need_high_res_mode;
Time::ActivateHighResolutionTimer(need_high_res_mode);
}
#endif // defined(OS_WIN)
if (main_thread_only().task_source->OnSystemIdle()) {
// The OnSystemIdle() callback resulted in more immediate work, so schedule
// a DoWork callback. For some message pumps returning true from here is
// sufficient to do that, but not on Mac.
pump_->ScheduleWork();
return false;
}
// Check if any runloop timeout has expired.
if (main_thread_only().quit_runloop_after != TimeTicks::Max() &&
main_thread_only().quit_runloop_after <= time_source_->NowTicks()) {
Quit();
return false;
}
// RunLoop::Delegate knows whether we called Run() or RunUntilIdle().
if (ShouldQuitWhenIdle())
Quit();
return false;
}
void ThreadControllerWithMessagePumpImpl::Run(bool application_tasks_allowed,
TimeDelta timeout) {
DCHECK(RunsTasksInCurrentSequence());
// RunLoops can be nested so we need to restore the previous value of
// |quit_runloop_after| upon exit. NB we could use saturated arithmetic here
// but don't because we have some tests which assert the number of calls to
// Now.
AutoReset<TimeTicks> quit_runloop_after(
&main_thread_only().quit_runloop_after,
(timeout == TimeDelta::Max()) ? TimeTicks::Max()
: time_source_->NowTicks() + timeout);
#if DCHECK_IS_ON()
AutoReset<bool> quit_when_idle_requested(&quit_when_idle_requested_, false);
#endif
// Quit may have been called outside of a Run(), so |quit_pending| might be
// true here. We can't use InTopLevelDoWork() in Quit() as this call may be
// outside top-level DoWork but still in Run().
main_thread_only().quit_pending = false;
main_thread_only().runloop_count++;
if (application_tasks_allowed && !main_thread_only().task_execution_allowed) {
// Allow nested task execution as explicitly requested.
DCHECK(RunLoop::IsNestedOnCurrentThread());
main_thread_only().task_execution_allowed = true;
pump_->Run(this);
main_thread_only().task_execution_allowed = false;
} else {
pump_->Run(this);
}
#if DCHECK_IS_ON()
if (log_runloop_quit_and_quit_when_idle_)
DVLOG(1) << "ThreadControllerWithMessagePumpImpl::Quit";
#endif
main_thread_only().runloop_count--;
main_thread_only().quit_pending = false;
// Reset the hang watch scope upon exiting the outermost loop since the
// execution it covers is now completely over.
if (main_thread_only().runloop_count == 0)
hang_watch_scope_.reset();
}
void ThreadControllerWithMessagePumpImpl::OnBeginNestedRunLoop() {
// We don't need to ScheduleWork here! That's because the call to pump_->Run()
// above, which is always called for RunLoop().Run(), guarantees a call to
// Do(Some)Work on all platforms.
if (main_thread_only().nesting_observer)
main_thread_only().nesting_observer->OnBeginNestedRunLoop();
}
void ThreadControllerWithMessagePumpImpl::OnExitNestedRunLoop() {
if (main_thread_only().nesting_observer)
main_thread_only().nesting_observer->OnExitNestedRunLoop();
}
void ThreadControllerWithMessagePumpImpl::Quit() {
DCHECK(RunsTasksInCurrentSequence());
// Interrupt a batch of work.
main_thread_only().quit_pending = true;
// If we're in a nested RunLoop, continuation will be posted if necessary.
pump_->Quit();
}
void ThreadControllerWithMessagePumpImpl::EnsureWorkScheduled() {
if (work_deduplicator_.OnWorkRequested() ==
ShouldScheduleWork::kScheduleImmediate)
pump_->ScheduleWork();
}
void ThreadControllerWithMessagePumpImpl::SetTaskExecutionAllowed(
bool allowed) {
if (allowed) {
// We need to schedule work unconditionally because we might be about to
// enter an OS level nested message loop. Unlike a RunLoop().Run() we don't
// get a call to Do(Some)Work on entering for free.
work_deduplicator_.OnWorkRequested(); // Set the pending DoWork flag.
pump_->ScheduleWork();
} else {
// We've (probably) just left an OS level nested message loop. Make sure a
// subsequent PostTask within the same Task doesn't ScheduleWork with the
// pump (this will be done anyway when the task exits).
work_deduplicator_.OnWorkStarted();
}
main_thread_only().task_execution_allowed = allowed;
}
bool ThreadControllerWithMessagePumpImpl::IsTaskExecutionAllowed() const {
return main_thread_only().task_execution_allowed;
}
MessagePump* ThreadControllerWithMessagePumpImpl::GetBoundMessagePump() const {
return pump_.get();
}
#if defined(OS_IOS)
void ThreadControllerWithMessagePumpImpl::AttachToMessagePump() {
static_cast<MessagePumpCFRunLoopBase*>(pump_.get())->Attach(this);
}
void ThreadControllerWithMessagePumpImpl::DetachFromMessagePump() {
static_cast<MessagePumpCFRunLoopBase*>(pump_.get())->Detach();
}
#elif defined(OS_ANDROID)
void ThreadControllerWithMessagePumpImpl::AttachToMessagePump() {
static_cast<MessagePumpForUI*>(pump_.get())->Attach(this);
}
#endif
bool ThreadControllerWithMessagePumpImpl::ShouldQuitRunLoopWhenIdle() {
if (main_thread_only().runloop_count == 0)
return false;
// It's only safe to call ShouldQuitWhenIdle() when in a RunLoop.
return ShouldQuitWhenIdle();
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,193 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_
#define BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_
#include <memory>
#include "base/message_loop/message_pump.h"
#include "base/message_loop/work_id_provider.h"
#include "base/optional.h"
#include "base/run_loop.h"
#include "base/task/common/checked_lock.h"
#include "base/task/common/task_annotator.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/thread_controller.h"
#include "base/task/sequence_manager/work_deduplicator.h"
#include "base/thread_annotations.h"
#include "base/threading/hang_watcher.h"
#include "base/threading/platform_thread.h"
#include "base/threading/sequence_local_storage_map.h"
#include "base/threading/thread_task_runner_handle.h"
#include "build/build_config.h"
namespace base {
namespace sequence_manager {
namespace internal {
// This is the interface between the SequenceManager and the MessagePump.
class BASE_EXPORT ThreadControllerWithMessagePumpImpl
: public ThreadController,
public MessagePump::Delegate,
public RunLoop::Delegate,
public RunLoop::NestingObserver {
public:
ThreadControllerWithMessagePumpImpl(
std::unique_ptr<MessagePump> message_pump,
const SequenceManager::Settings& settings);
~ThreadControllerWithMessagePumpImpl() override;
using ShouldScheduleWork = WorkDeduplicator::ShouldScheduleWork;
static std::unique_ptr<ThreadControllerWithMessagePumpImpl> CreateUnbound(
const SequenceManager::Settings& settings);
// ThreadController implementation:
void SetSequencedTaskSource(SequencedTaskSource* task_source) override;
void BindToCurrentThread(std::unique_ptr<MessagePump> message_pump) override;
void SetWorkBatchSize(int work_batch_size) override;
void WillQueueTask(PendingTask* pending_task,
const char* task_queue_name) override;
void ScheduleWork() override;
void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) override;
void SetTimerSlack(TimerSlack timer_slack) override;
const TickClock* GetClock() override;
bool RunsTasksInCurrentSequence() override;
void SetDefaultTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) override;
scoped_refptr<SingleThreadTaskRunner> GetDefaultTaskRunner() override;
void RestoreDefaultTaskRunner() override;
void AddNestingObserver(RunLoop::NestingObserver* observer) override;
void RemoveNestingObserver(RunLoop::NestingObserver* observer) override;
const scoped_refptr<AssociatedThreadId>& GetAssociatedThread() const override;
void SetTaskExecutionAllowed(bool allowed) override;
bool IsTaskExecutionAllowed() const override;
MessagePump* GetBoundMessagePump() const override;
#if defined(OS_IOS) || defined(OS_ANDROID)
void AttachToMessagePump() override;
#endif
#if defined(OS_IOS)
void DetachFromMessagePump() override;
#endif
bool ShouldQuitRunLoopWhenIdle() override;
// RunLoop::NestingObserver:
void OnBeginNestedRunLoop() override;
void OnExitNestedRunLoop() override;
protected:
explicit ThreadControllerWithMessagePumpImpl(
const SequenceManager::Settings& settings);
// MessagePump::Delegate implementation.
void BeforeDoInternalWork() override;
void BeforeWait() override;
MessagePump::Delegate::NextWorkInfo DoSomeWork() override;
bool DoIdleWork() override;
// RunLoop::Delegate implementation.
void Run(bool application_tasks_allowed, TimeDelta timeout) override;
void Quit() override;
void EnsureWorkScheduled() override;
private:
friend class DoWorkScope;
friend class RunScope;
// Returns the delay till the next task. If there is no upcoming task,
// TimeDelta::Max() is returned.
TimeDelta DoWorkImpl(LazyNow* continuation_lazy_now, bool* ran_task);
void InitializeThreadTaskRunnerHandle()
EXCLUSIVE_LOCKS_REQUIRED(task_runner_lock_);
struct MainThreadOnly {
MainThreadOnly();
~MainThreadOnly();
SequencedTaskSource* task_source = nullptr; // Not owned.
RunLoop::NestingObserver* nesting_observer = nullptr; // Not owned.
std::unique_ptr<ThreadTaskRunnerHandle> thread_task_runner_handle;
// Indicates that we should yield from DoWork between tasks to let a
// possibly nested RunLoop exit.
bool quit_pending = false;
// Whether high resolution timing is enabled or not.
bool in_high_res_mode = false;
// Number of tasks processed in a single DoWork invocation.
int work_batch_size = 1;
int runloop_count = 0;
// When the next scheduled delayed work should run, if any.
TimeTicks next_delayed_do_work = TimeTicks::Max();
// The time after which the runloop should quit.
TimeTicks quit_runloop_after = TimeTicks::Max();
bool task_execution_allowed = true;
};
MainThreadOnly& main_thread_only() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
const MainThreadOnly& main_thread_only() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
return main_thread_only_;
}
// TODO(altimin): Merge with the one in SequenceManager.
scoped_refptr<AssociatedThreadId> associated_thread_;
MainThreadOnly main_thread_only_;
mutable base::internal::CheckedLock task_runner_lock_;
scoped_refptr<SingleThreadTaskRunner> task_runner_
GUARDED_BY(task_runner_lock_);
WorkDeduplicator work_deduplicator_;
// Can only be set once (just before calling
// work_deduplicator_.BindToCurrentThread()). After that only read access is
// allowed.
std::unique_ptr<MessagePump> pump_;
TaskAnnotator task_annotator_;
#if DCHECK_IS_ON()
const bool log_runloop_quit_and_quit_when_idle_;
bool quit_when_idle_requested_ = false;
#endif
const TickClock* time_source_; // Not owned.
// Non-null provider of id state for identifying distinct work items executed
// by the message loop (task, event, etc.). Cached on the class to avoid TLS
// lookups on task execution.
WorkIdProvider* work_id_provider_ = nullptr;
// Required to register the current thread as a sequence.
base::internal::SequenceLocalStorageMap sequence_local_storage_map_;
std::unique_ptr<
base::internal::ScopedSetSequenceLocalStorageMapForCurrentThread>
scoped_set_sequence_local_storage_map_for_current_thread_;
// Reset at the start of each unit of work to cover the work itself and then
// transition to the next one.
base::Optional<HangWatchScope> hang_watch_scope_;
DISALLOW_COPY_AND_ASSIGN(ThreadControllerWithMessagePumpImpl);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_THREAD_CONTROLLER_WITH_MESSAGE_PUMP_IMPL_H_

View file

@ -0,0 +1,168 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/time_domain.h"
#include "base/task/sequence_manager/associated_thread_id.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/work_queue.h"
#include "base/threading/thread_checker.h"
namespace base {
namespace sequence_manager {
TimeDomain::TimeDomain()
: sequence_manager_(nullptr),
associated_thread_(MakeRefCounted<internal::AssociatedThreadId>()) {}
TimeDomain::~TimeDomain() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
}
void TimeDomain::OnRegisterWithSequenceManager(
internal::SequenceManagerImpl* sequence_manager) {
DCHECK(sequence_manager);
DCHECK(!sequence_manager_);
sequence_manager_ = sequence_manager;
associated_thread_ = sequence_manager_->associated_thread();
}
SequenceManager* TimeDomain::sequence_manager() const {
DCHECK(sequence_manager_);
return sequence_manager_;
}
// TODO(kraynov): https://crbug.com/857101 Consider making an interface
// for SequenceManagerImpl which will expose SetNextDelayedDoWork and
// MaybeScheduleImmediateWork methods to make the functions below pure-virtual.
void TimeDomain::SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time) {
sequence_manager_->SetNextDelayedDoWork(lazy_now, run_time);
}
void TimeDomain::RequestDoWork() {
sequence_manager_->ScheduleWork();
}
void TimeDomain::UnregisterQueue(internal::TaskQueueImpl* queue) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(queue->GetTimeDomain(), this);
LazyNow lazy_now(CreateLazyNow());
SetNextWakeUpForQueue(queue, nullopt, internal::WakeUpResolution::kLow,
&lazy_now);
}
void TimeDomain::SetNextWakeUpForQueue(
internal::TaskQueueImpl* queue,
Optional<internal::DelayedWakeUp> wake_up,
internal::WakeUpResolution resolution,
LazyNow* lazy_now) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(queue->GetTimeDomain(), this);
DCHECK(queue->IsQueueEnabled() || !wake_up);
Optional<TimeTicks> previous_wake_up;
Optional<internal::WakeUpResolution> previous_queue_resolution;
if (!delayed_wake_up_queue_.empty())
previous_wake_up = delayed_wake_up_queue_.Min().wake_up.time;
if (queue->heap_handle().IsValid()) {
previous_queue_resolution =
delayed_wake_up_queue_.at(queue->heap_handle()).resolution;
}
if (wake_up) {
// Insert a new wake-up into the heap.
if (queue->heap_handle().IsValid()) {
// O(log n)
delayed_wake_up_queue_.ChangeKey(queue->heap_handle(),
{wake_up.value(), resolution, queue});
} else {
// O(log n)
delayed_wake_up_queue_.insert({wake_up.value(), resolution, queue});
}
} else {
// Remove a wake-up from heap if present.
if (queue->heap_handle().IsValid())
delayed_wake_up_queue_.erase(queue->heap_handle());
}
Optional<TimeTicks> new_wake_up;
if (!delayed_wake_up_queue_.empty())
new_wake_up = delayed_wake_up_queue_.Min().wake_up.time;
if (previous_queue_resolution &&
*previous_queue_resolution == internal::WakeUpResolution::kHigh) {
pending_high_res_wake_up_count_--;
}
if (wake_up && resolution == internal::WakeUpResolution::kHigh)
pending_high_res_wake_up_count_++;
DCHECK_GE(pending_high_res_wake_up_count_, 0);
// TODO(kraynov): https://crbug.com/857101 Review the relationship with
// SequenceManager's time. Right now it's not an issue since
// VirtualTimeDomain doesn't invoke SequenceManager itself.
if (new_wake_up == previous_wake_up) {
// Nothing to be done
return;
}
if (!new_wake_up) {
// No new wake-up to be set, cancel the previous one.
new_wake_up = TimeTicks::Max();
}
if (*new_wake_up <= lazy_now->Now()) {
RequestDoWork();
} else {
SetNextDelayedDoWork(lazy_now, *new_wake_up);
}
}
void TimeDomain::MoveReadyDelayedTasksToWorkQueues(LazyNow* lazy_now) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
// Wake up any queues with pending delayed work. Note |delayed_wake_up_queue_|
// is a min-heap ordered by wake-up time, so Min() always refers to the
// earliest queue to wake up.
while (!delayed_wake_up_queue_.empty() &&
delayed_wake_up_queue_.Min().wake_up.time <= lazy_now->Now()) {
internal::TaskQueueImpl* queue = delayed_wake_up_queue_.Min().queue;
queue->MoveReadyDelayedTasksToWorkQueue(lazy_now);
}
}
Optional<TimeTicks> TimeDomain::NextScheduledRunTime() const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
if (delayed_wake_up_queue_.empty())
return nullopt;
return delayed_wake_up_queue_.Min().wake_up.time;
}
void TimeDomain::AsValueInto(trace_event::TracedValue* state) const {
state->BeginDictionary();
state->SetString("name", GetName());
state->SetInteger("registered_delay_count", delayed_wake_up_queue_.size());
if (!delayed_wake_up_queue_.empty()) {
TimeDelta delay = delayed_wake_up_queue_.Min().wake_up.time - Now();
state->SetDouble("next_delay_ms", delay.InMillisecondsF());
}
AsValueIntoInternal(state);
state->EndDictionary();
}
void TimeDomain::AsValueIntoInternal(trace_event::TracedValue* state) const {
// Can be overridden to trace some additional state.
}
bool TimeDomain::HasPendingHighResolutionTasks() const {
return pending_high_res_wake_up_count_;
}
bool TimeDomain::Empty() const {
return delayed_wake_up_queue_.empty();
}
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,160 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_
#define BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_
#include <map>
#include "base/callback.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/sequence_manager/lazy_now.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/time/time.h"
namespace base {
namespace sequence_manager {
class SequenceManager;
namespace internal {
class AssociatedThreadId;
class SequenceManagerImpl;
class TaskQueueImpl;
} // namespace internal
// TimeDomain wakes up TaskQueues when their delayed tasks are due to run.
// This class allows overrides to enable clock overriding on some TaskQueues
// (e.g. auto-advancing virtual time, throttled clock, etc).
//
// TaskQueue maintains its own next wake-up time and communicates it
// to the TimeDomain, which aggregates wake-ups across registered TaskQueues
// into a global wake-up, which ultimately gets passed to the ThreadController.
class BASE_EXPORT TimeDomain {
public:
virtual ~TimeDomain();
// Returns LazyNow in TimeDomain's time.
// Can be called from any thread.
// TODO(alexclarke): Make this main thread only.
virtual LazyNow CreateLazyNow() const = 0;
// Evaluates TimeDomain's time.
// Can be called from any thread.
// TODO(alexclarke): Make this main thread only.
virtual TimeTicks Now() const = 0;
// Computes the delay until the time when TimeDomain needs to wake up some
// TaskQueue on the main thread. Specific time domains (e.g. virtual or
// throttled) may return TimeDelta() if TaskQueues have any delayed tasks they
// deem eligible to run. It's also allowed to advance the time domain's internal
// clock when this method is called.
// Can be called from main thread only.
// NOTE: |lazy_now| and the return value are in the SequenceManager's time.
virtual Optional<TimeDelta> DelayTillNextTask(LazyNow* lazy_now) = 0;
void AsValueInto(trace_event::TracedValue* state) const;
bool HasPendingHighResolutionTasks() const;
// Returns true if there are no pending delayed tasks.
bool Empty() const;
// This is the signal that virtual time should step forward. If
// RunLoop::QuitWhenIdle has been called then |quit_when_idle_requested| will
// be true. Returns true if there is a task to run now.
virtual bool MaybeFastForwardToNextTask(bool quit_when_idle_requested) = 0;
protected:
TimeDomain();
SequenceManager* sequence_manager() const;
// Returns the earliest scheduled wake up in the TimeDomain's time.
Optional<TimeTicks> NextScheduledRunTime() const;
size_t NumberOfScheduledWakeUps() const {
return delayed_wake_up_queue_.size();
}
// Tells SequenceManager to schedule delayed work; use TimeTicks::Max()
// to unschedule. Also cancels any previous requests.
// May be overridden to control wake-ups manually.
virtual void SetNextDelayedDoWork(LazyNow* lazy_now, TimeTicks run_time);
// Tells SequenceManager to schedule immediate work.
// May be overridden to control wake-ups manually.
virtual void RequestDoWork();
// For implementation-specific tracing.
virtual void AsValueIntoInternal(trace_event::TracedValue* state) const;
virtual const char* GetName() const = 0;
// Called when the TimeDomain is registered. |sequence_manager| is expected to
// be valid for the duration of TimeDomain's existence.
// TODO(scheduler-dev): Pass SequenceManager in the constructor.
virtual void OnRegisterWithSequenceManager(
internal::SequenceManagerImpl* sequence_manager);
private:
friend class internal::TaskQueueImpl;
friend class internal::SequenceManagerImpl;
friend class TestTimeDomain;
// Schedules a TaskQueue to wake up at a certain time; repeated calls with
// the same |queue| invalidate previous requests.
// Nullopt |wake_up| cancels a previously set wake up for |queue|.
// NOTE: |lazy_now| is provided in TimeDomain's time.
void SetNextWakeUpForQueue(internal::TaskQueueImpl* queue,
Optional<internal::DelayedWakeUp> wake_up,
internal::WakeUpResolution resolution,
LazyNow* lazy_now);
// Remove the TaskQueue from any internal data structures.
void UnregisterQueue(internal::TaskQueueImpl* queue);
// Wake up each TaskQueue where the delay has elapsed. Note this doesn't
// ScheduleWork.
void MoveReadyDelayedTasksToWorkQueues(LazyNow* lazy_now);
struct ScheduledDelayedWakeUp {
internal::DelayedWakeUp wake_up;
internal::WakeUpResolution resolution;
internal::TaskQueueImpl* queue;
bool operator<=(const ScheduledDelayedWakeUp& other) const {
if (wake_up == other.wake_up) {
return static_cast<int>(resolution) <=
static_cast<int>(other.resolution);
}
return wake_up <= other.wake_up;
}
void SetHeapHandle(base::internal::HeapHandle handle) {
DCHECK(handle.IsValid());
queue->set_heap_handle(handle);
}
void ClearHeapHandle() {
DCHECK(queue->heap_handle().IsValid());
queue->set_heap_handle(base::internal::HeapHandle());
}
base::internal::HeapHandle GetHeapHandle() const {
return queue->heap_handle();
}
};
internal::SequenceManagerImpl* sequence_manager_; // Not owned.
base::internal::IntrusiveHeap<ScheduledDelayedWakeUp> delayed_wake_up_queue_;
int pending_high_res_wake_up_count_ = 0;
scoped_refptr<internal::AssociatedThreadId> associated_thread_;
DISALLOW_COPY_AND_ASSIGN(TimeDomain);
};
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_TIME_DOMAIN_H_

View file

@ -0,0 +1,96 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/work_deduplicator.h"
#include <utility>
#include "base/logging.h"
namespace base {
namespace sequence_manager {
namespace internal {
WorkDeduplicator::WorkDeduplicator(
scoped_refptr<AssociatedThreadId> associated_thread)
: associated_thread_(std::move(associated_thread)) {}
WorkDeduplicator::~WorkDeduplicator() = default;
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::BindToCurrentThread() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
int previous_flags = state_.fetch_or(kBoundFlag);
DCHECK_EQ(previous_flags & kBoundFlag, 0) << "Can't bind twice!";
return previous_flags & kPendingDoWorkFlag
? ShouldScheduleWork::kScheduleImmediate
: ShouldScheduleWork::kNotNeeded;
}
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::OnWorkRequested() {
// Set kPendingDoWorkFlag and return kScheduleImmediate if we were previously
// kIdle.
return state_.fetch_or(kPendingDoWorkFlag) == State::kIdle
? ShouldScheduleWork::kScheduleImmediate
: ShouldScheduleWork::kNotNeeded;
}
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::OnDelayedWorkRequested()
const {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
// This must be called on the associated thread or this read is racy.
return state_.load() == State::kIdle ? ShouldScheduleWork::kScheduleImmediate
: ShouldScheduleWork::kNotNeeded;
}
void WorkDeduplicator::OnWorkStarted() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(state_.load() & kBoundFlag, kBoundFlag);
// Clear kPendingDoWorkFlag and mark us as in a DoWork.
state_.store(State::kInDoWork);
}
void WorkDeduplicator::WillCheckForMoreWork() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(state_.load() & kBoundFlag, kBoundFlag);
// Clear kPendingDoWorkFlag if it was set.
state_.store(State::kInDoWork);
}
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::DidCheckForMoreWork(
NextTask next_task) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
DCHECK_EQ(state_.load() & kBoundFlag, kBoundFlag);
last_work_check_result_ = ShouldScheduleWork::kScheduleImmediate;
if (next_task == NextTask::kIsImmediate) {
state_.store(State::kDoWorkPending);
} else {
// Another thread may have set kPendingDoWorkFlag between
// WillCheckForMoreWork() and here, if so we should return
// ShouldScheduleWork::kScheduleImmediate. Otherwise we don't need to
// schedule an immediate continuation.
if (!(state_.fetch_and(~kInDoWorkFlag) & kPendingDoWorkFlag))
last_work_check_result_ = ShouldScheduleWork::kNotNeeded;
}
return last_work_check_result_;
}
void WorkDeduplicator::OnDelayedWorkStarted() {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
OnWorkStarted();
}
WorkDeduplicator::ShouldScheduleWork WorkDeduplicator::OnDelayedWorkEnded(
NextTask next_task) {
DCHECK_CALLED_ON_VALID_THREAD(associated_thread_->thread_checker);
ShouldScheduleWork prev_last_work_check_result = last_work_check_result_;
WorkDeduplicator::ShouldScheduleWork should_schedule_work =
DidCheckForMoreWork(next_task);
if (prev_last_work_check_result == ShouldScheduleWork::kScheduleImmediate) {
prev_last_work_check_result = ShouldScheduleWork::kNotNeeded;
should_schedule_work = ShouldScheduleWork::kNotNeeded;
}
return should_schedule_work;
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,157 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_WORK_DEDUPLICATOR_H_
#define BASE_TASK_SEQUENCE_MANAGER_WORK_DEDUPLICATOR_H_
#include <atomic>
#include "base/base_export.h"
#include "base/task/sequence_manager/associated_thread_id.h"
namespace base {
namespace sequence_manager {
namespace internal {
// This class's job is to prevent redundant DoWorks being posted, which are
// expensive. The idea is a DoWork will (maybe) run a task before computing the
// delay till the next task. If the task run posts another task, we don't want
// it to schedule work because the DoWork will post a continuation as needed
// with the latest state taken into consideration (fences, enable / disable
// queue, task cancellation, etc...) Other threads can also post DoWork at any
// time, including while we're computing the delay till the next task. To
// account for that, we have split a DoWork up into two sections:
// [OnWorkStarted .. WillCheckForMoreWork] and
// [WillCheckForMoreWork .. DidCheckForMoreWork] where DidCheckForMoreWork
// detects if another thread called OnWorkRequested.
//
// Nesting is assumed to be dealt with by the ThreadController.
//
// Most methods are thread-affine, except OnWorkRequested, which is
// thread-safe.
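//
// Example (illustrative sketch only): the call sequence a hypothetical
// ThreadController might use around a DoWork. ScheduleDoWork(), RunTask() and
// ComputeNextTask() are placeholders, not real APIs.
//
//   // On any thread, when an immediate task is posted:
//   if (deduplicator.OnWorkRequested() ==
//       WorkDeduplicator::ShouldScheduleWork::kScheduleImmediate) {
//     ScheduleDoWork();  // e.g. via MessagePump::ScheduleWork().
//   }
//
//   // On the associated thread, inside DoWork:
//   deduplicator.OnWorkStarted();
//   RunTask();
//   deduplicator.WillCheckForMoreWork();
//   WorkDeduplicator::NextTask next = ComputeNextTask();
//   if (deduplicator.DidCheckForMoreWork(next) ==
//       WorkDeduplicator::ShouldScheduleWork::kScheduleImmediate) {
//     ScheduleDoWork();  // Another DoWork continuation is needed.
//   }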
class BASE_EXPORT WorkDeduplicator {
public:
// Creates an unbound WorkDeduplicator. BindToCurrentThread must be called
// before work can be scheduled.
explicit WorkDeduplicator(
scoped_refptr<AssociatedThreadId> associated_thread);
~WorkDeduplicator();
enum ShouldScheduleWork {
kScheduleImmediate,
kNotNeeded,
};
// Returns ShouldScheduleWork::kScheduleImmediate if OnWorkRequested was
// called while unbound. Must be called on the associated thread.
ShouldScheduleWork BindToCurrentThread();
// Returns ShouldScheduleWork::kScheduleImmediate if it's OK to schedule a
// DoWork without risk of task duplication, i.e. this call set the pending
// DoWork flag. Returns ShouldScheduleWork::kNotNeeded if:
// * We are unbound
// * We are in a DoWork
// * There is already a pending DoWork
// Can be called on any thread.
//
// DoWork
// ---------------------------------------------------------------------
// | <- OnWorkStarted | |
// | WillCheckForMoreWork -> | |
// | | DidCheckForMoreWork -> |
// ---------------------------------------------------------------------
// ^ ^ ^ ^
// | | | |
// A B C D
//
// Consider a DoWork and calls to OnWorkRequested at various times:
// A: return ShouldScheduleWork::kNotNeeded because there's a pending DoWork.
// B: return ShouldScheduleWork::kNotNeeded because we're in a DoWork.
// C: return ShouldScheduleWork::kNotNeeded because we're in a DoWork, however
// DidCheckForMoreWork should subsequently return
//    ShouldScheduleWork::kScheduleImmediate.
// D: If DidCheckForMoreWork(NextTask::kIsImmediate) was called then it
//    should return ShouldScheduleWork::kNotNeeded because there's a pending
//    DoWork. Otherwise it should return
//    ShouldScheduleWork::kScheduleImmediate, but a subsequent call to
//    OnWorkRequested should return ShouldScheduleWork::kNotNeeded because
//    there's now a pending DoWork.
ShouldScheduleWork OnWorkRequested();
// Returns ShouldScheduleWork::kScheduleImmediate if it's OK to schedule a
// DoDelayedWork without risk of redundancy. Deduplication of delayed work is
// assumed to have been done by the caller; the purpose of this method is to
// check if there's a pending Do(Some)Work which would schedule a delayed
// continuation as needed.
//
// Returns ShouldScheduleWork::kNotNeeded if:
// * We are unbound
// * We are in a DoWork
// * There is a pending DoWork
//
// Must be called on the associated thread.
ShouldScheduleWork OnDelayedWorkRequested() const;
// Marks us as having entered a DoWork, clearing the pending DoWork flag.
// Must be called on the associated thread.
void OnWorkStarted();
// Marks us as being about to check if we have more work. This notification
// helps prevent DoWork duplication in two scenarios:
// * A cross-thread immediate task is posted while we are running a task. If
// the TaskQueue is disabled we can avoid a potentially spurious DoWork.
// * A task is run which posts an immediate task but the ThreadControllerImpl
// work batch size is 2, and there's no further work. The immediate task ran
// in the work batch so we don't need another DoWork.
void WillCheckForMoreWork();
enum NextTask {
kIsImmediate,
kIsDelayed,
};
// Marks us as exiting DoWork. Returns ShouldScheduleWork::kScheduleImmediate
// if an immediate DoWork continuation should be posted. This method takes
// into account any OnWorkRequested's called between WillCheckForMoreWork and
// here. Must be called on the associated thread.
ShouldScheduleWork DidCheckForMoreWork(NextTask next_task);
// For ThreadControllerWithMessagePumpImpl. The MessagePump calls DoWork and
// DoDelayedWork sequentially. If DoWork returns
// ShouldScheduleWork::kScheduleImmediate, the pump will call ScheduleWork. We
// remember if DoWork will be scheduled so we don't accidentally call it twice
// from DoDelayedWork. Must be called on the associated thread.
// TODO(alexclarke): Remove these when the DoWork/DoDelayed work merger
// happens.
void OnDelayedWorkStarted();
ShouldScheduleWork OnDelayedWorkEnded(NextTask next_task);
private:
enum Flags {
kInDoWorkFlag = 1 << 0,
kPendingDoWorkFlag = 1 << 1,
kBoundFlag = 1 << 2,
};
enum State {
kUnbound = 0,
kIdle = Flags::kBoundFlag,
kDoWorkPending = Flags::kPendingDoWorkFlag | Flags::kBoundFlag,
kInDoWork = Flags::kInDoWorkFlag | Flags::kBoundFlag,
};
std::atomic<int> state_{State::kUnbound};
scoped_refptr<AssociatedThreadId> associated_thread_;
// TODO(alexclarke): Remove when the DoWork/DoDelayed work merger happens.
ShouldScheduleWork last_work_check_result_ = ShouldScheduleWork::kNotNeeded;
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_WORK_DEDUPLICATOR_H_

View file

@ -0,0 +1,320 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/work_queue.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "base/task/sequence_manager/work_queue_sets.h"
namespace base {
namespace sequence_manager {
namespace internal {
WorkQueue::WorkQueue(TaskQueueImpl* task_queue,
const char* name,
QueueType queue_type)
: task_queue_(task_queue), name_(name), queue_type_(queue_type) {}
void WorkQueue::AsValueInto(TimeTicks now,
trace_event::TracedValue* state) const {
for (const Task& task : tasks_) {
TaskQueueImpl::TaskAsValueInto(task, now, state);
}
}
WorkQueue::~WorkQueue() {
DCHECK(!work_queue_sets_) << task_queue_->GetName() << " : "
<< work_queue_sets_->GetName() << " : " << name_;
}
const Task* WorkQueue::GetFrontTask() const {
if (tasks_.empty())
return nullptr;
return &tasks_.front();
}
const Task* WorkQueue::GetBackTask() const {
if (tasks_.empty())
return nullptr;
return &tasks_.back();
}
bool WorkQueue::BlockedByFence() const {
if (!fence_)
return false;
// If the queue is empty then any future tasks will have a higher enqueue
// order and will be blocked. The queue is also blocked if the head is past
// the fence.
return tasks_.empty() || tasks_.front().enqueue_order() >= fence_;
}
bool WorkQueue::GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const {
if (tasks_.empty() || BlockedByFence())
return false;
// Quick sanity check.
DCHECK_LE(tasks_.front().enqueue_order(), tasks_.back().enqueue_order())
<< task_queue_->GetName() << " : " << work_queue_sets_->GetName() << " : "
<< name_;
*enqueue_order = tasks_.front().enqueue_order();
return true;
}
void WorkQueue::Push(Task task) {
bool was_empty = tasks_.empty();
#ifndef NDEBUG
DCHECK(task.enqueue_order_set());
#endif
// Make sure the |enqueue_order()| is monotonically increasing.
DCHECK(was_empty || tasks_.back().enqueue_order() < task.enqueue_order());
// Amortized O(1).
tasks_.push_back(std::move(task));
if (!was_empty)
return;
// If we hit the fence, pretend to WorkQueueSets that we're empty.
if (work_queue_sets_ && !BlockedByFence())
work_queue_sets_->OnTaskPushedToEmptyQueue(this);
}
WorkQueue::TaskPusher::TaskPusher(WorkQueue* work_queue)
: work_queue_(work_queue), was_empty_(work_queue->Empty()) {}
WorkQueue::TaskPusher::TaskPusher(TaskPusher&& other)
: work_queue_(other.work_queue_), was_empty_(other.was_empty_) {
other.work_queue_ = nullptr;
}
void WorkQueue::TaskPusher::Push(Task* task) {
DCHECK(work_queue_);
#ifndef NDEBUG
DCHECK(task->enqueue_order_set());
#endif
// Make sure the |enqueue_order()| is monotonically increasing.
DCHECK(work_queue_->tasks_.empty() ||
work_queue_->tasks_.back().enqueue_order() < task->enqueue_order());
// Amortized O(1).
work_queue_->tasks_.push_back(std::move(*task));
}
WorkQueue::TaskPusher::~TaskPusher() {
// If |work_queue_| became non-empty and it isn't blocked by a fence then we
// must notify |work_queue_->work_queue_sets_|.
if (was_empty_ && work_queue_ && !work_queue_->Empty() &&
work_queue_->work_queue_sets_ && !work_queue_->BlockedByFence()) {
work_queue_->work_queue_sets_->OnTaskPushedToEmptyQueue(work_queue_);
}
}
WorkQueue::TaskPusher WorkQueue::CreateTaskPusher() {
return TaskPusher(this);
}
void WorkQueue::PushNonNestableTaskToFront(Task task) {
DCHECK(task.nestable == Nestable::kNonNestable);
bool was_empty = tasks_.empty();
bool was_blocked = BlockedByFence();
#ifndef NDEBUG
DCHECK(task.enqueue_order_set());
#endif
if (!was_empty) {
// Make sure the |enqueue_order| is monotonically increasing.
DCHECK_LE(task.enqueue_order(), tasks_.front().enqueue_order())
<< task_queue_->GetName() << " : " << work_queue_sets_->GetName()
<< " : " << name_;
}
// Amortized O(1).
tasks_.push_front(std::move(task));
if (!work_queue_sets_)
return;
// Pretend to WorkQueueSets that nothing has changed if we're blocked.
if (BlockedByFence())
return;
// Pushing task to front may unblock the fence.
if (was_empty || was_blocked) {
work_queue_sets_->OnTaskPushedToEmptyQueue(this);
} else {
work_queue_sets_->OnQueuesFrontTaskChanged(this);
}
}
void WorkQueue::TakeImmediateIncomingQueueTasks() {
DCHECK(tasks_.empty());
task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
if (tasks_.empty())
return;
// If we hit the fence, pretend to WorkQueueSets that we're empty.
if (work_queue_sets_ && !BlockedByFence())
work_queue_sets_->OnTaskPushedToEmptyQueue(this);
}
Task WorkQueue::TakeTaskFromWorkQueue() {
DCHECK(work_queue_sets_);
DCHECK(!tasks_.empty());
Task pending_task = std::move(tasks_.front());
tasks_.pop_front();
// NB immediate tasks have a different pipeline to delayed ones.
if (tasks_.empty()) {
// NB delayed tasks are inserted via Push, so we don't need to reload those.
if (queue_type_ == QueueType::kImmediate) {
// Short-circuit the queue reload so that OnPopMinQueueInSet does the
// right thing.
task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
}
// Since the queue is empty, now is a good time to consider reducing its
// capacity if we're wasting memory.
tasks_.MaybeShrinkQueue();
}
DCHECK(work_queue_sets_);
#if DCHECK_IS_ON()
// If diagnostics are on it's possible task queues are being selected at
// random so we can't use the (slightly) more efficient OnPopMinQueueInSet.
work_queue_sets_->OnQueuesFrontTaskChanged(this);
#else
// OnPopMinQueueInSet calls GetFrontTaskEnqueueOrder which checks
// BlockedByFence() so we don't need to here.
work_queue_sets_->OnPopMinQueueInSet(this);
#endif
task_queue_->TraceQueueSize();
return pending_task;
}
bool WorkQueue::RemoveAllCanceledTasksFromFront() {
if (!work_queue_sets_)
return false;
bool task_removed = false;
while (!tasks_.empty() &&
(!tasks_.front().task || tasks_.front().task.IsCancelled())) {
tasks_.pop_front();
task_removed = true;
}
if (task_removed) {
if (tasks_.empty()) {
// NB delayed tasks are inserted via Push, so we don't need to reload those.
if (queue_type_ == QueueType::kImmediate) {
// Short-circuit the queue reload so that OnPopMinQueueInSet does the
// right thing.
task_queue_->TakeImmediateIncomingQueueTasks(&tasks_);
}
// Since the queue is empty, now is a good time to consider reducing its
// capacity if we're wasting memory.
tasks_.MaybeShrinkQueue();
}
// If we have a valid |heap_handle_| (i.e. we're not blocked by a fence or
// disabled) then |work_queue_sets_| needs to be told.
if (heap_handle_.IsValid())
work_queue_sets_->OnQueuesFrontTaskChanged(this);
task_queue_->TraceQueueSize();
}
return task_removed;
}
void WorkQueue::AssignToWorkQueueSets(WorkQueueSets* work_queue_sets) {
work_queue_sets_ = work_queue_sets;
}
void WorkQueue::AssignSetIndex(size_t work_queue_set_index) {
work_queue_set_index_ = work_queue_set_index;
}
bool WorkQueue::InsertFenceImpl(EnqueueOrder fence) {
DCHECK_NE(fence, 0u);
DCHECK(fence >= fence_ || fence == EnqueueOrder::blocking_fence());
bool was_blocked_by_fence = BlockedByFence();
fence_ = fence;
return was_blocked_by_fence;
}
void WorkQueue::InsertFenceSilently(EnqueueOrder fence) {
// Ensure that there is no fence present, or that the new one blocks the queue
// completely.
DCHECK(!fence_ || fence_ == EnqueueOrder::blocking_fence());
InsertFenceImpl(fence);
}
bool WorkQueue::InsertFence(EnqueueOrder fence) {
bool was_blocked_by_fence = InsertFenceImpl(fence);
if (!work_queue_sets_)
return false;
// Moving the fence forward may unblock some tasks.
if (!tasks_.empty() && was_blocked_by_fence && !BlockedByFence()) {
work_queue_sets_->OnTaskPushedToEmptyQueue(this);
return true;
}
// Fence insertion may have blocked all tasks in this work queue.
if (BlockedByFence())
work_queue_sets_->OnQueueBlocked(this);
return false;
}
bool WorkQueue::RemoveFence() {
bool was_blocked_by_fence = BlockedByFence();
fence_ = EnqueueOrder::none();
if (work_queue_sets_ && !tasks_.empty() && was_blocked_by_fence) {
work_queue_sets_->OnTaskPushedToEmptyQueue(this);
return true;
}
return false;
}
bool WorkQueue::ShouldRunBefore(const WorkQueue* other_queue) const {
DCHECK(!tasks_.empty());
DCHECK(!other_queue->tasks_.empty());
EnqueueOrder enqueue_order;
EnqueueOrder other_enqueue_order;
bool have_task = GetFrontTaskEnqueueOrder(&enqueue_order);
bool have_other_task =
other_queue->GetFrontTaskEnqueueOrder(&other_enqueue_order);
DCHECK(have_task);
DCHECK(have_other_task);
return enqueue_order < other_enqueue_order;
}
void WorkQueue::MaybeShrinkQueue() {
tasks_.MaybeShrinkQueue();
}
void WorkQueue::DeletePendingTasks() {
tasks_.clear();
if (work_queue_sets_ && heap_handle().IsValid())
work_queue_sets_->OnQueuesFrontTaskChanged(this);
DCHECK(!heap_handle_.IsValid());
}
void WorkQueue::PopTaskForTesting() {
if (tasks_.empty())
return;
tasks_.pop_front();
}
void WorkQueue::CollectTasksOlderThan(EnqueueOrder reference,
std::vector<const Task*>* result) const {
for (const Task& task : tasks_) {
if (task.enqueue_order() >= reference)
break;
result->push_back(&task);
}
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,192 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_
#define BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_
#include "base/base_export.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/sequence_manager/enqueue_order.h"
#include "base/task/sequence_manager/sequenced_task_source.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
namespace base {
namespace sequence_manager {
namespace internal {
class WorkQueueSets;
// This class keeps track of immediate and delayed tasks which are due to run
// now. It interfaces deeply with WorkQueueSets which keeps track of which queue
// (with a given priority) contains the oldest task.
//
// If a fence is inserted, WorkQueue behaves normally up until
// TakeTaskFromWorkQueue reaches or exceeds the fence. At that point the
// API subset used by WorkQueueSets pretends the WorkQueue is empty until the
// fence is removed. This functionality is a primitive intended for use by
// throttling mechanisms.
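//
// Example (illustrative sketch only): how a fence hides tasks from
// WorkQueueSets. MakeTaskWithEnqueueOrder() is a placeholder helper and the
// enqueue-order constants are assumptions for illustration.
//
//   work_queue->Push(MakeTaskWithEnqueueOrder(4));
//   work_queue->Push(MakeTaskWithEnqueueOrder(7));
//   work_queue->InsertFence(EnqueueOrder::FromIntForTesting(6));
//   // The task with enqueue order 4 is still runnable. Once it is taken, the
//   // front task (enqueue order 7) is at or past the fence, so
//   // BlockedByFence() returns true and the queue pretends to be empty until
//   // RemoveFence() is called.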
class BASE_EXPORT WorkQueue {
public:
using QueueType = internal::TaskQueueImpl::WorkQueueType;
// Note |task_queue| can be null if queue_type is kNonNestable.
WorkQueue(TaskQueueImpl* task_queue, const char* name, QueueType queue_type);
~WorkQueue();
// Associates this work queue with the given work queue sets. This must be
// called before any tasks can be inserted into this work queue.
void AssignToWorkQueueSets(WorkQueueSets* work_queue_sets);
// Assigns the current set index.
void AssignSetIndex(size_t work_queue_set_index);
void AsValueInto(TimeTicks now, trace_event::TracedValue* state) const;
// Returns true if |tasks_| is empty. This method ignores any fences.
bool Empty() const { return tasks_.empty(); }
// If |tasks_| isn't empty and a fence hasn't been reached,
// |enqueue_order| gets set to the enqueue order of the front task and the
// function returns true. Otherwise the function returns false.
bool GetFrontTaskEnqueueOrder(EnqueueOrder* enqueue_order) const;
// Returns the first task in this queue or null if the queue is empty. This
// method ignores any fences.
const Task* GetFrontTask() const;
// Returns the last task in this queue or null if the queue is empty. This
// method ignores any fences.
const Task* GetBackTask() const;
// Pushes the task onto |tasks_| and, if a fence hasn't been reached,
// informs the WorkQueueSets that the head changed.
void Push(Task task);
// RAII helper that helps efficiently push N Tasks to a WorkQueue.
class BASE_EXPORT TaskPusher {
public:
TaskPusher(const TaskPusher&) = delete;
TaskPusher(TaskPusher&& other);
~TaskPusher();
void Push(Task* task);
private:
friend class WorkQueue;
explicit TaskPusher(WorkQueue* work_queue);
WorkQueue* work_queue_;
const bool was_empty_;
};
// Returns an RAII helper to efficiently push multiple tasks.
TaskPusher CreateTaskPusher();
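// Example (illustrative sketch only): pushing a batch of tasks with a single
// WorkQueueSets notification when the TaskPusher goes out of scope. |tasks|
// is an assumed container of Task objects.
//
//   {
//     WorkQueue::TaskPusher pusher = work_queue->CreateTaskPusher();
//     for (Task& task : tasks)
//       pusher.Push(&task);
//   }  // ~TaskPusher() notifies WorkQueueSets if the queue became non-empty.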
// Pushes the task onto the front of |tasks_| and, if it's before any
// fence, informs the WorkQueueSets that the head changed. Use with caution:
// this API can easily lead to task starvation if misused.
void PushNonNestableTaskToFront(Task task);
// Reloads the empty |tasks_| with
// |task_queue_->TakeImmediateIncomingQueue| and, if a fence hasn't been
// reached, informs the WorkQueueSets that the head changed.
void TakeImmediateIncomingQueueTasks();
size_t Size() const { return tasks_.size(); }
size_t Capacity() const { return tasks_.capacity(); }
// Pulls a task off the |tasks_| and informs the WorkQueueSets. If the
// task removed had an enqueue order >= the current fence then WorkQueue
// pretends to be empty as far as the WorkQueueSets is concerned.
Task TakeTaskFromWorkQueue();
// Removes all canceled tasks from the head of the list. Returns true if any
// tasks were removed.
bool RemoveAllCanceledTasksFromFront();
const char* name() const { return name_; }
TaskQueueImpl* task_queue() const { return task_queue_; }
WorkQueueSets* work_queue_sets() const { return work_queue_sets_; }
size_t work_queue_set_index() const { return work_queue_set_index_; }
base::internal::HeapHandle heap_handle() const { return heap_handle_; }
void set_heap_handle(base::internal::HeapHandle handle) {
heap_handle_ = handle;
}
QueueType queue_type() const { return queue_type_; }
// Returns true if the front task in this queue has an older enqueue order
// than the front task of |other_queue|. Both queues are assumed to be
// non-empty. This method ignores any fences.
bool ShouldRunBefore(const WorkQueue* other_queue) const;
// Submit a fence. When TakeTaskFromWorkQueue encounters a task whose
// enqueue_order is >= |fence| then the WorkQueue will start pretending to be
// empty.
// Inserting a fence may supersede a previous one and unblock some tasks.
// Returns true if any tasks were unblocked, returns false otherwise.
bool InsertFence(EnqueueOrder fence);
// Submit a fence without triggering a WorkQueueSets notification.
// Caller must ensure that WorkQueueSets are properly updated.
// This method should not be called when a fence is already present.
void InsertFenceSilently(EnqueueOrder fence);
// Removes any fences that were added and if WorkQueue was pretending to be
// empty, then the real value is reported to WorkQueueSets. Returns true if
// any tasks were unblocked.
bool RemoveFence();
// Returns true if any tasks are blocked by the fence. Returns true if the
// queue is empty and fence has been set (i.e. future tasks would be blocked).
// Otherwise returns false.
bool BlockedByFence() const;
// Shrinks |tasks_| if it's wasting memory.
void MaybeShrinkQueue();
// Delete all tasks within this WorkQueue.
void DeletePendingTasks();
// Test support function. This should not be used in production code.
void PopTaskForTesting();
// Iterates through |tasks_| adding any that are older than |reference| to
// |result|.
void CollectTasksOlderThan(EnqueueOrder reference,
std::vector<const Task*>* result) const;
private:
bool InsertFenceImpl(EnqueueOrder fence);
TaskQueueImpl::TaskDeque tasks_;
WorkQueueSets* work_queue_sets_ = nullptr; // NOT OWNED.
TaskQueueImpl* const task_queue_; // NOT OWNED.
size_t work_queue_set_index_ = 0;
// Iff the queue isn't empty (and isn't pretending to be empty due to a fence)
// then |heap_handle_| will be valid and correspond to this queue's location
// within an IntrusiveHeap inside the WorkQueueSets.
base::internal::HeapHandle heap_handle_;
const char* const name_;
EnqueueOrder fence_;
const QueueType queue_type_;
DISALLOW_COPY_AND_ASSIGN(WorkQueue);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_H_

View file

@ -0,0 +1,255 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/sequence_manager/work_queue_sets.h"
#include "base/logging.h"
namespace base {
namespace sequence_manager {
namespace internal {
WorkQueueSets::WorkQueueSets(const char* name,
Observer* observer,
const SequenceManager::Settings& settings)
: name_(name),
#if DCHECK_IS_ON()
last_rand_(settings.random_task_selection_seed),
#endif
observer_(observer) {
}
WorkQueueSets::~WorkQueueSets() = default;
void WorkQueueSets::AddQueue(WorkQueue* work_queue, size_t set_index) {
DCHECK(!work_queue->work_queue_sets());
DCHECK_LT(set_index, work_queue_heaps_.size());
DCHECK(!work_queue->heap_handle().IsValid());
EnqueueOrder enqueue_order;
bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
work_queue->AssignToWorkQueueSets(this);
work_queue->AssignSetIndex(set_index);
if (!has_enqueue_order)
return;
bool was_empty = work_queue_heaps_[set_index].empty();
work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
if (was_empty)
observer_->WorkQueueSetBecameNonEmpty(set_index);
}
void WorkQueueSets::RemoveQueue(WorkQueue* work_queue) {
DCHECK_EQ(this, work_queue->work_queue_sets());
work_queue->AssignToWorkQueueSets(nullptr);
if (!work_queue->heap_handle().IsValid())
return;
size_t set_index = work_queue->work_queue_set_index();
DCHECK_LT(set_index, work_queue_heaps_.size());
work_queue_heaps_[set_index].erase(work_queue->heap_handle());
if (work_queue_heaps_[set_index].empty())
observer_->WorkQueueSetBecameEmpty(set_index);
DCHECK(!work_queue->heap_handle().IsValid());
}
void WorkQueueSets::ChangeSetIndex(WorkQueue* work_queue, size_t set_index) {
DCHECK_EQ(this, work_queue->work_queue_sets());
DCHECK_LT(set_index, work_queue_heaps_.size());
EnqueueOrder enqueue_order;
bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
size_t old_set = work_queue->work_queue_set_index();
DCHECK_LT(old_set, work_queue_heaps_.size());
DCHECK_NE(old_set, set_index);
work_queue->AssignSetIndex(set_index);
DCHECK_EQ(has_enqueue_order, work_queue->heap_handle().IsValid());
if (!has_enqueue_order)
return;
work_queue_heaps_[old_set].erase(work_queue->heap_handle());
bool was_empty = work_queue_heaps_[set_index].empty();
work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
if (work_queue_heaps_[old_set].empty())
observer_->WorkQueueSetBecameEmpty(old_set);
if (was_empty)
observer_->WorkQueueSetBecameNonEmpty(set_index);
}
void WorkQueueSets::OnQueuesFrontTaskChanged(WorkQueue* work_queue) {
EnqueueOrder enqueue_order;
size_t set_index = work_queue->work_queue_set_index();
DCHECK_EQ(this, work_queue->work_queue_sets());
DCHECK_LT(set_index, work_queue_heaps_.size());
DCHECK(work_queue->heap_handle().IsValid());
DCHECK(!work_queue_heaps_[set_index].empty()) << " set_index = " << set_index;
if (work_queue->GetFrontTaskEnqueueOrder(&enqueue_order)) {
// O(log n)
work_queue_heaps_[set_index].ChangeKey(work_queue->heap_handle(),
{enqueue_order, work_queue});
} else {
// O(log n)
work_queue_heaps_[set_index].erase(work_queue->heap_handle());
DCHECK(!work_queue->heap_handle().IsValid());
if (work_queue_heaps_[set_index].empty())
observer_->WorkQueueSetBecameEmpty(set_index);
}
}
void WorkQueueSets::OnTaskPushedToEmptyQueue(WorkQueue* work_queue) {
// NOTE if this function changes, we need to keep |WorkQueueSets::AddQueue| in
// sync.
DCHECK_EQ(this, work_queue->work_queue_sets());
EnqueueOrder enqueue_order;
bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
DCHECK(has_enqueue_order);
size_t set_index = work_queue->work_queue_set_index();
DCHECK_LT(set_index, work_queue_heaps_.size())
<< " set_index = " << set_index;
// |work_queue| should not be in work_queue_heaps_[set_index].
DCHECK(!work_queue->heap_handle().IsValid());
bool was_empty = work_queue_heaps_[set_index].empty();
work_queue_heaps_[set_index].insert({enqueue_order, work_queue});
if (was_empty)
observer_->WorkQueueSetBecameNonEmpty(set_index);
}
void WorkQueueSets::OnPopMinQueueInSet(WorkQueue* work_queue) {
// Assume that |work_queue| contains the lowest enqueue_order.
size_t set_index = work_queue->work_queue_set_index();
DCHECK_EQ(this, work_queue->work_queue_sets());
DCHECK_LT(set_index, work_queue_heaps_.size());
DCHECK(!work_queue_heaps_[set_index].empty()) << " set_index = " << set_index;
DCHECK_EQ(work_queue_heaps_[set_index].Min().value, work_queue)
<< " set_index = " << set_index;
DCHECK(work_queue->heap_handle().IsValid());
EnqueueOrder enqueue_order;
if (work_queue->GetFrontTaskEnqueueOrder(&enqueue_order)) {
// O(log n)
work_queue_heaps_[set_index].ReplaceMin({enqueue_order, work_queue});
} else {
// O(log n)
work_queue_heaps_[set_index].Pop();
DCHECK(!work_queue->heap_handle().IsValid());
DCHECK(work_queue_heaps_[set_index].empty() ||
work_queue_heaps_[set_index].Min().value != work_queue);
if (work_queue_heaps_[set_index].empty()) {
observer_->WorkQueueSetBecameEmpty(set_index);
}
}
}
void WorkQueueSets::OnQueueBlocked(WorkQueue* work_queue) {
DCHECK_EQ(this, work_queue->work_queue_sets());
base::internal::HeapHandle heap_handle = work_queue->heap_handle();
if (!heap_handle.IsValid())
return;
size_t set_index = work_queue->work_queue_set_index();
DCHECK_LT(set_index, work_queue_heaps_.size());
work_queue_heaps_[set_index].erase(heap_handle);
if (work_queue_heaps_[set_index].empty())
observer_->WorkQueueSetBecameEmpty(set_index);
}
WorkQueue* WorkQueueSets::GetOldestQueueInSet(size_t set_index) const {
DCHECK_LT(set_index, work_queue_heaps_.size());
if (work_queue_heaps_[set_index].empty())
return nullptr;
WorkQueue* queue = work_queue_heaps_[set_index].Min().value;
DCHECK_EQ(set_index, queue->work_queue_set_index());
DCHECK(queue->heap_handle().IsValid());
return queue;
}
WorkQueue* WorkQueueSets::GetOldestQueueAndEnqueueOrderInSet(
size_t set_index,
EnqueueOrder* out_enqueue_order) const {
DCHECK_LT(set_index, work_queue_heaps_.size());
if (work_queue_heaps_[set_index].empty())
return nullptr;
const OldestTaskEnqueueOrder& oldest = work_queue_heaps_[set_index].Min();
DCHECK(oldest.value->heap_handle().IsValid());
*out_enqueue_order = oldest.key;
EnqueueOrder enqueue_order;
DCHECK(oldest.value->GetFrontTaskEnqueueOrder(&enqueue_order) &&
oldest.key == enqueue_order);
return oldest.value;
}
#if DCHECK_IS_ON()
WorkQueue* WorkQueueSets::GetRandomQueueInSet(size_t set_index) const {
DCHECK_LT(set_index, work_queue_heaps_.size());
if (work_queue_heaps_[set_index].empty())
return nullptr;
WorkQueue* queue =
work_queue_heaps_[set_index]
.begin()[Random() % work_queue_heaps_[set_index].size()]
.value;
DCHECK_EQ(set_index, queue->work_queue_set_index());
DCHECK(queue->heap_handle().IsValid());
return queue;
}
WorkQueue* WorkQueueSets::GetRandomQueueAndEnqueueOrderInSet(
size_t set_index,
EnqueueOrder* out_enqueue_order) const {
DCHECK_LT(set_index, work_queue_heaps_.size());
if (work_queue_heaps_[set_index].empty())
return nullptr;
const OldestTaskEnqueueOrder& chosen =
work_queue_heaps_[set_index]
.begin()[Random() % work_queue_heaps_[set_index].size()];
*out_enqueue_order = chosen.key;
EnqueueOrder enqueue_order;
DCHECK(chosen.value->GetFrontTaskEnqueueOrder(&enqueue_order) &&
chosen.key == enqueue_order);
return chosen.value;
}
#endif
bool WorkQueueSets::IsSetEmpty(size_t set_index) const {
DCHECK_LT(set_index, work_queue_heaps_.size())
<< " set_index = " << set_index;
return work_queue_heaps_[set_index].empty();
}
#if DCHECK_IS_ON() || !defined(NDEBUG)
bool WorkQueueSets::ContainsWorkQueueForTest(
const WorkQueue* work_queue) const {
EnqueueOrder enqueue_order;
bool has_enqueue_order = work_queue->GetFrontTaskEnqueueOrder(&enqueue_order);
for (const base::internal::IntrusiveHeap<OldestTaskEnqueueOrder>& heap :
work_queue_heaps_) {
for (const OldestTaskEnqueueOrder& heap_value_pair : heap) {
if (heap_value_pair.value == work_queue) {
DCHECK(has_enqueue_order);
DCHECK_EQ(heap_value_pair.key, enqueue_order);
DCHECK_EQ(this, work_queue->work_queue_sets());
return true;
}
}
}
if (work_queue->work_queue_sets() == this) {
DCHECK(!has_enqueue_order);
return true;
}
return false;
}
#endif
void WorkQueueSets::CollectSkippedOverLowerPriorityTasks(
const internal::WorkQueue* selected_work_queue,
std::vector<const Task*>* result) const {
EnqueueOrder selected_enqueue_order;
CHECK(selected_work_queue->GetFrontTaskEnqueueOrder(&selected_enqueue_order));
for (size_t priority = selected_work_queue->work_queue_set_index() + 1;
priority < TaskQueue::kQueuePriorityCount; priority++) {
for (const OldestTaskEnqueueOrder& pair : work_queue_heaps_[priority]) {
pair.value->CollectTasksOlderThan(selected_enqueue_order, result);
}
}
}
} // namespace internal
} // namespace sequence_manager
} // namespace base

View file

@ -0,0 +1,162 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_
#define BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_
#include <array>
#include <map>
#include "base/base_export.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/sequence_manager/sequence_manager.h"
#include "base/task/sequence_manager/task_queue_impl.h"
#include "base/task/sequence_manager/work_queue.h"
#include "base/trace_event/traced_value.h"
namespace base {
namespace sequence_manager {
namespace internal {
// There is a WorkQueueSet for each scheduler priority and each WorkQueueSet
// uses an IntrusiveHeap, keyed by enqueue order, to keep track of which queue
// in the set has the oldest task (i.e. the one that should be run next if the
// TaskQueueSelector chooses to run a task at a given priority). The reason
// this works is because the heap keeps the WorkQueue with the smallest
// enqueue order at its root.
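//
// Example (illustrative sketch only): how a selector might consult the sets to
// pick the next queue to run. |kHighPrioritySetIndex| is an assumed set index.
//
//   constexpr size_t kHighPrioritySetIndex = 0;
//   EnqueueOrder enqueue_order;
//   WorkQueue* queue = work_queue_sets.GetOldestQueueAndEnqueueOrderInSet(
//       kHighPrioritySetIndex, &enqueue_order);
//   if (queue) {
//     Task task = queue->TakeTaskFromWorkQueue();
//     // TakeTaskFromWorkQueue() re-keys or pops this queue's heap entry, so
//     // the sets stay consistent without further calls here.
//   }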
class BASE_EXPORT WorkQueueSets {
public:
class Observer {
public:
virtual ~Observer() {}
virtual void WorkQueueSetBecameEmpty(size_t set_index) = 0;
virtual void WorkQueueSetBecameNonEmpty(size_t set_index) = 0;
};
WorkQueueSets(const char* name,
Observer* observer,
const SequenceManager::Settings& settings);
~WorkQueueSets();
// O(log num queues)
void AddQueue(WorkQueue* queue, size_t set_index);
// O(log num queues)
void RemoveQueue(WorkQueue* work_queue);
// O(log num queues)
void ChangeSetIndex(WorkQueue* queue, size_t set_index);
// O(log num queues)
void OnQueuesFrontTaskChanged(WorkQueue* queue);
// O(log num queues)
void OnTaskPushedToEmptyQueue(WorkQueue* work_queue);
// If empty it's O(1) amortized, otherwise it's O(log num queues). Slightly
// faster on average than OnQueuesFrontTaskChanged.
// Assumes |work_queue| contains the lowest enqueue order in the set.
void OnPopMinQueueInSet(WorkQueue* work_queue);
// O(log num queues)
void OnQueueBlocked(WorkQueue* work_queue);
// O(1)
WorkQueue* GetOldestQueueInSet(size_t set_index) const;
// O(1)
WorkQueue* GetOldestQueueAndEnqueueOrderInSet(
size_t set_index,
EnqueueOrder* out_enqueue_order) const;
#if DCHECK_IS_ON()
// O(1)
WorkQueue* GetRandomQueueInSet(size_t set_index) const;
// O(1)
WorkQueue* GetRandomQueueAndEnqueueOrderInSet(
size_t set_index,
EnqueueOrder* out_enqueue_order) const;
#endif
// O(1)
bool IsSetEmpty(size_t set_index) const;
#if DCHECK_IS_ON() || !defined(NDEBUG)
// Note this iterates over everything in |work_queue_heaps_|.
// It's intended for use with DCHECKs and for testing.
bool ContainsWorkQueueForTest(const WorkQueue* queue) const;
#endif
const char* GetName() const { return name_; }
// Collects ready tasks that were skipped over when |selected_work_queue|
// was selected. Note this is somewhat expensive.
void CollectSkippedOverLowerPriorityTasks(
const internal::WorkQueue* selected_work_queue,
std::vector<const Task*>* result) const;
private:
struct OldestTaskEnqueueOrder {
EnqueueOrder key;
WorkQueue* value;
bool operator<=(const OldestTaskEnqueueOrder& other) const {
return key <= other.key;
}
void SetHeapHandle(base::internal::HeapHandle handle) {
value->set_heap_handle(handle);
}
void ClearHeapHandle() {
value->set_heap_handle(base::internal::HeapHandle());
}
base::internal::HeapHandle GetHeapHandle() const {
return value->heap_handle();
}
};
const char* const name_;
// For each set, |work_queue_heaps_| has a heap of WorkQueues ordered by the
// oldest task in each WorkQueue.
std::array<base::internal::IntrusiveHeap<OldestTaskEnqueueOrder>,
TaskQueue::kQueuePriorityCount>
work_queue_heaps_;
#if DCHECK_IS_ON()
static inline uint64_t MurmurHash3(uint64_t value) {
value ^= value >> 33;
value *= uint64_t{0xFF51AFD7ED558CCD};
value ^= value >> 33;
value *= uint64_t{0xC4CEB9FE1A85EC53};
value ^= value >> 33;
return value;
}
// This is for a debugging feature which lets us randomize task selection. It's
// not for production use.
// TODO(alexclarke): Use a seedable PRNG from ::base if one is added.
uint64_t Random() const {
last_rand_ = MurmurHash3(last_rand_);
return last_rand_;
}
mutable uint64_t last_rand_;
#endif
Observer* const observer_;
DISALLOW_COPY_AND_ASSIGN(WorkQueueSets);
};
} // namespace internal
} // namespace sequence_manager
} // namespace base
#endif // BASE_TASK_SEQUENCE_MANAGER_WORK_QUEUE_SETS_H_

View file

@ -0,0 +1,62 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/simple_task_executor.h"
namespace base {
SimpleTaskExecutor::SimpleTaskExecutor(
scoped_refptr<SingleThreadTaskRunner> task_queue)
: task_queue_(std::move(task_queue)),
previous_task_executor_(GetTaskExecutorForCurrentThread()) {
DCHECK(task_queue_);
// The TaskExecutor API does not expect nesting, but this can happen in tests
// so we have to work around it here.
if (previous_task_executor_)
SetTaskExecutorForCurrentThread(nullptr);
SetTaskExecutorForCurrentThread(this);
}
SimpleTaskExecutor::~SimpleTaskExecutor() {
if (previous_task_executor_)
SetTaskExecutorForCurrentThread(nullptr);
SetTaskExecutorForCurrentThread(previous_task_executor_);
}
bool SimpleTaskExecutor::PostDelayedTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
TimeDelta delay) {
return task_queue_->PostDelayedTask(from_here, std::move(task), delay);
}
scoped_refptr<TaskRunner> SimpleTaskExecutor::CreateTaskRunner(
const TaskTraits& traits) {
return task_queue_;
}
scoped_refptr<SequencedTaskRunner>
SimpleTaskExecutor::CreateSequencedTaskRunner(const TaskTraits& traits) {
return task_queue_;
}
scoped_refptr<SingleThreadTaskRunner>
SimpleTaskExecutor::CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
return task_queue_;
}
#if defined(OS_WIN)
scoped_refptr<SingleThreadTaskRunner>
SimpleTaskExecutor::CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
// It seems pretty unlikely this will be used on a COM STA task thread.
NOTREACHED();
return task_queue_;
}
#endif // defined(OS_WIN)
} // namespace base

View file

@ -0,0 +1,52 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SIMPLE_TASK_EXECUTOR_H_
#define BASE_TASK_SIMPLE_TASK_EXECUTOR_H_
#include "base/task/task_executor.h"
#include "build/build_config.h"
namespace base {
// A simple TaskExecutor with exactly one SingleThreadTaskRunner.
// Must be instantiated and destroyed on the thread that runs tasks for the
// SingleThreadTaskRunner.
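//
// Example (illustrative sketch only): adapting an existing task runner so
// thread-local TaskExecutor lookups resolve to it for the executor's
// lifetime. |my_task_runner| is an assumption for illustration.
//
//   SimpleTaskExecutor executor(my_task_runner);
//   // APIs that consult GetTaskExecutorForCurrentThread() now route through
//   // |my_task_runner| until |executor| is destroyed.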
class BASE_EXPORT SimpleTaskExecutor : public TaskExecutor {
public:
explicit SimpleTaskExecutor(scoped_refptr<SingleThreadTaskRunner> task_queue);
~SimpleTaskExecutor() override;
bool PostDelayedTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
TimeDelta delay) override;
scoped_refptr<TaskRunner> CreateTaskRunner(const TaskTraits& traits) override;
scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
const TaskTraits& traits) override;
scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) override;
#if defined(OS_WIN)
scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) override;
#endif // defined(OS_WIN)
private:
const scoped_refptr<SingleThreadTaskRunner> task_queue_;
// In tests there may already be a TaskExecutor registered for the thread; we
// keep track of the previous TaskExecutor and restore it upon destruction.
TaskExecutor* const previous_task_executor_;
};
} // namespace base
#endif // BASE_TASK_SIMPLE_TASK_EXECUTOR_H_

View file

@ -0,0 +1,50 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/single_thread_task_executor.h"
#include "base/message_loop/message_pump.h"
#include "base/message_loop/message_pump_type.h"
#include "base/task/sequence_manager/sequence_manager.h"
#include "base/task/sequence_manager/sequence_manager_impl.h"
#include "build/build_config.h"
namespace base {
SingleThreadTaskExecutor::SingleThreadTaskExecutor(MessagePumpType type)
: SingleThreadTaskExecutor(type, MessagePump::Create(type)) {
DCHECK_NE(type, MessagePumpType::CUSTOM);
}
SingleThreadTaskExecutor::SingleThreadTaskExecutor(
std::unique_ptr<MessagePump> pump)
: SingleThreadTaskExecutor(MessagePumpType::CUSTOM, std::move(pump)) {}
SingleThreadTaskExecutor::SingleThreadTaskExecutor(
MessagePumpType type,
std::unique_ptr<MessagePump> pump)
: sequence_manager_(sequence_manager::CreateUnboundSequenceManager(
sequence_manager::SequenceManager::Settings::Builder()
.SetMessagePumpType(type)
.Build())),
default_task_queue_(sequence_manager_->CreateTaskQueue(
sequence_manager::TaskQueue::Spec("default_tq"))),
type_(type),
simple_task_executor_(task_runner()) {
sequence_manager_->SetDefaultTaskRunner(default_task_queue_->task_runner());
sequence_manager_->BindToMessagePump(std::move(pump));
}
SingleThreadTaskExecutor::~SingleThreadTaskExecutor() = default;
scoped_refptr<SingleThreadTaskRunner> SingleThreadTaskExecutor::task_runner()
const {
return default_task_queue_->task_runner();
}
void SingleThreadTaskExecutor::SetWorkBatchSize(size_t work_batch_size) {
sequence_manager_->SetWorkBatchSize(work_batch_size);
}
} // namespace base

View file

@ -0,0 +1,67 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SINGLE_THREAD_TASK_EXECUTOR_H_
#define BASE_TASK_SINGLE_THREAD_TASK_EXECUTOR_H_
#include <memory>
#include "base/base_export.h"
#include "base/memory/scoped_refptr.h"
#include "base/message_loop/message_pump_type.h"
#include "base/single_thread_task_runner.h"
#include "base/task/simple_task_executor.h"
namespace base {
class MessagePump;
namespace sequence_manager {
class SequenceManager;
class TaskQueue;
} // namespace sequence_manager
// A simple single thread TaskExecutor intended for non-test usage. Tests should
// generally use TaskEnvironment or BrowserTaskEnvironment instead.
// TODO(alexclarke): Inherit from TaskExecutor to support base::Here().
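//
// Example (illustrative sketch only): pumping tasks on the current thread,
// assuming the usual base headers (run_loop.h, bind.h) are included.
//
//   SingleThreadTaskExecutor executor(MessagePumpType::DEFAULT);
//   RunLoop run_loop;
//   executor.task_runner()->PostTask(
//       FROM_HERE, BindOnce([] { /* Do main-thread work here. */ }));
//   executor.task_runner()->PostTask(FROM_HERE, run_loop.QuitClosure());
//   run_loop.Run();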
class BASE_EXPORT SingleThreadTaskExecutor {
public:
// For MessagePumpType::CUSTOM use the constructor that takes a pump.
explicit SingleThreadTaskExecutor(
MessagePumpType type = MessagePumpType::DEFAULT);
// Creates a SingleThreadTaskExecutor pumping from a custom |pump|.
// The above constructor using MessagePumpType is generally preferred.
explicit SingleThreadTaskExecutor(std::unique_ptr<MessagePump> pump);
// Shuts down the SingleThreadTaskExecutor; after this no tasks can be
// executed, and the base::TaskExecutor APIs are non-functional but won't
// crash if called.
~SingleThreadTaskExecutor();
scoped_refptr<SingleThreadTaskRunner> task_runner() const;
MessagePumpType type() const { return type_; }
// Sets the number of application tasks executed every time the MessagePump
// asks its delegate to DoWork(). Defaults to 1. Can be increased in some
// scenarios where the native pump (i.e. not MessagePumpType::DEFAULT) has
// high overhead and yielding to native isn't critical.
void SetWorkBatchSize(size_t work_batch_size);
private:
explicit SingleThreadTaskExecutor(MessagePumpType type,
std::unique_ptr<MessagePump> pump);
std::unique_ptr<sequence_manager::SequenceManager> sequence_manager_;
scoped_refptr<sequence_manager::TaskQueue> default_task_queue_;
MessagePumpType type_;
SimpleTaskExecutor simple_task_executor_;
DISALLOW_COPY_AND_ASSIGN(SingleThreadTaskExecutor);
};
} // namespace base
#endif // BASE_TASK_SINGLE_THREAD_TASK_EXECUTOR_H_

View file

@ -0,0 +1,23 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_SINGLE_THREAD_TASK_RUNNER_THREAD_MODE_H_
#define BASE_TASK_SINGLE_THREAD_TASK_RUNNER_THREAD_MODE_H_
namespace base {
enum class SingleThreadTaskRunnerThreadMode {
// Allow the SingleThreadTaskRunner's thread to be shared with others,
// allowing for efficient use of thread resources when this
// SingleThreadTaskRunner is idle. This is the default mode and is
// recommended for thread-affine code.
SHARED,
// Create a new thread, dedicated to this SingleThreadTaskRunner, and tear it
// down when the last reference to the TaskRunner is dropped.
DEDICATED,
};
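// Example (illustrative sketch only): passing the mode to the post_task.h
// helper of this era; the exact traits are assumptions for illustration.
//
//   scoped_refptr<SingleThreadTaskRunner> runner =
//       CreateSingleThreadTaskRunner(
//           {ThreadPool(), MayBlock()},
//           SingleThreadTaskRunnerThreadMode::DEDICATED);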
} // namespace base
#endif // BASE_TASK_SINGLE_THREAD_TASK_RUNNER_THREAD_MODE_H_

View file

@ -0,0 +1,77 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/task_executor.h"
#include <type_traits>
#include "base/no_destructor.h"
#include "base/task/task_traits.h"
#include "base/task/task_traits_extension.h"
#include "base/threading/thread_local.h"
namespace base {
namespace {
// Maps TaskTraits extension IDs to registered TaskExecutors. Index |n|
// corresponds to id |n + 1|.
using TaskExecutorMap =
std::array<TaskExecutor*, TaskTraitsExtensionStorage::kMaxExtensionId>;
TaskExecutorMap* GetTaskExecutorMap() {
static_assert(std::is_trivially_destructible<TaskExecutorMap>::value,
"TaskExecutorMap not trivially destructible");
static TaskExecutorMap executors{};
return &executors;
}
static_assert(
TaskTraitsExtensionStorage::kInvalidExtensionId == 0,
"TaskExecutorMap depends on 0 being an invalid TaskTraits extension ID");
} // namespace
ThreadLocalPointer<TaskExecutor>* GetTLSForCurrentTaskExecutor() {
static NoDestructor<ThreadLocalPointer<TaskExecutor>> instance;
return instance.get();
}
void SetTaskExecutorForCurrentThread(TaskExecutor* task_executor) {
DCHECK(!task_executor || !GetTLSForCurrentTaskExecutor()->Get() ||
GetTLSForCurrentTaskExecutor()->Get() == task_executor);
GetTLSForCurrentTaskExecutor()->Set(task_executor);
}
TaskExecutor* GetTaskExecutorForCurrentThread() {
return GetTLSForCurrentTaskExecutor()->Get();
}
void RegisterTaskExecutor(uint8_t extension_id, TaskExecutor* task_executor) {
DCHECK_NE(extension_id, TaskTraitsExtensionStorage::kInvalidExtensionId);
DCHECK_LE(extension_id, TaskTraitsExtensionStorage::kMaxExtensionId);
DCHECK_EQ((*GetTaskExecutorMap())[extension_id - 1], nullptr);
(*GetTaskExecutorMap())[extension_id - 1] = task_executor;
}
void UnregisterTaskExecutorForTesting(uint8_t extension_id) {
DCHECK_NE(extension_id, TaskTraitsExtensionStorage::kInvalidExtensionId);
DCHECK_LE(extension_id, TaskTraitsExtensionStorage::kMaxExtensionId);
DCHECK_NE((*GetTaskExecutorMap())[extension_id - 1], nullptr);
(*GetTaskExecutorMap())[extension_id - 1] = nullptr;
}
TaskExecutor* GetRegisteredTaskExecutorForTraits(const TaskTraits& traits) {
uint8_t extension_id = traits.extension_id();
if (extension_id != TaskTraitsExtensionStorage::kInvalidExtensionId) {
TaskExecutor* executor = (*GetTaskExecutorMap())[extension_id - 1];
DCHECK(executor);
return executor;
}
return nullptr;
}
} // namespace base

View file

@ -0,0 +1,90 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_TASK_EXECUTOR_H_
#define BASE_TASK_TASK_EXECUTOR_H_
#include <stdint.h>
#include "base/base_export.h"
#include "base/memory/ref_counted.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/task/single_thread_task_runner_thread_mode.h"
#include "base/task_runner.h"
#include "build/build_config.h"
namespace base {
class Location;
class TaskTraits;
// A TaskExecutor can execute Tasks with a specific TaskTraits extension id. To
// handle Tasks posted via the //base/task/post_task.h API, the TaskExecutor
// should be registered by calling RegisterTaskExecutor().
class BASE_EXPORT TaskExecutor {
public:
virtual ~TaskExecutor() = default;
// Posts |task| with a |delay| and specific |traits|; |delay| can be zero.
// Intended for one-off tasks that don't require a TaskRunner. Returns false
// if the task
// definitely won't run because of current shutdown state.
virtual bool PostDelayedTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
TimeDelta delay) = 0;
// Returns a TaskRunner whose PostTask invocations result in scheduling tasks
// using |traits|. Tasks may run in any order and in parallel.
virtual scoped_refptr<TaskRunner> CreateTaskRunner(
const TaskTraits& traits) = 0;
// Returns a SequencedTaskRunner whose PostTask invocations result in
// scheduling tasks using |traits|. Tasks run one at a time in posting order.
virtual scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
const TaskTraits& traits) = 0;
// Returns a SingleThreadTaskRunner whose PostTask invocations result in
// scheduling tasks using |traits|. Tasks run on a single thread in posting
// order. If |traits| identifies an existing thread,
// SingleThreadTaskRunnerThreadMode::SHARED must be used.
virtual scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) = 0;
#if defined(OS_WIN)
// Returns a SingleThreadTaskRunner whose PostTask invocations result in
// scheduling tasks using |traits| in a COM Single-Threaded Apartment. Tasks
// run in the same Single-Threaded Apartment in posting order for the returned
// SingleThreadTaskRunner. If |traits| identifies an existing thread,
// SingleThreadTaskRunnerThreadMode::SHARED must be used.
virtual scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) = 0;
#endif // defined(OS_WIN)
};
// Register a TaskExecutor with the //base/task/post_task.h API in the current
// process for tasks subsequently posted with a TaskTraits extension with the
// given |extension_id|. All executors need to be registered before any tasks
// are posted with |extension_id|. Only one executor per |extension_id| is
// supported.
void BASE_EXPORT RegisterTaskExecutor(uint8_t extension_id,
TaskExecutor* task_executor);
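//
// Example (illustrative sketch only): an embedder wiring up its executor.
// |kMyEmbedderExtensionId| and |my_executor| are assumptions for illustration.
//
//   constexpr uint8_t kMyEmbedderExtensionId = 1;
//   RegisterTaskExecutor(kMyEmbedderExtensionId, my_executor);
//   // Tasks subsequently posted via post_task.h with traits carrying this
//   // extension id are routed to |my_executor|.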
void BASE_EXPORT UnregisterTaskExecutorForTesting(uint8_t extension_id);
// Stores the provided TaskExecutor in TLS for the current thread, to be used by
// tasks with the CurrentThread() trait.
void BASE_EXPORT SetTaskExecutorForCurrentThread(TaskExecutor* task_executor);
// Returns the task executor registered for the current thread.
BASE_EXPORT TaskExecutor* GetTaskExecutorForCurrentThread();
// Determines whether a registered TaskExecutor will handle tasks with the given
// |traits| and, if so, returns a pointer to it. Otherwise, returns |nullptr|.
TaskExecutor* GetRegisteredTaskExecutorForTraits(const TaskTraits& traits);
} // namespace base
#endif // BASE_TASK_TASK_EXECUTOR_H_

View file

@ -0,0 +1,31 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/task_features.h"
#include "base/feature_list.h"
namespace base {
const Feature kAllTasksUserBlocking{"AllTasksUserBlocking",
FEATURE_DISABLED_BY_DEFAULT};
const Feature kNoDetachBelowInitialCapacity = {
"NoDetachBelowInitialCapacity", base::FEATURE_DISABLED_BY_DEFAULT};
const Feature kMayBlockWithoutDelay = {"MayBlockWithoutDelay",
base::FEATURE_DISABLED_BY_DEFAULT};
const Feature kFixedMaxBestEffortTasks = {"FixedMaxBestEffortTasks",
base::FEATURE_DISABLED_BY_DEFAULT};
#if defined(OS_WIN) || defined(OS_MACOSX)
const Feature kUseNativeThreadPool = {"UseNativeThreadPool",
base::FEATURE_DISABLED_BY_DEFAULT};
#endif
const Feature kUseFiveMinutesThreadReclaimTime = {
"UseFiveMinutesThreadReclaimTime", base::FEATURE_DISABLED_BY_DEFAULT};
} // namespace base

View file

@ -0,0 +1,51 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_TASK_FEATURES_H_
#define BASE_TASK_TASK_FEATURES_H_
#include "base/base_export.h"
#include "base/metrics/field_trial_params.h"
#include "build/build_config.h"
namespace base {
struct Feature;
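// Under this feature, all posted tasks run at USER_BLOCKING priority,
// regardless of the TaskPriority specified in their traits.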
extern const BASE_EXPORT Feature kAllTasksUserBlocking;
// Under this feature, unused threads in ThreadGroup are only detached
// if the total number of threads in the pool is above the initial capacity.
extern const BASE_EXPORT Feature kNoDetachBelowInitialCapacity;
// Under this feature, workers blocked with MayBlock are replaced immediately
// instead of waiting for a threshold in the foreground thread group.
extern const BASE_EXPORT Feature kMayBlockWithoutDelay;
// Under this feature, best effort capacity is never increased.
// While it's unlikely we'd ship this as-is, this experiment allows us to
// determine whether blocked worker replacement logic on best-effort tasks has
// any impact on guardian metrics.
extern const BASE_EXPORT Feature kFixedMaxBestEffortTasks;
#if defined(OS_WIN) || defined(OS_MACOSX)
#define HAS_NATIVE_THREAD_POOL() 1
#else
#define HAS_NATIVE_THREAD_POOL() 0
#endif
#if HAS_NATIVE_THREAD_POOL()
// Under this feature, ThreadPoolImpl will use a ThreadGroup backed by a
// native thread pool implementation. The Windows Thread Pool API and
// libdispatch are used on Windows and macOS/iOS respectively.
extern const BASE_EXPORT Feature kUseNativeThreadPool;
#endif
// Whether threads in the ThreadPool should be reclaimed after being idle for 5
// minutes, instead of 30 seconds.
extern const BASE_EXPORT Feature kUseFiveMinutesThreadReclaimTime;
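// E.g., a sketch of how an implementation might consult one of these
// features at runtime:
//
//   if (base::FeatureList::IsEnabled(kMayBlockWithoutDelay)) {
//     // Replace workers blocked with MayBlock() immediately.
//   }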
} // namespace base
#endif // BASE_TASK_TASK_FEATURES_H_

View file

@ -0,0 +1,35 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_TASK_OBSERVER_H_
#define BASE_TASK_TASK_OBSERVER_H_
#include "base/base_export.h"
#include "base/pending_task.h"
namespace base {
// A TaskObserver is an object that receives notifications about tasks being
// processed on the thread it's associated with.
//
// NOTE: A TaskObserver implementation should be extremely fast!
class BASE_EXPORT TaskObserver {
public:
// This method is called before processing a task.
// |was_blocked_or_low_priority| indicates if the task was at some point in a
// queue that was blocked or less important than "normal".
virtual void WillProcessTask(const PendingTask& pending_task,
bool was_blocked_or_low_priority) = 0;
// This method is called after processing a task.
virtual void DidProcessTask(const PendingTask& pending_task) = 0;
protected:
virtual ~TaskObserver() = default;
};
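// E.g., a minimal sketch of an observer that records task duration
// (|LoggingTaskObserver| and the histogram name are illustrative):
//
//   class LoggingTaskObserver : public base::TaskObserver {
//    public:
//     void WillProcessTask(const base::PendingTask& pending_task,
//                          bool was_blocked_or_low_priority) override {
//       start_ = base::TimeTicks::Now();
//     }
//     void DidProcessTask(const base::PendingTask& pending_task) override {
//       UMA_HISTOGRAM_TIMES("MyEmbedder.TaskDuration",
//                           base::TimeTicks::Now() - start_);
//     }
//
//    private:
//     base::TimeTicks start_;
//   };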
} // namespace base
#endif // BASE_TASK_TASK_OBSERVER_H_

View file

@ -0,0 +1,53 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/task_traits.h"
#include <stddef.h>
#include <ostream>
#include "base/logging.h"
namespace base {
const char* TaskPriorityToString(TaskPriority task_priority) {
switch (task_priority) {
case TaskPriority::BEST_EFFORT:
return "BEST_EFFORT";
case TaskPriority::USER_VISIBLE:
return "USER_VISIBLE";
case TaskPriority::USER_BLOCKING:
return "USER_BLOCKING";
}
NOTREACHED();
return "";
}
const char* TaskShutdownBehaviorToString(
TaskShutdownBehavior shutdown_behavior) {
switch (shutdown_behavior) {
case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN:
return "CONTINUE_ON_SHUTDOWN";
case TaskShutdownBehavior::SKIP_ON_SHUTDOWN:
return "SKIP_ON_SHUTDOWN";
case TaskShutdownBehavior::BLOCK_SHUTDOWN:
return "BLOCK_SHUTDOWN";
}
NOTREACHED();
return "";
}
std::ostream& operator<<(std::ostream& os, const TaskPriority& task_priority) {
os << TaskPriorityToString(task_priority);
return os;
}
std::ostream& operator<<(std::ostream& os,
const TaskShutdownBehavior& shutdown_behavior) {
os << TaskShutdownBehaviorToString(shutdown_behavior);
return os;
}
} // namespace base

View file

@ -0,0 +1,387 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_TASK_TRAITS_H_
#define BASE_TASK_TASK_TRAITS_H_
#include <stdint.h>
#include <iosfwd>
#include <tuple>
#include <type_traits>
#include <utility>
#include "base/base_export.h"
#include "base/logging.h"
#include "base/task/task_traits_extension.h"
#include "base/traits_bag.h"
#include "build/build_config.h"
// TODO(gab): This is backwards, thread_pool.h should include task_traits.h,
// but the dependency has to be in this direction during the migration from
// old code that used base::ThreadPool as a trait.
#include "base/task/thread_pool.h"
namespace base {
class PostTaskAndroid;
// Valid priorities supported by the task scheduling infrastructure.
//
// Note: internal algorithms depend on priorities being expressed as a
// continuous zero-based list from lowest to highest priority. Users of this API
// shouldn't otherwise care about nor use the underlying values.
//
// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base.task
enum class TaskPriority : uint8_t {
// This will always be equal to the lowest priority available.
LOWEST = 0,
// This task will only start running when machine resources are available. The
// application may preempt the task if it expects that resources will soon be
// needed by work of higher priority. Depending on the ThreadPolicy, the
// task may run on a thread that is likely to be descheduled when higher
// priority work arrives (in this process or another).
//
// Examples:
// - Reporting metrics.
// - Persisting data to disk.
// - Loading data that is required for a potential future user interaction
// (Note: Use CreateUpdateableSequencedTaskRunner() to increase the priority
// when that user interaction happens).
BEST_EFFORT = LOWEST,
// The result of this task is visible to the user (in the UI or as a
// side-effect on the system) but it is not an immediate response to a user
// interaction.
//
// Examples:
// - Updating the UI to reflect progress on a long task.
// - Downloading a file requested by the user.
// - Loading an image that is displayed in the UI but is non-critical.
USER_VISIBLE,
// This task affects UI immediately after a user interaction.
//
// Example:
// - Loading and rendering a web page after the user clicks a link.
// - Sorting suggestions after the user types a character in the omnibox.
//
// This is the default TaskPriority so that, by default, tasks run in order
// with other USER_BLOCKING work and unintended consequences are avoided. The
// only way to get a task to run at a higher priority than USER_BLOCKING is
// to coordinate with a higher-level scheduler (contact
// scheduler-dev@chromium.org for such use cases).
USER_BLOCKING,
// This will always be equal to the highest priority available.
HIGHEST = USER_BLOCKING
};
// Valid shutdown behaviors supported by the thread pool.
enum class TaskShutdownBehavior : uint8_t {
// Tasks posted with this mode which have not started executing before
// shutdown is initiated will never run. Tasks with this mode running at
// shutdown will be ignored (the worker will not be joined).
//
// This option provides a nice way to post stuff you don't want blocking
// shutdown. For example, you might be doing a slow DNS lookup and if it's
// blocked on the OS, you may not want to stop shutdown, since the result
// doesn't really matter at that point.
//
// However, you need to be very careful what you do in your callback when you
// use this option. Since the thread will continue to run until the OS
// terminates the process, the app can be in the process of tearing down when
// you're running. This means any singletons or global objects you use may
// suddenly become invalid out from under you. For this reason, it's best to
// use this only for slow but simple operations like the DNS example.
CONTINUE_ON_SHUTDOWN,
// Tasks posted with this mode that have not started executing at
// shutdown will never run. However, any task that has already begun
// executing when shutdown is invoked will be allowed to continue and
// will block shutdown until completion.
//
// Note: Because ThreadPoolInstance::Shutdown() may block while these tasks
// are executing, care must be taken to ensure that they do not block on the
// thread that called ThreadPoolInstance::Shutdown(), as this may lead to
// deadlock.
SKIP_ON_SHUTDOWN,
// Tasks posted with this mode before shutdown is complete will block shutdown
// until they're executed. Generally, this should be used only to save
// critical user data.
//
// Note: Background threads will be promoted to normal threads at shutdown
// (i.e. TaskPriority::BEST_EFFORT + TaskShutdownBehavior::BLOCK_SHUTDOWN will
// resolve without a priority inversion).
BLOCK_SHUTDOWN,
};
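// E.g., a sketch of saving critical user data so that shutdown waits for the
// write to complete (|SaveUserDataToDisk| is an illustrative function):
//
//   base::ThreadPool::PostTask(
//       FROM_HERE,
//       {base::MayBlock(), base::TaskShutdownBehavior::BLOCK_SHUTDOWN},
//       base::BindOnce(&SaveUserDataToDisk));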
// Determines at which thread priority a task may run.
//
// ThreadPolicy and priority updates
// ---------------------------------
//
// If the TaskPriority of an UpdateableSequencedTaskRunner is increased while
// one of its tasks is running at background thread priority, the task's
// execution will have to complete at background thread priority (may take a
// long time) before the next task can be scheduled with the new TaskPriority.
// If it is important that priority increases take effect quickly,
// MUST_USE_FOREGROUND should be used to prevent the tasks from running at
// background thread priority. If it is important to minimize impact on the
// rest of the system when the TaskPriority is BEST_EFFORT, PREFER_BACKGROUND
// should be used.
//
// ThreadPolicy and priority inversions
// ------------------------------------
//
// A priority inversion occurs when a task running at background thread
// priority is descheduled while holding a resource needed by a thread of
// higher priority. MUST_USE_FOREGROUND can be combined with BEST_EFFORT to
// indicate that a task has a low priority, but shouldn't run at background
// thread priority in order to avoid priority inversions. Please consult with
// //base/task/OWNERS if you suspect a priority inversion.
enum class ThreadPolicy : uint8_t {
// The task runs at background thread priority if:
// - The TaskPriority is BEST_EFFORT.
// - Background thread priority is supported by the platform (see
// environment_config_unittest.cc).
// - No extension trait (e.g. BrowserThread) is used.
// Otherwise, it runs at normal thread priority.
PREFER_BACKGROUND,
// The task runs at normal thread priority, irrespective of its TaskPriority.
MUST_USE_FOREGROUND
};
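// E.g., a sketch of a BEST_EFFORT task that acquires a lock shared with
// foreground work and therefore shouldn't run at background thread priority
// (|UpdateSharedCache| is an illustrative function):
//
//   base::ThreadPool::PostTask(
//       FROM_HERE,
//       {base::TaskPriority::BEST_EFFORT,
//        base::ThreadPolicy::MUST_USE_FOREGROUND},
//       base::BindOnce(&UpdateSharedCache));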
// Tasks with this trait may block. This includes but is not limited to tasks
// that wait on synchronous file I/O operations: read or write a file from disk,
// interact with a pipe or a socket, rename or delete a file, enumerate files in
// a directory, etc. This trait isn't required for the mere use of locks. For
// tasks that block on base/ synchronization primitives, see the
// WithBaseSyncPrimitives trait.
struct MayBlock {};
// DEPRECATED. Use base::ScopedAllowBaseSyncPrimitives(ForTesting) instead.
//
// Tasks with this trait will pass base::AssertBaseSyncPrimitivesAllowed(),
// i.e. will be allowed to call the following methods:
// - base::WaitableEvent::Wait
// - base::ConditionVariable::Wait
// - base::PlatformThread::Join
// - base::PlatformThread::Sleep
// - base::Process::WaitForExit
// - base::Process::WaitForExitWithTimeout
//
// Tasks should generally not use these methods.
//
// Instead of waiting on a WaitableEvent or a ConditionVariable, put the work
// that should happen after the wait in a callback and post that callback from
// where the WaitableEvent or ConditionVariable would have been signaled. If
// something needs to be scheduled after many tasks have executed, use
// base::BarrierClosure.
//
// On Windows, join processes asynchronously using base::win::ObjectWatcher.
//
// MayBlock() must be specified in conjunction with this trait if and only if
// removing usage of methods listed above in the labeled tasks would still
// result in tasks that may block (per MayBlock()'s definition).
//
// When in doubt, consult //base/task/OWNERS.
struct WithBaseSyncPrimitives {};
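// E.g., a sketch of replacing a wait on N tasks with base::BarrierClosure
// (|ProcessPartThenSignal| and |OnAllPartsDone| are illustrative; each
// posted task runs the barrier closure when its part is done):
//
//   base::RepeatingClosure barrier =
//       base::BarrierClosure(num_parts, base::BindOnce(&OnAllPartsDone));
//   for (int i = 0; i < num_parts; ++i) {
//     base::ThreadPool::PostTask(
//         FROM_HERE, base::BindOnce(&ProcessPartThenSignal, i, barrier));
//   }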
// Describes metadata for a single task or a group of tasks.
class BASE_EXPORT TaskTraits {
public:
// ValidTrait ensures TaskTraits' constructor only accepts appropriate types.
struct ValidTrait {
ValidTrait(TaskPriority);
ValidTrait(TaskShutdownBehavior);
ValidTrait(ThreadPolicy);
ValidTrait(MayBlock);
ValidTrait(WithBaseSyncPrimitives);
ValidTrait(ThreadPool);
};
// Invoking this constructor without arguments produces default TaskTraits
// that are appropriate for tasks that
// (1) don't block (ref. MayBlock() and WithBaseSyncPrimitives()),
// (2) pertain to user-blocking activity (explicitly, or implicitly by
// having an ordering dependency with a component that does), and
// (3) can either block shutdown or be skipped on shutdown
// (the task recipient is free to choose a fitting default).
//
// To get TaskTraits for tasks with more precise requirements, provide any
// combination of ValidTrait values as arguments to this constructor.
//
// Note: When posting to well-known threads (e.g. UI/IO), default traits are
// almost always what you want unless you know for sure the task being posted
// has no explicit/implicit ordering dependency with anything else running at
// default (USER_BLOCKING) priority.
//
// E.g.
// constexpr base::TaskTraits default_traits = {};
// constexpr base::TaskTraits user_visible_traits = {
// base::TaskPriority::USER_VISIBLE};
// constexpr base::TaskTraits user_visible_may_block_traits = {
// base::TaskPriority::USER_VISIBLE, base::MayBlock()
// };
// constexpr base::TaskTraits other_user_visible_may_block_traits = {
// base::MayBlock(), base::TaskPriority::USER_VISIBLE
// };
template <class... ArgTypes,
class CheckArgumentsAreValid = std::enable_if_t<
trait_helpers::AreValidTraits<ValidTrait, ArgTypes...>::value ||
trait_helpers::AreValidTraitsForExtension<ArgTypes...>::value>>
constexpr TaskTraits(ArgTypes... args)
: extension_(trait_helpers::GetTaskTraitsExtension(
trait_helpers::AreValidTraits<ValidTrait, ArgTypes...>{},
args...)),
priority_(
trait_helpers::GetEnum<TaskPriority, TaskPriority::USER_BLOCKING>(
args...)),
shutdown_behavior_(
static_cast<uint8_t>(
trait_helpers::GetEnum<TaskShutdownBehavior,
TaskShutdownBehavior::SKIP_ON_SHUTDOWN>(
args...)) |
(trait_helpers::HasTrait<TaskShutdownBehavior, ArgTypes...>()
? kIsExplicitFlag
: 0)),
thread_policy_(
static_cast<uint8_t>(
trait_helpers::GetEnum<ThreadPolicy,
ThreadPolicy::PREFER_BACKGROUND>(
args...)) |
(trait_helpers::HasTrait<ThreadPolicy, ArgTypes...>()
? kIsExplicitFlag
: 0)),
may_block_(trait_helpers::HasTrait<MayBlock, ArgTypes...>()),
with_base_sync_primitives_(
trait_helpers::HasTrait<WithBaseSyncPrimitives, ArgTypes...>()),
use_thread_pool_(trait_helpers::HasTrait<ThreadPool, ArgTypes...>()) {}
constexpr TaskTraits(const TaskTraits& other) = default;
TaskTraits& operator=(const TaskTraits& other) = default;
// TODO(eseckler): Default the comparison operator once C++20 arrives.
bool operator==(const TaskTraits& other) const {
static_assert(sizeof(TaskTraits) == 15,
"Update comparison operator when TaskTraits change");
return extension_ == other.extension_ && priority_ == other.priority_ &&
shutdown_behavior_ == other.shutdown_behavior_ &&
thread_policy_ == other.thread_policy_ &&
may_block_ == other.may_block_ &&
with_base_sync_primitives_ == other.with_base_sync_primitives_ &&
use_thread_pool_ == other.use_thread_pool_;
}
// Sets the priority of tasks with these traits to |priority|.
void UpdatePriority(TaskPriority priority) { priority_ = priority; }
// Returns the priority of tasks with these traits.
constexpr TaskPriority priority() const { return priority_; }
// Returns true if the shutdown behavior was set explicitly.
constexpr bool shutdown_behavior_set_explicitly() const {
return shutdown_behavior_ & kIsExplicitFlag;
}
// Returns the shutdown behavior of tasks with these traits.
constexpr TaskShutdownBehavior shutdown_behavior() const {
return static_cast<TaskShutdownBehavior>(shutdown_behavior_ &
~kIsExplicitFlag);
}
// Returns true if the thread policy was set explicitly.
constexpr bool thread_policy_set_explicitly() const {
return thread_policy_ & kIsExplicitFlag;
}
// Returns the thread policy of tasks with these traits.
constexpr ThreadPolicy thread_policy() const {
return static_cast<ThreadPolicy>(thread_policy_ & ~kIsExplicitFlag);
}
// Returns true if tasks with these traits may block.
constexpr bool may_block() const { return may_block_; }
// Returns true if tasks with these traits may use base/ sync primitives.
constexpr bool with_base_sync_primitives() const {
return with_base_sync_primitives_;
}
// Returns true if tasks with these traits execute on the thread pool.
constexpr bool use_thread_pool() const { return use_thread_pool_; }
uint8_t extension_id() const { return extension_.extension_id; }
// Access the extension data by parsing it into the provided extension type.
// See task_traits_extension.h for requirements on the extension type.
template <class TaskTraitsExtension>
const TaskTraitsExtension GetExtension() const {
DCHECK_EQ(TaskTraitsExtension::kExtensionId, extension_.extension_id);
return TaskTraitsExtension::Parse(extension_);
}
private:
friend PostTaskAndroid;
// For use by PostTaskAndroid.
TaskTraits(TaskPriority priority,
bool may_block,
bool use_thread_pool,
TaskTraitsExtensionStorage extension)
: extension_(extension),
priority_(priority),
shutdown_behavior_(
static_cast<uint8_t>(TaskShutdownBehavior::SKIP_ON_SHUTDOWN)),
thread_policy_(static_cast<uint8_t>(ThreadPolicy::PREFER_BACKGROUND)),
may_block_(may_block),
with_base_sync_primitives_(false),
use_thread_pool_(use_thread_pool) {
static_assert(sizeof(TaskTraits) == 15, "Keep this constructor up to date");
// Java is expected to provide an explicit destination. See TODO in
// TaskTraits.java to move towards API-as-a-destination there as well.
const bool has_extension =
(extension_.extension_id !=
TaskTraitsExtensionStorage::kInvalidExtensionId);
DCHECK(use_thread_pool_ ^ has_extension)
<< "Traits must explicitly specify a destination (e.g. ThreadPool or a "
"named thread like BrowserThread)";
}
// This bit is set in |priority_|, |shutdown_behavior_| and |thread_policy_|
// when the value was set explicitly.
static constexpr uint8_t kIsExplicitFlag = 0x80;
// Ordered for packing.
TaskTraitsExtensionStorage extension_;
TaskPriority priority_;
uint8_t shutdown_behavior_;
uint8_t thread_policy_;
bool may_block_;
bool with_base_sync_primitives_;
bool use_thread_pool_;
};
// Returns string literals for the enums defined in this file. These methods
// should only be used for tracing and debugging.
BASE_EXPORT const char* TaskPriorityToString(TaskPriority task_priority);
BASE_EXPORT const char* TaskShutdownBehaviorToString(
TaskShutdownBehavior shutdown_behavior);
// Stream operators so that the enums defined in this file can be used in
// DCHECK and EXPECT statements.
BASE_EXPORT std::ostream& operator<<(std::ostream& os,
const TaskPriority& task_priority);
BASE_EXPORT std::ostream& operator<<(
std::ostream& os,
const TaskShutdownBehavior& shutdown_behavior);
} // namespace base
#endif // BASE_TASK_TASK_TRAITS_H_

View file

@ -0,0 +1,235 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_TASK_TRAITS_EXTENSION_H_
#define BASE_TASK_TASK_TRAITS_EXTENSION_H_
#include <stdint.h>
#include <array>
#include <tuple>
#include <utility>
#include "base/base_export.h"
#include "base/traits_bag.h"
namespace base {
// Embedders can attach additional traits to a TaskTraits object in a way that
// is opaque to base. These extension traits can then be specified along the
// base traits when constructing the TaskTraits object. They are then stored and
// propagated with the TaskTraits object.
//
// To support constexpr-compatible construction, extension traits are stored in
// a fixed-size byte array in the TaskTraits object and serialized into and
// parsed from this storage by an embedder-provided extension class and
// MakeTaskTraitsExtension() template function. The embedder can later access
// the extension traits via TaskTraits::GetExtension<[ExtensionClass]>().
//
// A TaskTraits extension class needs to specify publicly:
// (1) -- static constexpr uint8_t kExtensionId.
// This field's value identifies the type of the extension uniquely within
// each process. The embedder is responsible for ensuring uniqueness and
// can assign values between kFirstEmbedderExtensionId and kMaxExtensionId
// of TaskTraitsExtensionStorage::ExtensionId.
// (2) -- static const [ExtensionClass] Parse(
// -- const base::TaskTraitsExtensionStorage& extension).
// Parses and constructs an extension object from the provided storage.
//
// For each TaskTraits extension class, the embedder has to provide a
// corresponding MakeTaskTraitsExtension definition inside the same namespace
// as its extension traits:
// (3) -- template <...>
// -- constexpr base::TaskTraitsExtensionStorage MakeTaskTraitsExtension(
// -- ArgTypes... args).
// Constructs and serializes an extension with the given arguments into
// a TaskTraitsExtensionStorage and returns it. When the extension is used,
// all traits, including the base ones, are passed to this function in
// order to make sure the TaskTraits constructor only participates in
// overload resolution if all traits are valid. As such, this function
// should only accept valid task traits recognized by the extension and
// the base task traits.
//
// EXAMPLE (see also base/task/test_task_traits_extension.h):
// --------
//
// namespace my_embedder {
// enum class MyExtensionTrait { kValueA, kValueB };
//
// class MyTaskTraitsExtension {
// public:
// static constexpr uint8_t kExtensionId =
// TaskTraitsExtensionStorage::kFirstEmbedderExtensionId;
//
// struct ValidTrait : public TaskTraits::ValidTrait {
// // Accept base traits in MakeTaskTraitsExtension (see above).
// using TaskTraits::ValidTrait::ValidTrait;
//
// ValidTrait(MyExtensionTrait);
// };
//
// using MyExtensionTraitFilter =
// trait_helpers::EnumTraitFilter<MyExtensionTrait, MyExtensionTrait::kValueA>;
//
// // Constructor that accepts only valid traits as specified by ValidTraits.
// template <class... ArgTypes,
// class CheckArgumentsAreValid = std::enable_if_t<
// base::trait_helpers::AreValidTraits<
// ValidTrait, ArgTypes...>::value>>
// constexpr MyTaskTraitsExtension(ArgTypes... args)
// : my_trait_(trait_helpers::GetTraitFromArgList<MyExtensionTraitFilter>(
// args...)) {}
//
// // Serializes MyTaskTraitsExtension into a storage object and returns it.
// constexpr base::TaskTraitsExtensionStorage Serialize() const {
// // Note: can't use reinterpret_cast or placement new because neither are
// // constexpr-compatible.
// return {kExtensionId, {{static_cast<uint8_t>(my_trait_)}}};
// }
//
// // Creates a MyTaskTraitsExtension by parsing it from a storage object.
// static const MyTaskTraitsExtension Parse(
// const base::TaskTraitsExtensionStorage& extension) {
// return MyTaskTraitsExtension(
// static_cast<MyExtensionTrait>(extension.data[0]));
// }
//
// constexpr MyExtensionTrait my_trait() const { return my_trait_; }
//
// private:
// MyExtensionTrait my_trait_;
// };
//
// // Creates a MyTaskTraitsExtension for the provided |args| and serializes it
// // into |extension|. Accepts only valid arguments for the
// // MyTaskTraitsExtension() constructor.
// template <class... ArgTypes,
// class = std::enable_if_t<
// base::trait_helpers::AreValidTraits<
// MyTaskTraitsExtension::ValidTrait, ArgTypes...>::value>>
// constexpr base::TaskTraitsExtensionStorage MakeTaskTraitsExtension(
// ArgTypes... args) {
// return MyTaskTraitsExtension(args...).Serialize();
// }
// } // namespace my_embedder
//
// // Construction of TaskTraits with extension traits.
// constexpr TaskTraits t1 = {my_embedder::MyExtensionTrait::kValueB};
// constexpr TaskTraits t2 = {base::MayBlock(),
// my_embedder::MyExtensionTrait::kValueA};
//
// // Extension traits can also be specified directly when posting a task.
// base::PostTask(FROM_HERE,
// {my_embedder::MyExtensionTrait::kValueB},
// base::BindOnce(...));
// Stores extension traits opaquely inside a fixed-size data array. We store
// this data directly (rather than in a separate object on the heap) to support
// constexpr-compatible TaskTraits construction.
struct BASE_EXPORT TaskTraitsExtensionStorage {
// Size in bytes.
// Keep in sync with org.chromium.base.task.TaskTraits.EXTENSION_STORAGE_SIZE
static constexpr size_t kStorageSize = 8;
inline constexpr TaskTraitsExtensionStorage();
inline constexpr TaskTraitsExtensionStorage(
uint8_t extension_id_in,
const std::array<uint8_t, kStorageSize>& data_in);
inline constexpr TaskTraitsExtensionStorage(
uint8_t extension_id_in,
std::array<uint8_t, kStorageSize>&& data_in);
inline constexpr TaskTraitsExtensionStorage(
const TaskTraitsExtensionStorage& other);
inline TaskTraitsExtensionStorage& operator=(
const TaskTraitsExtensionStorage& other) = default;
inline bool operator==(const TaskTraitsExtensionStorage& other) const;
enum ExtensionId : uint8_t {
// Keep in sync with org.chromium.base.task.TaskTraits.INVALID_EXTENSION_ID
kInvalidExtensionId = 0,
// The embedder is responsible for assigning the remaining values uniquely.
kFirstEmbedderExtensionId = 1,
// Maximum number of extension types is artificially limited to support
// super efficient TaskExecutor lookup in post_task.cc.
// Keep in sync with org.chromium.base.task.TaskTraits.MAX_EXTENSION_ID
kMaxExtensionId = 4
};
// Identifies the type of extension. See ExtensionId enum above.
uint8_t extension_id;
// Serialized extension data.
std::array<uint8_t, kStorageSize> data;
};
// TODO(https://crbug.com/874482): These constructors need to be "inline" but
// defined outside the class above, because the chromium-style clang plugin
// doesn't exempt constexpr constructors at the moment.
inline constexpr TaskTraitsExtensionStorage::TaskTraitsExtensionStorage()
: extension_id(kInvalidExtensionId), data{} {}
inline constexpr TaskTraitsExtensionStorage::TaskTraitsExtensionStorage(
uint8_t extension_id_in,
const std::array<uint8_t, kStorageSize>& data_in)
: extension_id(extension_id_in), data(data_in) {}
inline constexpr TaskTraitsExtensionStorage::TaskTraitsExtensionStorage(
uint8_t extension_id_in,
std::array<uint8_t, kStorageSize>&& data_in)
: extension_id(extension_id_in), data(std::move(data_in)) {}
inline constexpr TaskTraitsExtensionStorage::TaskTraitsExtensionStorage(
const TaskTraitsExtensionStorage& other) = default;
namespace trait_helpers {
// Helper class whose constructor tests if an extension accepts a list of
// argument types.
struct TaskTraitsExtension {
template <class... ArgTypes,
class CheckCanMakeExtension =
decltype(MakeTaskTraitsExtension(std::declval<ArgTypes>()...))>
constexpr TaskTraitsExtension(ArgTypes... args) {}
};
// Tests that a trait extension accepts all |ArgTypes...|.
template <class... ArgTypes>
using AreValidTraitsForExtension =
std::is_constructible<TaskTraitsExtension, ArgTypes...>;
// Helper function that returns the TaskTraitsExtensionStorage of a
// serialized extension created with |args...| if there are arguments that are
// not valid base traits, or a default constructed TaskTraitsExtensionStorage
// otherwise.
template <class... ArgTypes>
constexpr TaskTraitsExtensionStorage GetTaskTraitsExtension(
std::true_type base_traits,
ArgTypes... args) {
return TaskTraitsExtensionStorage();
}
template <class... ArgTypes>
constexpr TaskTraitsExtensionStorage GetTaskTraitsExtension(
std::false_type base_traits,
ArgTypes... args) {
return MakeTaskTraitsExtension(args...);
}
} // namespace trait_helpers
// TODO(eseckler): Default the comparison operator once C++20 arrives.
inline bool TaskTraitsExtensionStorage::operator==(
const TaskTraitsExtensionStorage& other) const {
static_assert(
9 == sizeof(TaskTraitsExtensionStorage),
"Update comparison operator when TaskTraitsExtensionStorage changes");
return extension_id == other.extension_id && data == other.data;
}
} // namespace base
#endif // BASE_TASK_TASK_TRAITS_EXTENSION_H_

View file

@ -0,0 +1,12 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/test_task_traits_extension.h"
namespace base {
// static
constexpr uint8_t TestTaskTraitsExtension::kExtensionId;
} // namespace base

View file

@ -0,0 +1,74 @@
// Copyright (c) 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_TEST_TASK_TRAITS_EXTENSION_H_
#define BASE_TASK_TEST_TASK_TRAITS_EXTENSION_H_
#include <utility>
#include "base/task/task_traits.h"
namespace base {
enum class TestExtensionEnumTrait { kA, kB, kC };
struct TestExtensionBoolTrait {};
// Example TaskTraits extension for use in tests.
class TestTaskTraitsExtension {
public:
static constexpr uint8_t kExtensionId =
TaskTraitsExtensionStorage::kFirstEmbedderExtensionId;
struct ValidTrait : public TaskTraits::ValidTrait {
using TaskTraits::ValidTrait::ValidTrait;
ValidTrait(TestExtensionEnumTrait);
ValidTrait(TestExtensionBoolTrait);
};
template <class... ArgTypes,
class CheckArgumentsAreValid = std::enable_if_t<
trait_helpers::AreValidTraits<ValidTrait, ArgTypes...>::value>>
constexpr TestTaskTraitsExtension(ArgTypes... args)
: enum_trait_(
trait_helpers::GetEnum<TestExtensionEnumTrait,
TestExtensionEnumTrait::kA>(args...)),
bool_trait_(
trait_helpers::HasTrait<TestExtensionBoolTrait, ArgTypes...>()) {}
constexpr TaskTraitsExtensionStorage Serialize() const {
return {kExtensionId, {{static_cast<uint8_t>(enum_trait_), bool_trait_}}};
}
static const TestTaskTraitsExtension Parse(
const TaskTraitsExtensionStorage& extension) {
if (extension.data[1]) {
return TestTaskTraitsExtension(
static_cast<TestExtensionEnumTrait>(extension.data[0]),
TestExtensionBoolTrait());
} else {
return TestTaskTraitsExtension(
static_cast<TestExtensionEnumTrait>(extension.data[0]));
}
}
constexpr TestExtensionEnumTrait enum_trait() const { return enum_trait_; }
constexpr bool bool_trait() const { return bool_trait_; }
private:
TestExtensionEnumTrait enum_trait_;
bool bool_trait_;
};
template <class... ArgTypes,
class = std::enable_if_t<
trait_helpers::AreValidTraits<TestTaskTraitsExtension::ValidTrait,
ArgTypes...>::value>>
constexpr TaskTraitsExtensionStorage MakeTaskTraitsExtension(ArgTypes... args) {
return TestTaskTraitsExtension(std::forward<ArgTypes>(args)...).Serialize();
}
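// E.g., a sketch of constructing TaskTraits with this test extension and
// reading it back (the DCHECKs are shown for illustration):
//
//   constexpr TaskTraits traits = {TestExtensionEnumTrait::kB,
//                                  TestExtensionBoolTrait()};
//   TestTaskTraitsExtension extension =
//       traits.GetExtension<TestTaskTraitsExtension>();
//   DCHECK_EQ(TestExtensionEnumTrait::kB, extension.enum_trait());
//   DCHECK(extension.bool_trait());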
} // namespace base
#endif // BASE_TASK_TEST_TASK_TRAITS_EXTENSION_H_

View file

@ -0,0 +1,126 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/thread_pool.h"
#include "base/logging.h"
#include "base/task/scoped_set_task_priority_for_current_thread.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool/thread_pool_impl.h"
#include "base/task/thread_pool/thread_pool_instance.h"
#include "base/threading/post_task_and_reply_impl.h"
namespace base {
namespace {
class PostTaskAndReplyWithTraitsTaskRunner
: public internal::PostTaskAndReplyImpl {
public:
explicit PostTaskAndReplyWithTraitsTaskRunner(const TaskTraits& traits)
: traits_(traits) {}
private:
bool PostTask(const Location& from_here, OnceClosure task) override {
ThreadPool::PostTask(from_here, traits_, std::move(task));
return true;
}
const TaskTraits traits_;
};
internal::ThreadPoolImpl* GetThreadPoolImpl() {
auto* instance = ThreadPoolInstance::Get();
DCHECK(instance)
<< "Ref. Prerequisite section of base/task/thread_pool.h.\n"
"Hint: if this is in a unit test, you're likely merely missing a "
"base::test::TaskEnvironment member in your fixture (or your fixture "
"is using a base::test::SingleThreadTaskEnvironment and now needs a "
"full base::test::TaskEnvironment).\n";
return static_cast<internal::ThreadPoolImpl*>(instance);
}
} // namespace
// static
bool ThreadPool::PostTask(const Location& from_here, OnceClosure task) {
return ThreadPool::PostDelayedTask(from_here, std::move(task), TimeDelta());
}
// static
bool ThreadPool::PostDelayedTask(const Location& from_here,
OnceClosure task,
TimeDelta delay) {
return ThreadPool::PostDelayedTask(from_here, {}, std::move(task), delay);
}
// static
bool ThreadPool::PostTaskAndReply(const Location& from_here,
OnceClosure task,
OnceClosure reply) {
return ThreadPool::PostTaskAndReply(from_here, {}, std::move(task),
std::move(reply));
}
// static
bool ThreadPool::PostTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task) {
return ThreadPool::PostDelayedTask(from_here, traits, std::move(task),
TimeDelta());
}
// static
bool ThreadPool::PostDelayedTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
TimeDelta delay) {
return GetThreadPoolImpl()->PostDelayedTask(from_here, traits,
std::move(task), delay);
}
// static
bool ThreadPool::PostTaskAndReply(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
OnceClosure reply) {
return PostTaskAndReplyWithTraitsTaskRunner(traits).PostTaskAndReply(
from_here, std::move(task), std::move(reply));
}
// static
scoped_refptr<TaskRunner> ThreadPool::CreateTaskRunner(
const TaskTraits& traits) {
return GetThreadPoolImpl()->CreateTaskRunner(traits);
}
// static
scoped_refptr<SequencedTaskRunner> ThreadPool::CreateSequencedTaskRunner(
const TaskTraits& traits) {
return GetThreadPoolImpl()->CreateSequencedTaskRunner(traits);
}
// static
scoped_refptr<UpdateableSequencedTaskRunner>
ThreadPool::CreateUpdateableSequencedTaskRunner(const TaskTraits& traits) {
return GetThreadPoolImpl()->CreateUpdateableSequencedTaskRunner(traits);
}
// static
scoped_refptr<SingleThreadTaskRunner> ThreadPool::CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
return GetThreadPoolImpl()->CreateSingleThreadTaskRunner(traits, thread_mode);
}
#if defined(OS_WIN)
// static
scoped_refptr<SingleThreadTaskRunner> ThreadPool::CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
return GetThreadPoolImpl()->CreateCOMSTATaskRunner(traits, thread_mode);
}
#endif // defined(OS_WIN)
} // namespace base

View file

@ -0,0 +1,250 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_THREAD_POOL_H_
#define BASE_TASK_THREAD_POOL_H_
#include <stdint.h>
#include <memory>
#include <utility>
namespace base {
// TODO(gab): thread_pool.h should include task_traits.h but it can't during the
// migration because task_traits.h has to include thread_pool.h to get the old
// base::ThreadPool() trait constructor and that would create a circular
// dependency. Some of the includes below result in an extended version of this
// circular dependency. These forward-declarations are temporarily required for
// the duration of the migration.
enum class TaskPriority : uint8_t;
enum class TaskShutdownBehavior : uint8_t;
enum class ThreadPolicy : uint8_t;
struct MayBlock;
struct WithBaseSyncPrimitives;
class TaskTraits;
// UpdateableSequencedTaskRunner is part of this dance too because
// updateable_sequenced_task_runner.h includes task_traits.h
class UpdateableSequencedTaskRunner;
} // namespace base
#include "base/base_export.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/callback_helpers.h"
#include "base/location.h"
#include "base/memory/scoped_refptr.h"
#include "base/post_task_and_reply_with_result_internal.h"
#include "base/sequenced_task_runner.h"
#include "base/single_thread_task_runner.h"
#include "base/task/single_thread_task_runner_thread_mode.h"
#include "base/task_runner.h"
#include "base/time/time.h"
#include "build/build_config.h"
namespace base {
// This is the interface to post tasks to base's thread pool.
//
// To post a simple one-off task with default traits:
// base::ThreadPool::PostTask(FROM_HERE, base::BindOnce(...));
//
// To post a high priority one-off task to respond to a user interaction:
// base::ThreadPool::PostTask(
// FROM_HERE,
// {base::TaskPriority::USER_BLOCKING},
// base::BindOnce(...));
//
// To post tasks that must run in sequence with default traits:
// scoped_refptr<SequencedTaskRunner> task_runner =
// base::ThreadPool::CreateSequencedTaskRunner();
// task_runner->PostTask(FROM_HERE, base::BindOnce(...));
// task_runner->PostTask(FROM_HERE, base::BindOnce(...));
//
// To post tasks that may block, must run in sequence and can be skipped on
// shutdown:
// scoped_refptr<SequencedTaskRunner> task_runner =
// base::ThreadPool::CreateSequencedTaskRunner(
// {MayBlock(), TaskShutdownBehavior::SKIP_ON_SHUTDOWN});
// task_runner->PostTask(FROM_HERE, base::BindOnce(...));
// task_runner->PostTask(FROM_HERE, base::BindOnce(...));
//
// The default traits apply to tasks that:
// (1) don't block (ref. MayBlock() and WithBaseSyncPrimitives()),
// (2) prefer inheriting the current priority to specifying their own, and
// (3) can either block shutdown or be skipped on shutdown
// (implementation is free to choose a fitting default).
// Explicit traits must be specified for tasks for which these loose
// requirements are not sufficient.
//
// Prerequisite: A ThreadPoolInstance must have been registered for the current
// process via ThreadPoolInstance::Set() before the API below can be invoked.
// This is typically done during the initialization phase in each process. If
// your code is not running in that phase, you most likely don't have to worry
// about this. You will encounter DCHECKs or nullptr dereferences if this is
// violated. For tests, use base::test::TaskEnvironment.
class BASE_EXPORT ThreadPool {
public:
// base::ThreadPool is meant to be a static API. Do not use this constructor
// in new code! It is a temporary hack to support the old base::ThreadPool()
// trait during the migration to static base::ThreadPool:: APIs.
// Tasks and task runners with this trait will run in the thread pool,
// concurrently with tasks on other task runners. If you need mutual exclusion
// between tasks, see base::ThreadPool::CreateSequencedTaskRunner.
ThreadPool() = default;
// Equivalent to calling PostTask with default TaskTraits.
static bool PostTask(const Location& from_here, OnceClosure task);
inline static bool PostTask(OnceClosure task,
const Location& from_here = Location::Current()) {
return PostTask(from_here, std::move(task));
}
// Equivalent to calling PostDelayedTask with default TaskTraits.
//
// Use PostDelayedTask to specify a BEST_EFFORT priority if the task doesn't
// have to run as soon as |delay| expires.
static bool PostDelayedTask(const Location& from_here,
OnceClosure task,
TimeDelta delay);
// Equivalent to calling PostTaskAndReply with default TaskTraits.
static bool PostTaskAndReply(const Location& from_here,
OnceClosure task,
OnceClosure reply);
// Equivalent to calling PostTaskAndReplyWithResult with default TaskTraits.
//
// Though RepeatingCallback is convertible to OnceCallback, we need a
// CallbackType template since we cannot use template deduction and object
// conversion at once during overload resolution.
// TODO(crbug.com/714018): Update all callers of the RepeatingCallback version
// to use OnceCallback and remove the CallbackType template.
template <template <typename> class CallbackType,
typename TaskReturnType,
typename ReplyArgType,
typename = EnableIfIsBaseCallback<CallbackType>>
static bool PostTaskAndReplyWithResult(
const Location& from_here,
CallbackType<TaskReturnType()> task,
CallbackType<void(ReplyArgType)> reply) {
// Delegates to the TaskTraits overload below with default traits.
return ThreadPool::PostTaskAndReplyWithResult(from_here, {}, std::move(task),
                                              std::move(reply));
}
// Posts |task| with specific |traits|. Returns false if the task definitely
// won't run because of current shutdown state.
static bool PostTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task);
// Posts |task| with specific |traits|. |task| will not run before |delay|
// expires. Returns false if the task definitely won't run because of current
// shutdown state.
//
// Specify a BEST_EFFORT priority via |traits| if the task doesn't have to run
// as soon as |delay| expires.
static bool PostDelayedTask(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
TimeDelta delay);
// Posts |task| with specific |traits| and posts |reply| on the caller's
// execution context (i.e. same sequence or thread and same TaskTraits if
// applicable) when |task| completes. Returns false if the task definitely
// won't run because of current shutdown state. Can only be called when
// SequencedTaskRunnerHandle::IsSet().
static bool PostTaskAndReply(const Location& from_here,
const TaskTraits& traits,
OnceClosure task,
OnceClosure reply);
// Posts |task| with specific |traits| and posts |reply| with the return value
// of |task| as argument on the caller's execution context (i.e. same sequence
// or thread and same TaskTraits if applicable) when |task| completes. Returns
// false if the task definitely won't run because of current shutdown state.
// Can only be called when SequencedTaskRunnerHandle::IsSet().
//
// Though RepeatingCallback is convertible to OnceCallback, we need a
// CallbackType template since we cannot use template deduction and object
// conversion at once during overload resolution.
// TODO(crbug.com/714018): Update all callers of the RepeatingCallback version
// to use OnceCallback and remove the CallbackType template.
template <template <typename> class CallbackType,
typename TaskReturnType,
typename ReplyArgType,
typename = EnableIfIsBaseCallback<CallbackType>>
static bool PostTaskAndReplyWithResult(
const Location& from_here,
const TaskTraits& traits,
CallbackType<TaskReturnType()> task,
CallbackType<void(ReplyArgType)> reply) {
auto* result = new std::unique_ptr<TaskReturnType>();
return PostTaskAndReply(
from_here, traits,
BindOnce(&internal::ReturnAsParamAdapter<TaskReturnType>,
std::move(task), result),
BindOnce(&internal::ReplyAdapter<TaskReturnType, ReplyArgType>,
std::move(reply), Owned(result)));
}
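// E.g., a sketch of computing a value off-thread and consuming it on the
// posting sequence (|ComputeChecksum| and |OnChecksumComputed| are
// illustrative functions):
//
//   base::ThreadPool::PostTaskAndReplyWithResult(
//       FROM_HERE, {base::MayBlock()},
//       base::BindOnce(&ComputeChecksum, file_path),
//       base::BindOnce(&OnChecksumComputed));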
// Returns a TaskRunner whose PostTask invocations result in scheduling tasks
// using |traits|. Tasks may run in any order and in parallel.
static scoped_refptr<TaskRunner> CreateTaskRunner(const TaskTraits& traits);
// Returns a SequencedTaskRunner whose PostTask invocations result in
// scheduling tasks using |traits|. Tasks run one at a time in posting order.
static scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
const TaskTraits& traits);
// Returns a task runner whose PostTask invocations result in scheduling tasks
// using |traits|. The priority in |traits| can be updated at any time via
// UpdateableSequencedTaskRunner::UpdatePriority(). An update affects all
// tasks posted to the task runner that aren't running yet. Tasks run one at a
// time in posting order.
//
// |traits| requirements:
// - base::ThreadPolicy must be specified if the priority of the task runner
// will ever be increased from BEST_EFFORT.
static scoped_refptr<UpdateableSequencedTaskRunner>
CreateUpdateableSequencedTaskRunner(const TaskTraits& traits);
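// E.g., a sketch of starting work at BEST_EFFORT and promoting it when the
// user interacts (ThreadPolicy is specified because the priority may later
// be increased; |Prefetch| is an illustrative function):
//
//   auto runner = base::ThreadPool::CreateUpdateableSequencedTaskRunner(
//       {base::TaskPriority::BEST_EFFORT,
//        base::ThreadPolicy::PREFER_BACKGROUND});
//   runner->PostTask(FROM_HERE, base::BindOnce(&Prefetch));
//   // Later, on user interaction:
//   runner->UpdatePriority(base::TaskPriority::USER_BLOCKING);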
// Returns a SingleThreadTaskRunner whose PostTask invocations result in
// scheduling tasks using |traits| on a thread determined by |thread_mode|.
// See base/task/single_thread_task_runner_thread_mode.h for |thread_mode|
// details. If |traits| identifies an existing thread,
// SingleThreadTaskRunnerThreadMode::SHARED must be used. Tasks run on a
// single thread in posting order.
//
// If all you need is to make sure that tasks don't run concurrently (e.g.
// because they access a data structure which is not thread-safe), use
// CreateSequencedTaskRunner(). Only use this if you rely on a thread-affine
// API (it might be safer to assume thread-affinity when dealing with
// under-documented third-party APIs, e.g. other OSes') or share data across
// tasks using thread-local storage.
static scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode =
SingleThreadTaskRunnerThreadMode::SHARED);
#if defined(OS_WIN)
// Returns a SingleThreadTaskRunner whose PostTask invocations result in
// scheduling tasks using |traits| in a COM Single-Threaded Apartment on a
// thread determined by |thread_mode|. See
// base/task/single_thread_task_runner_thread_mode.h for |thread_mode|
// details. If |traits| identifies an existing thread,
// SingleThreadTaskRunnerThreadMode::SHARED must be used. Tasks run in the
// same Single-Threaded Apartment in posting order for the returned
// SingleThreadTaskRunner. There is not necessarily a one-to-one
// correspondence between SingleThreadTaskRunners and Single-Threaded
// Apartments. The implementation is free to share apartments or create new
// apartments as necessary. In either case, care should be taken to make sure
// COM pointers are not smuggled across apartments.
static scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode =
SingleThreadTaskRunnerThreadMode::SHARED);
#endif // defined(OS_WIN)
};
} // namespace base
#endif // BASE_TASK_THREAD_POOL_H_

View file

@ -0,0 +1,7 @@
etiennep@chromium.org
fdoray@chromium.org
gab@chromium.org
robliao@chromium.org
# TEAM: scheduler-dev@chromium.org
# COMPONENT: Internals>ThreadPool

View file

@ -0,0 +1,159 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/thread_pool/delayed_task_manager.h"
#include <algorithm>
#include "base/bind.h"
#include "base/logging.h"
#include "base/sequenced_task_runner.h"
#include "base/task/post_task.h"
#include "base/task/thread_pool/task.h"
#include "base/task_runner.h"
namespace base {
namespace internal {
DelayedTaskManager::DelayedTask::DelayedTask() = default;
DelayedTaskManager::DelayedTask::DelayedTask(
Task task,
PostTaskNowCallback callback,
scoped_refptr<TaskRunner> task_runner)
: task(std::move(task)),
callback(std::move(callback)),
task_runner(std::move(task_runner)) {}
DelayedTaskManager::DelayedTask::DelayedTask(
DelayedTaskManager::DelayedTask&& other) = default;
DelayedTaskManager::DelayedTask::~DelayedTask() = default;
DelayedTaskManager::DelayedTask& DelayedTaskManager::DelayedTask::operator=(
DelayedTaskManager::DelayedTask&& other) = default;
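// Orders DelayedTasks by |delayed_run_time|, breaking ties by |sequence_num|
// (posting order), so that the IntrusiveHeap's Min() is always the next task
// to become ripe.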
bool DelayedTaskManager::DelayedTask::operator<=(
const DelayedTask& other) const {
if (task.delayed_run_time == other.task.delayed_run_time) {
return task.sequence_num <= other.task.sequence_num;
}
return task.delayed_run_time < other.task.delayed_run_time;
}
bool DelayedTaskManager::DelayedTask::IsScheduled() const {
return scheduled_;
}
void DelayedTaskManager::DelayedTask::SetScheduled() {
DCHECK(!scheduled_);
scheduled_ = true;
}
DelayedTaskManager::DelayedTaskManager(const TickClock* tick_clock)
: process_ripe_tasks_closure_(
BindRepeating(&DelayedTaskManager::ProcessRipeTasks,
Unretained(this))),
tick_clock_(tick_clock) {
DCHECK(tick_clock_);
}
DelayedTaskManager::~DelayedTaskManager() = default;
void DelayedTaskManager::Start(
scoped_refptr<SequencedTaskRunner> service_thread_task_runner) {
DCHECK(service_thread_task_runner);
TimeTicks process_ripe_tasks_time;
{
CheckedAutoLock auto_lock(queue_lock_);
DCHECK(!service_thread_task_runner_);
service_thread_task_runner_ = std::move(service_thread_task_runner);
process_ripe_tasks_time = GetTimeToScheduleProcessRipeTasksLockRequired();
}
ScheduleProcessRipeTasksOnServiceThread(process_ripe_tasks_time);
}
void DelayedTaskManager::AddDelayedTask(
Task task,
PostTaskNowCallback post_task_now_callback,
scoped_refptr<TaskRunner> task_runner) {
DCHECK(task.task);
DCHECK(!task.delayed_run_time.is_null());
// Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
// for details.
CHECK(task.task);
TimeTicks process_ripe_tasks_time;
{
CheckedAutoLock auto_lock(queue_lock_);
delayed_task_queue_.insert(DelayedTask(std::move(task),
std::move(post_task_now_callback),
std::move(task_runner)));
// Not started yet.
if (service_thread_task_runner_ == nullptr)
return;
process_ripe_tasks_time = GetTimeToScheduleProcessRipeTasksLockRequired();
}
ScheduleProcessRipeTasksOnServiceThread(process_ripe_tasks_time);
}
void DelayedTaskManager::ProcessRipeTasks() {
std::vector<DelayedTask> ripe_delayed_tasks;
TimeTicks process_ripe_tasks_time;
{
CheckedAutoLock auto_lock(queue_lock_);
const TimeTicks now = tick_clock_->NowTicks();
while (!delayed_task_queue_.empty() &&
delayed_task_queue_.Min().task.delayed_run_time <= now) {
// The const_cast on Min() is okay since the DelayedTask is
// transactionally being popped from |delayed_task_queue_| right after,
// and the move doesn't alter the sort order.
ripe_delayed_tasks.push_back(
std::move(const_cast<DelayedTask&>(delayed_task_queue_.Min())));
delayed_task_queue_.Pop();
}
process_ripe_tasks_time = GetTimeToScheduleProcessRipeTasksLockRequired();
}
ScheduleProcessRipeTasksOnServiceThread(process_ripe_tasks_time);
for (auto& delayed_task : ripe_delayed_tasks) {
std::move(delayed_task.callback).Run(std::move(delayed_task.task));
}
}
Optional<TimeTicks> DelayedTaskManager::NextScheduledRunTime() const {
CheckedAutoLock auto_lock(queue_lock_);
if (delayed_task_queue_.empty())
return nullopt;
return delayed_task_queue_.Min().task.delayed_run_time;
}
TimeTicks DelayedTaskManager::GetTimeToScheduleProcessRipeTasksLockRequired() {
queue_lock_.AssertAcquired();
if (delayed_task_queue_.empty())
return TimeTicks::Max();
// The const_cast on Min() is okay since |IsScheduled()| and |SetScheduled()|
// don't alter the sort order.
DelayedTask& ripest_delayed_task =
const_cast<DelayedTask&>(delayed_task_queue_.Min());
if (ripest_delayed_task.IsScheduled())
return TimeTicks::Max();
ripest_delayed_task.SetScheduled();
return ripest_delayed_task.task.delayed_run_time;
}
void DelayedTaskManager::ScheduleProcessRipeTasksOnServiceThread(
TimeTicks next_delayed_task_run_time) {
DCHECK(!next_delayed_task_run_time.is_null());
if (next_delayed_task_run_time.is_max())
return;
const TimeTicks now = tick_clock_->NowTicks();
TimeDelta delay = std::max(TimeDelta(), next_delayed_task_run_time - now);
service_thread_task_runner_->PostDelayedTask(
FROM_HERE, process_ripe_tasks_closure_, delay);
}
} // namespace internal
} // namespace base

View file

@ -0,0 +1,136 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_THREAD_POOL_DELAYED_TASK_MANAGER_H_
#define BASE_TASK_THREAD_POOL_DELAYED_TASK_MANAGER_H_
#include <memory>
#include <utility>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/ptr_util.h"
#include "base/memory/ref_counted.h"
#include "base/optional.h"
#include "base/synchronization/atomic_flag.h"
#include "base/task/common/checked_lock.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/thread_pool/task.h"
#include "base/thread_annotations.h"
#include "base/time/default_tick_clock.h"
#include "base/time/tick_clock.h"
namespace base {
class SequencedTaskRunner;
namespace internal {
// The DelayedTaskManager forwards tasks to post task callbacks when they become
// ripe for execution. Tasks are not forwarded before Start() is called. This
// class is thread-safe.
class BASE_EXPORT DelayedTaskManager {
public:
// A callback that posts |task| for immediate execution.
using PostTaskNowCallback = OnceCallback<void(Task task)>;
// |tick_clock| can be specified for testing.
DelayedTaskManager(
const TickClock* tick_clock = DefaultTickClock::GetInstance());
~DelayedTaskManager();
// Starts the delayed task manager, allowing past and future tasks to be
// forwarded to their callbacks as they become ripe for execution.
// |service_thread_task_runner| posts tasks to the ThreadPool service
// thread.
void Start(scoped_refptr<SequencedTaskRunner> service_thread_task_runner);
// Schedules a call to |post_task_now_callback| with |task| as argument when
// |task| is ripe for execution. |task_runner| is passed to retain a
// reference until |task| is ripe.
void AddDelayedTask(Task task,
PostTaskNowCallback post_task_now_callback,
scoped_refptr<TaskRunner> task_runner);
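// E.g., a sketch of deferring a delayed task (|PostTaskNow| is an
// illustrative free function that forwards |task| for immediate execution):
//
//   delayed_task_manager.AddDelayedTask(std::move(task),
//                                       base::BindOnce(&PostTaskNow),
//                                       std::move(task_runner));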
// Pops and posts all ripe tasks in the delayed task queue.
void ProcessRipeTasks();
// Returns the |delayed_run_time| of the next scheduled task, if any.
Optional<TimeTicks> NextScheduledRunTime() const;
private:
struct DelayedTask {
DelayedTask();
DelayedTask(Task task,
PostTaskNowCallback callback,
scoped_refptr<TaskRunner> task_runner);
DelayedTask(DelayedTask&& other);
~DelayedTask();
// Required by IntrusiveHeap::insert().
DelayedTask& operator=(DelayedTask&& other);
// Required by IntrusiveHeap.
bool operator<=(const DelayedTask& other) const;
Task task;
PostTaskNowCallback callback;
scoped_refptr<TaskRunner> task_runner;
// True iff the delayed task has been marked as scheduled.
bool IsScheduled() const;
// Marks the delayed task as scheduled. Since the sort key is
// |task.delayed_run_time|, calling this does not alter the sort order.
void SetScheduled();
// Required by IntrusiveHeap.
void SetHeapHandle(const HeapHandle& handle) {}
// Required by IntrusiveHeap.
void ClearHeapHandle() {}
// Required by IntrusiveHeap.
HeapHandle GetHeapHandle() const { return HeapHandle::Invalid(); }
private:
bool scheduled_ = false;
DISALLOW_COPY_AND_ASSIGN(DelayedTask);
};
// Get the time at which to schedule the next |ProcessRipeTasks()| execution,
// or TimeTicks::Max() if none needs to be scheduled (i.e. no task, or next
// task already scheduled).
TimeTicks GetTimeToScheduleProcessRipeTasksLockRequired()
EXCLUSIVE_LOCKS_REQUIRED(queue_lock_);
// Schedule |ProcessRipeTasks()| on the service thread to be executed at the
// given |process_ripe_tasks_time|, provided the given time is not
// TimeTicks::Max().
void ScheduleProcessRipeTasksOnServiceThread(
TimeTicks process_ripe_tasks_time);
const RepeatingClosure process_ripe_tasks_closure_;
const TickClock* const tick_clock_;
// Synchronizes access to |delayed_task_queue_| and the setting of
// |service_thread_task_runner_|. Once |service_thread_task_runner_| is set,
// it is never modified. It is therefore safe to access
// |service_thread_task_runner_| without synchronization once it is observed
// that it is non-null.
mutable CheckedLock queue_lock_;
scoped_refptr<SequencedTaskRunner> service_thread_task_runner_;
IntrusiveHeap<DelayedTask> delayed_task_queue_ GUARDED_BY(queue_lock_);
DISALLOW_COPY_AND_ASSIGN(DelayedTaskManager);
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_THREAD_POOL_DELAYED_TASK_MANAGER_H_

View file

@ -0,0 +1,47 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/thread_pool/environment_config.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
namespace base {
namespace internal {
namespace {
bool CanUseBackgroundPriorityForWorkerThreadImpl() {
// When Lock doesn't handle multiple thread priorities, run all
// WorkerThreads with a normal priority to avoid priority inversion when a
// thread running with a normal priority tries to acquire a lock held by a
// thread running with a background priority.
if (!Lock::HandlesMultipleThreadPriorities())
return false;
#if !defined(OS_ANDROID)
// When thread priority can't be increased to NORMAL, run all threads with a
// NORMAL priority to avoid priority inversions on shutdown (ThreadPoolImpl
// increases BACKGROUND threads' priority to NORMAL on shutdown while resolving
// remaining shutdown-blocking tasks).
//
// This is ignored on Android, because it doesn't have a clean shutdown phase.
if (!PlatformThread::CanIncreaseThreadPriority(ThreadPriority::NORMAL))
return false;
#endif // !defined(OS_ANDROID)
return true;
}
} // namespace
bool CanUseBackgroundPriorityForWorkerThread() {
static const bool can_use_background_priority_for_worker_thread =
CanUseBackgroundPriorityForWorkerThreadImpl();
return can_use_background_priority_for_worker_thread;
}
} // namespace internal
} // namespace base

View file

@ -0,0 +1,52 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_THREAD_POOL_ENVIRONMENT_CONFIG_H_
#define BASE_TASK_THREAD_POOL_ENVIRONMENT_CONFIG_H_
#include <stddef.h>
#include "base/base_export.h"
#include "base/task/task_traits.h"
#include "base/threading/thread.h"
namespace base {
namespace internal {
// TODO(etiennep): This is now specific to
// PooledSingleThreadTaskRunnerManager, move it there.
enum EnvironmentType {
FOREGROUND = 0,
FOREGROUND_BLOCKING,
BACKGROUND,
BACKGROUND_BLOCKING,
ENVIRONMENT_COUNT // Always last.
};
// Order must match the EnvironmentType enum.
struct EnvironmentParams {
// The threads and histograms of this environment will be labeled with
// the thread pool name concatenated to this.
const char* name_suffix;
// Preferred priority for threads in this environment; the actual thread
// priority depends on shutdown state and platform capabilities.
ThreadPriority priority_hint;
};
constexpr EnvironmentParams kEnvironmentParams[] = {
{"Foreground", base::ThreadPriority::NORMAL},
{"ForegroundBlocking", base::ThreadPriority::NORMAL},
{"Background", base::ThreadPriority::BACKGROUND},
{"BackgroundBlocking", base::ThreadPriority::BACKGROUND},
};
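// For example, threads and histograms of the BACKGROUND environment in a
// thread pool named "Foo" (an illustrative name) would be labeled
// "FooBackground", with ThreadPriority::BACKGROUND as the priority hint
// (assuming the platform supports it; see
// CanUseBackgroundPriorityForWorkerThread() below).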
// Returns true if this platform supports having WorkerThreads running with a
// background priority.
bool BASE_EXPORT CanUseBackgroundPriorityForWorkerThread();
} // namespace internal
} // namespace base
#endif // BASE_TASK_THREAD_POOL_ENVIRONMENT_CONFIG_H_

View file

@ -0,0 +1,92 @@
# Historical Histogram Data
This page presents data captured from `base::ThreadPool` histograms at a given
point in time so that it can be used in future design decisions.
All data is a 28-day aggregation on the Stable channel.
## Number of tasks between waits
Number of tasks between two waits by a foreground worker thread in a
browser/renderer process.
Histogram name: ThreadPool.NumTasksBetweenWaits.(Browser/Renderer).Foreground
Date: August 2019
Values in the tables below are percentiles.
### Windows
| Number of tasks | Browser process | Renderer process |
|-----------------|-----------------|------------------|
| 1 | 87 | 92 |
| 2 | 95 | 98 |
| 5 | 99 | 100 |
### Mac
| Number of tasks | Browser process | Renderer process |
|-----------------|-----------------|------------------|
| 1 | 81 | 90 |
| 2 | 92 | 97 |
| 5 | 98 | 100 |
### Android
| Number of tasks | Browser process | Renderer process |
|-----------------|-----------------|------------------|
| 1 | 92 | 96 |
| 2 | 97 | 98 |
| 5 | 99 | 100 |
## Number of tasks run while queueing
Number of tasks run by the ThreadPool while a task was queued (from the time
the task was posted until the time it was run). Recorded for dummy heartbeat
tasks in the *browser* process. The heartbeat recording avoids dependencies
between this report and other work in the system.
Histogram name: ThreadPool.NumTasksRunWhileQueuing.Browser.*
Date: September 2019
Values in the tables below are percentiles.
Note: In *renderer* processes, on all platforms/priorities, 0 tasks are run
while queueing at the 99.5th percentile.
### Windows
| Number of tasks | USER_BLOCKING | USER_VISIBLE | BEST_EFFORT |
|-----------------|---------------|--------------|-------------|
| 0 | 95 | 93 | 90 |
| 1 | 98 | 95 | 92 |
| 2 | 99 | 96 | 93 |
| 5 | 100 | 98 | 95 |
### Mac
| Number of tasks | USER_BLOCKING | USER_VISIBLE | BEST_EFFORT |
|-----------------|---------------|--------------|-------------|
| 0 | 100 | 100 | 99 |
| 1 | 100 | 100 | 99 |
| 2 | 100 | 100 | 99 |
| 5 | 100 | 100 | 100 |
### Android
| Number of tasks | USER_BLOCKING | USER_VISIBLE | BEST_EFFORT |
|-----------------|---------------|--------------|-------------|
| 0 | 99 | 98 | 97 |
| 1 | 100 | 99 | 99 |
| 2 | 100 | 99 | 99 |
| 5 | 100 | 100 | 100 |
### Chrome OS
For all priorities, 0 tasks are run while queueing at the 99.5th percentile.
### Analysis
The number of tasks that run while a BEST_EFFORT task is queued is unexpectedly
low. We should explore creating threads less aggressively, at the expense of
keeping BEST_EFFORT tasks in the queue for a longer time. See
[Bug 906079](https://crbug.com/906079).

View file

@ -0,0 +1,23 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/thread_pool/initialization_util.h"
#include <algorithm>
#include <cmath>
#include "base/numerics/ranges.h"
#include "base/system/sys_info.h"
namespace base {
int RecommendedMaxNumberOfThreadsInThreadGroup(int min,
int max,
double cores_multiplier,
int offset) {
const int num_of_cores = SysInfo::NumberOfProcessors();
const int threads =
static_cast<int>(std::ceil(num_of_cores * cores_multiplier)) + offset;
return ClampToRange(threads, min, max);
}
} // namespace base

View file

@ -0,0 +1,22 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_THREAD_POOL_INITIALIZATION_UTIL_H_
#define BASE_TASK_THREAD_POOL_INITIALIZATION_UTIL_H_
#include "base/base_export.h"
namespace base {
// Computes a value that may be used as the maximum number of threads in a
// ThreadGroup. Developers may use other methods to choose this maximum.
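// For example (illustrative numbers): on an 8-core machine,
// RecommendedMaxNumberOfThreadsInThreadGroup(3, 32, 0.5, 1) computes
// ceil(8 * 0.5) + 1 = 5 and clamps it to [3, 32], returning 5.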
BASE_EXPORT int RecommendedMaxNumberOfThreadsInThreadGroup(
int min,
int max,
double cores_multiplier,
int offset);
} // namespace base
#endif // BASE_TASK_THREAD_POOL_INITIALIZATION_UTIL_H_

View file

@ -0,0 +1,402 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/thread_pool/job_task_source.h"
#include <utility>
#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/task/task_features.h"
#include "base/task/thread_pool/pooled_task_runner_delegate.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/time/time_override.h"
namespace base {
namespace internal {
// Memory ordering on |state_| operations
//
// The write operation on |state_| in WillRunTask() uses
// std::memory_order_release, matched by std::memory_order_acquire on read
// operations (in DidProcessTask()) to establish a
// Release-Acquire ordering. When a call to WillRunTask() is caused by an
// increase of max concurrency followed by an associated
// NotifyConcurrencyIncrease(), the priority queue lock guarantees a
// happens-after relation with NotifyConcurrencyIncrease(). This ensures that an
// increase of max concurrency that happened-before NotifyConcurrencyIncrease()
// is visible to a read operation that happens-after WillRunTask().
//
// In DidProcessTask(), this is necessary to
// ensure that the task source is always re-enqueued when it needs to be. When the
// task source needs to be queued, either because the current task yielded or
// because of NotifyConcurrencyIncrease(), one of the following is true:
// A) DidProcessTask() happens-after WillRunTask():
// T1: Current task returns (because it is done) or yields.
// T2: Increases the value returned by GetMaxConcurrency()
// NotifyConcurrencyIncrease() enqueues the task source
// T3: WillRunTask(), in response to the concurrency increase - Release
// Does not keep the TaskSource in PriorityQueue because it is at max
// concurrency
// T1: DidProcessTask() - Acquire - Because of memory barrier, sees the same
// (or newer) max concurrency as T2
// Re-enqueues the TaskSource because no longer at max concurrency
// Without the memory barrier, T1 may see an outdated max concurrency that
// is lower than the actual max concurrency and won't re-enqueue the
// task source, because it thinks it's already saturated.
// The task source often needs to be re-enqueued if its task
// completed because it yielded and |max_concurrency| wasn't decreased.
// B) DidProcessTask() happens-before WillRunTask():
// T1: Current task returns (because it is done) or yields
// T2: Increases the value returned by GetMaxConcurrency()
// NotifyConcurrencyIncrease() enqueues the task source
// T1: DidProcessTask() - Acquire (ineffective)
// Since the task source is already in the queue, it doesn't matter
// whether T1 re-enqueues the task source or not.
// Note that stale values the other way around can cause this task_source to
// be incorrectly re-enqueued, which is not an issue because the queues
// support empty task sources.
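//
// Reduced to its essence, this is the standard release-acquire publication
// pattern (illustrative pseudo-code, not actual members):
//   T2: max_concurrency = new_value;  // made visible via the queue lock
//   T3: state_.store(s, std::memory_order_release);  // WillRunTask()
//   T1: s = state_.load(std::memory_order_acquire);  // DidProcessTask()
//       // T1 now sees max_concurrency == new_value (or newer).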
JobTaskSource::State::State() = default;
JobTaskSource::State::~State() = default;
JobTaskSource::State::Value JobTaskSource::State::Cancel() {
return {value_.fetch_or(kCanceledMask, std::memory_order_relaxed)};
}
JobTaskSource::State::Value
JobTaskSource::State::TryIncrementWorkerCountFromWorkerRelease(
size_t max_concurrency) {
uint32_t value_before_add = value_.load(std::memory_order_relaxed);
// std::memory_order_release on success to establish Release-Acquire ordering
// with DecrementWorkerCountFromWorkerAcquire() (see Memory Ordering comment
// at top of the file).
while (!(value_before_add & kCanceledMask) &&
(value_before_add >> kWorkerCountBitOffset) < max_concurrency &&
!value_.compare_exchange_weak(
value_before_add, value_before_add + kWorkerCountIncrement,
std::memory_order_release, std::memory_order_relaxed)) {
}
return {value_before_add};
}
JobTaskSource::State::Value
JobTaskSource::State::DecrementWorkerCountFromWorkerAcquire() {
const uint32_t value_before_sub =
value_.fetch_sub(kWorkerCountIncrement, std::memory_order_acquire);
DCHECK((value_before_sub >> kWorkerCountBitOffset) > 0);
return {value_before_sub};
}
JobTaskSource::State::Value
JobTaskSource::State::IncrementWorkerCountFromJoiningThread() {
const uint32_t value_before_add =
value_.fetch_add(kWorkerCountIncrement, std::memory_order_relaxed);
return {value_before_add};
}
JobTaskSource::State::Value
JobTaskSource::State::DecrementWorkerCountFromJoiningThread() {
const uint32_t value_before_sub =
value_.fetch_sub(kWorkerCountIncrement, std::memory_order_relaxed);
DCHECK((value_before_sub >> kWorkerCountBitOffset) > 0);
return {value_before_sub};
}
JobTaskSource::State::Value JobTaskSource::State::Load() const {
return {value_.load(std::memory_order_relaxed)};
}
JobTaskSource::JoinFlag::JoinFlag() = default;
JobTaskSource::JoinFlag::~JoinFlag() = default;
void JobTaskSource::JoinFlag::SetWaiting() {
const auto previous_value =
value_.exchange(kWaitingForWorkerToYield, std::memory_order_relaxed);
DCHECK(previous_value == kNotWaiting);
}
bool JobTaskSource::JoinFlag::ShouldWorkerYield() {
// The fetch_and() sets the state to kWaitingForWorkerToSignal if it was
// previously kWaitingForWorkerToYield, otherwise it leaves it unchanged.
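// For example, if |value_| is kWaitingForWorkerToYield (0b11),
// fetch_and(kWaitingForWorkerToSignal) (0b01) stores 0b01 and returns 0b11,
// so exactly one worker observes the yield request.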
return value_.fetch_and(kWaitingForWorkerToSignal,
std::memory_order_relaxed) ==
kWaitingForWorkerToYield;
}
bool JobTaskSource::JoinFlag::ShouldWorkerSignal() {
return value_.exchange(kNotWaiting, std::memory_order_relaxed) != kNotWaiting;
}
JobTaskSource::JobTaskSource(
const Location& from_here,
const TaskTraits& traits,
RepeatingCallback<void(JobDelegate*)> worker_task,
RepeatingCallback<size_t()> max_concurrency_callback,
PooledTaskRunnerDelegate* delegate)
: TaskSource(traits, nullptr, TaskSourceExecutionMode::kJob),
from_here_(from_here),
max_concurrency_callback_(std::move(max_concurrency_callback)),
worker_task_(std::move(worker_task)),
primary_task_(base::BindRepeating(
[](JobTaskSource* self) {
// Each worker task has its own delegate with associated state.
JobDelegate job_delegate{self, self->delegate_};
self->worker_task_.Run(&job_delegate);
},
base::Unretained(this))),
queue_time_(TimeTicks::Now()),
delegate_(delegate) {
DCHECK(delegate_);
}
JobTaskSource::~JobTaskSource() {
// Make sure there's no outstanding active run operation left.
DCHECK_EQ(state_.Load().worker_count(), 0U);
}
ExecutionEnvironment JobTaskSource::GetExecutionEnvironment() {
return {SequenceToken::Create(), nullptr};
}
bool JobTaskSource::WillJoin() {
{
CheckedAutoLock auto_lock(lock_);
DCHECK(!worker_released_condition_); // This may only be called once.
worker_released_condition_ = lock_.CreateConditionVariable();
}
// std::memory_order_relaxed on the worker count in |state_| is sufficient
// because the call to GetMaxConcurrency() is used for a best-effort early
// exit. Stale values will only cause WaitForParticipationOpportunity() to be
// called.
const auto state_before_add = state_.IncrementWorkerCountFromJoiningThread();
if (!state_before_add.is_canceled() &&
state_before_add.worker_count() < GetMaxConcurrency()) {
return true;
}
return WaitForParticipationOpportunity();
}
bool JobTaskSource::RunJoinTask() {
JobDelegate job_delegate{this, nullptr};
worker_task_.Run(&job_delegate);
// std::memory_order_relaxed on the worker count in |state_| is sufficient
// because the call to GetMaxConcurrency() is used for a best-effort early
// exit. Stale values will only cause WaitForParticipationOpportunity() to be
// called.
const auto state = state_.Load();
if (!state.is_canceled() && state.worker_count() <= GetMaxConcurrency())
return true;
return WaitForParticipationOpportunity();
}
void JobTaskSource::Cancel(TaskSource::Transaction* transaction) {
// Sets the kCanceledMask bit on |state_| so that further calls to
// WillRunTask() never succeed. std::memory_order_relaxed is sufficient
// because this task source never needs to be re-enqueued after Cancel().
state_.Cancel();
#if DCHECK_IS_ON()
{
AutoLock auto_lock(version_lock_);
++increase_version_;
version_condition_.Broadcast();
}
#endif // DCHECK_IS_ON()
}
bool JobTaskSource::WaitForParticipationOpportunity() {
CheckedAutoLock auto_lock(lock_);
// std::memory_order_relaxed is sufficient because no other state is
// synchronized with |state_| outside of |lock_|.
auto state = state_.Load();
size_t max_concurrency = GetMaxConcurrency();
// Wait until either:
// A) |worker_count| is below or equal to max concurrency and state is not
// canceled.
// B) All other workers returned and |worker_count| is 1.
while (!((state.worker_count() <= max_concurrency && !state.is_canceled()) ||
state.worker_count() == 1)) {
// std::memory_order_relaxed is sufficient because no other state is
// synchronized with |join_flag_| outside of |lock_|.
join_flag_.SetWaiting();
// To avoid waiting unnecessarily, whenever condition A) or B) may have
// changed, |lock_| is taken and |worker_released_condition_| is signaled if
// necessary:
// 1- In DidProcessTask(), after worker count is decremented.
// 2- In NotifyConcurrencyIncrease(), following a max_concurrency increase.
worker_released_condition_->Wait();
state = state_.Load();
max_concurrency = GetMaxConcurrency();
}
// Case A:
if (state.worker_count() <= max_concurrency && !state.is_canceled())
return true;
// Case B:
// Only the joining thread remains.
DCHECK_EQ(state.worker_count(), 1U);
DCHECK(state.is_canceled() || max_concurrency == 0U);
state_.DecrementWorkerCountFromJoiningThread();
return false;
}
TaskSource::RunStatus JobTaskSource::WillRunTask() {
const size_t max_concurrency = GetMaxConcurrency();
// std::memory_order_release on success to establish Release-Acquire ordering
// with read operations (see Memory Ordering comment at top of the file).
const auto state_before_add =
state_.TryIncrementWorkerCountFromWorkerRelease(max_concurrency);
// Don't allow this worker to run the task if either:
// A) |state_| was canceled.
// B) |worker_count| is already at |max_concurrency|.
// C) |max_concurrency| was lowered below or to |worker_count|.
// Case A:
if (state_before_add.is_canceled())
return RunStatus::kDisallowed;
const size_t worker_count_before_add = state_before_add.worker_count();
// Case B) or C):
if (worker_count_before_add >= max_concurrency)
return RunStatus::kDisallowed;
DCHECK_LT(worker_count_before_add, max_concurrency);
return max_concurrency == worker_count_before_add + 1
? RunStatus::kAllowedSaturated
: RunStatus::kAllowedNotSaturated;
}
size_t JobTaskSource::GetRemainingConcurrency() const {
// std::memory_order_relaxed is sufficient because no other state is
// synchronized with GetRemainingConcurrency().
const auto state = state_.Load();
const size_t max_concurrency = GetMaxConcurrency();
// Avoid underflows.
if (state.is_canceled() || state.worker_count() > max_concurrency)
return 0;
return max_concurrency - state.worker_count();
}
void JobTaskSource::NotifyConcurrencyIncrease() {
#if DCHECK_IS_ON()
{
AutoLock auto_lock(version_lock_);
++increase_version_;
version_condition_.Broadcast();
}
#endif // DCHECK_IS_ON()
// Avoid unnecessary locks when NotifyConcurrencyIncrease() is spuriously
// called.
if (GetRemainingConcurrency() == 0)
return;
{
// Lock is taken to access |join_flag_| below and signal
// |worker_released_condition_|.
CheckedAutoLock auto_lock(lock_);
if (join_flag_.ShouldWorkerSignal())
worker_released_condition_->Signal();
}
// Make sure the task source is in the queue if not already.
// Caveat: it's possible but unlikely that the task source has already reached
// its intended concurrency and doesn't need to be enqueued if there
// previously were too many workers. For simplicity, the task source is always
// enqueued and will get discarded if already saturated when it is popped from
// the priority queue.
delegate_->EnqueueJobTaskSource(this);
}
size_t JobTaskSource::GetMaxConcurrency() const {
return max_concurrency_callback_.Run();
}
bool JobTaskSource::ShouldYield() {
// It is safe to read |join_flag_| without a lock since this
// variable is atomic, keeping in mind that threads may not immediately see
// the new value when it is updated.
return TS_UNCHECKED_READ(join_flag_).ShouldWorkerYield() ||
state_.Load().is_canceled();
}
#if DCHECK_IS_ON()
size_t JobTaskSource::GetConcurrencyIncreaseVersion() const {
AutoLock auto_lock(version_lock_);
return increase_version_;
}
bool JobTaskSource::WaitForConcurrencyIncreaseUpdate(size_t recorded_version) {
AutoLock auto_lock(version_lock_);
constexpr TimeDelta timeout = TimeDelta::FromSeconds(1);
const base::TimeTicks start_time = subtle::TimeTicksNowIgnoringOverride();
do {
DCHECK_LE(recorded_version, increase_version_);
const auto state = state_.Load();
if (recorded_version != increase_version_ || state.is_canceled())
return true;
// Waiting is acceptable because it is in DCHECK-only code.
ScopedAllowBaseSyncPrimitivesOutsideBlockingScope
allow_base_sync_primitives;
version_condition_.TimedWait(timeout);
} while (subtle::TimeTicksNowIgnoringOverride() - start_time < timeout);
return false;
}
#endif // DCHECK_IS_ON()
Task JobTaskSource::TakeTask(TaskSource::Transaction* transaction) {
// JobTaskSource members are not lock-protected so no need to acquire a lock
// if |transaction| is nullptr.
DCHECK_GT(state_.Load().worker_count(), 0U);
DCHECK(primary_task_);
return Task(from_here_, primary_task_, TimeDelta());
}
bool JobTaskSource::DidProcessTask(TaskSource::Transaction* transaction) {
// Lock is needed to access |join_flag_| below and signal
// |worker_released_condition_|. If |transaction| is non-null, |lock_| is
// already held.
CheckedAutoLockMaybe auto_lock(transaction ? nullptr : &lock_);
AnnotateAcquiredLockAlias annotate(lock_, lock_);
// std::memory_order_acquire to establish Release-Acquire ordering with
// WillRunTask() (see Memory Ordering comment at top of the file).
const auto state_before_sub = state_.DecrementWorkerCountFromWorkerAcquire();
if (join_flag_.ShouldWorkerSignal())
worker_released_condition_->Signal();
// A canceled task source should never get re-enqueued.
if (state_before_sub.is_canceled())
return false;
DCHECK_GT(state_before_sub.worker_count(), 0U);
// Re-enqueue the TaskSource if the task ran and the worker count is below the
// max concurrency.
return state_before_sub.worker_count() <= GetMaxConcurrency();
}
SequenceSortKey JobTaskSource::GetSortKey() const {
return SequenceSortKey(traits_.priority(), queue_time_);
}
Task JobTaskSource::Clear(TaskSource::Transaction* transaction) {
Cancel();
// Nothing is cleared since other workers might still racily run tasks. For
// simplicity, the destructor will take care of it once all references are
// released.
return Task(from_here_, DoNothing(), TimeDelta());
}
} // namespace internal
} // namespace base

View file

@ -0,0 +1,224 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_
#define BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_
#include <stddef.h>
#include <atomic>
#include <limits>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/macros.h"
#include "base/optional.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/task/post_job.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool/sequence_sort_key.h"
#include "base/task/thread_pool/task.h"
#include "base/task/thread_pool/task_source.h"
namespace base {
namespace internal {
class PooledTaskRunnerDelegate;
// A JobTaskSource generates many Tasks from a single RepeatingClosure.
//
// The intended concurrency is queried via GetMaxConcurrency(), which runs the
// |max_concurrency_callback_| provided at construction.
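//
// A usage sketch (illustrative only; real usage typically goes through the
// job API in base/task/post_job.h rather than constructing this class
// directly, and the lambdas below are placeholders):
//   auto task_source = base::MakeRefCounted<JobTaskSource>(
//       FROM_HERE, TaskTraits{},
//       BindRepeating([](JobDelegate* delegate) { /* one unit of work */ }),
//       BindRepeating([] { return size_t{4}; }),  // max concurrency
//       pooled_task_runner_delegate);
//   JobHandle handle = JobTaskSource::CreateJobHandle(std::move(task_source));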
class BASE_EXPORT JobTaskSource : public TaskSource {
public:
JobTaskSource(const Location& from_here,
const TaskTraits& traits,
RepeatingCallback<void(JobDelegate*)> worker_task,
RepeatingCallback<size_t()> max_concurrency_callback,
PooledTaskRunnerDelegate* delegate);
static JobHandle CreateJobHandle(
scoped_refptr<internal::JobTaskSource> task_source) {
return JobHandle(std::move(task_source));
}
// Notifies this task source that max concurrency was increased, and that the
// number of workers should be adjusted.
void NotifyConcurrencyIncrease();
// Informs this JobTaskSource that the current thread would like to join and
// contribute to running |worker_task|. Returns true if the joining thread can
// contribute (RunJoinTask() can be called), or false if joining was completed
// and all other workers returned because either there's no work remaining or
// the Job was canceled.
bool WillJoin();
// Contributes to running |worker_task| and returns true if the joining thread
// can contribute again (RunJoinTask() can be called again), or false if
// joining was completed and all other workers returned because either there's
// no work remaining or the Job was canceled. This should be called only after
// WillJoin() or RunJoinTask() previously returned true.
bool RunJoinTask();
// Cancels this JobTaskSource, causing all workers to yield and WillRunTask()
// to return RunStatus::kDisallowed.
void Cancel(TaskSource::Transaction* transaction = nullptr);
// TaskSource:
ExecutionEnvironment GetExecutionEnvironment() override;
size_t GetRemainingConcurrency() const override;
// Returns the maximum number of tasks from this TaskSource that can run
// concurrently.
size_t GetMaxConcurrency() const;
// Returns true if a worker should return from the worker task on the current
// thread ASAP.
bool ShouldYield();
PooledTaskRunnerDelegate* delegate() const { return delegate_; }
#if DCHECK_IS_ON()
size_t GetConcurrencyIncreaseVersion() const;
// Returns true if the concurrency version was updated above
// |recorded_version|, or false on timeout.
bool WaitForConcurrencyIncreaseUpdate(size_t recorded_version);
#endif // DCHECK_IS_ON()
private:
// Atomic internal state to track the number of workers running a task from
// this JobTaskSource and whether this JobTaskSource is canceled.
class State {
public:
static constexpr size_t kCanceledMask = 1;
static constexpr size_t kWorkerCountBitOffset = 1;
static constexpr size_t kWorkerCountIncrement = 1 << kWorkerCountBitOffset;
struct Value {
size_t worker_count() const { return value >> kWorkerCountBitOffset; }
// Returns true if canceled.
bool is_canceled() const { return value & kCanceledMask; }
uint32_t value;
};
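// Bit layout example: with kWorkerCountBitOffset == 1, a |value| of 0b101
// encodes worker_count() == 2 and is_canceled() == true.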
State();
~State();
// Sets as canceled using std::memory_order_relaxed. Returns the state
// before the operation.
Value Cancel();
// Increments the worker count by 1 if smaller than |max_concurrency| and if
// |!is_canceled()|, using std::memory_order_release, and returns the state
// before the operation. Equivalent to Load() otherwise.
Value TryIncrementWorkerCountFromWorkerRelease(size_t max_concurrency);
// Decrements the worker count by 1 using std::memory_order_acquire. Returns
// the state before the operation.
Value DecrementWorkerCountFromWorkerAcquire();
// Increments the worker count by 1 using std::memory_order_relaxed. Returns
// the state before the operation.
Value IncrementWorkerCountFromJoiningThread();
// Decrements the worker count by 1 using std::memory_order_relaxed. Returns
// the state before the operation.
Value DecrementWorkerCountFromJoiningThread();
// Loads and returns the state, using std::memory_order_relaxed.
Value Load() const;
private:
std::atomic<uint32_t> value_{0};
};
// Atomic flag that indicates if the joining thread is currently waiting on
// another worker to yield or to signal.
class JoinFlag {
public:
static constexpr uint32_t kNotWaiting = 0;
static constexpr uint32_t kWaitingForWorkerToSignal = 1;
static constexpr uint32_t kWaitingForWorkerToYield = 3;
// kWaitingForWorkerToYield is 3 because the impl relies on the following
// property.
static_assert((kWaitingForWorkerToYield & kWaitingForWorkerToSignal) ==
kWaitingForWorkerToSignal,
"");
JoinFlag();
~JoinFlag();
// Sets the status as kWaitingForWorkerToYield using
// std::memory_order_relaxed.
void SetWaiting();
// If the flag is kWaitingForWorkerToYield, returns true indicating that the
// worker should yield, and atomically updates to kWaitingForWorkerToSignal
// (using std::memory_order_relaxed) to ensure that a single worker yields
// in response to SetWaiting().
bool ShouldWorkerYield();
// If the flag is kWaiting*, returns true indicating that the worker should
// signal, and atomically updates to kNotWaiting (using
// std::memory_order_relaxed) to ensure that a single worker signals in
// response to SetWaiting().
bool ShouldWorkerSignal();
private:
std::atomic<uint32_t> value_{kNotWaiting};
};
~JobTaskSource() override;
// Called from the joining thread. Waits for the worker count to be below or
// equal to max concurrency (will happen when a worker calls
// DidProcessTask()). Returns true if the joining thread should run a task, or
// false if joining was completed and all other workers returned because
// either there's no work remaining or the Job was canceled.
bool WaitForParticipationOpportunity();
// TaskSource:
RunStatus WillRunTask() override;
Task TakeTask(TaskSource::Transaction* transaction) override;
Task Clear(TaskSource::Transaction* transaction) override;
bool DidProcessTask(TaskSource::Transaction* transaction) override;
SequenceSortKey GetSortKey() const override;
// Current atomic state.
State state_;
// Normally, |join_flag_| is protected by |lock_|, except in ShouldYield();
// hence the use of atomics.
JoinFlag join_flag_ GUARDED_BY(lock_);
// Signaled when |join_flag_| is kWaiting* and a worker returns.
std::unique_ptr<ConditionVariable> worker_released_condition_
GUARDED_BY(lock_);
const Location from_here_;
RepeatingCallback<size_t()> max_concurrency_callback_;
// Worker task set by the job owner.
RepeatingCallback<void(JobDelegate*)> worker_task_;
// Task returned from TakeTask(), that calls |worker_task_| internally.
RepeatingClosure primary_task_;
const TimeTicks queue_time_;
PooledTaskRunnerDelegate* delegate_;
#if DCHECK_IS_ON()
// Synchronizes accesses to |increase_version_|.
mutable Lock version_lock_;
// Signaled whenever increase_version_ is updated.
ConditionVariable version_condition_{&version_lock_};
// Incremented every time max concurrency is increased.
size_t increase_version_ GUARDED_BY(version_lock_) = 0;
#endif // DCHECK_IS_ON()
DISALLOW_COPY_AND_ASSIGN(JobTaskSource);
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_

View file

@ -0,0 +1,48 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/thread_pool/pooled_parallel_task_runner.h"
#include "base/task/thread_pool/pooled_task_runner_delegate.h"
#include "base/task/thread_pool/sequence.h"
namespace base {
namespace internal {
PooledParallelTaskRunner::PooledParallelTaskRunner(
const TaskTraits& traits,
PooledTaskRunnerDelegate* pooled_task_runner_delegate)
: traits_(traits),
pooled_task_runner_delegate_(pooled_task_runner_delegate) {}
PooledParallelTaskRunner::~PooledParallelTaskRunner() = default;
bool PooledParallelTaskRunner::PostDelayedTask(const Location& from_here,
OnceClosure closure,
TimeDelta delay) {
if (!PooledTaskRunnerDelegate::Exists())
return false;
// Post the task as part of a one-off single-task Sequence.
scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>(
traits_, this, TaskSourceExecutionMode::kParallel);
{
CheckedAutoLock auto_lock(lock_);
sequences_.insert(sequence.get());
}
return pooled_task_runner_delegate_->PostTaskWithSequence(
Task(from_here, std::move(closure), delay), std::move(sequence));
}
void PooledParallelTaskRunner::UnregisterSequence(Sequence* sequence) {
DCHECK(sequence);
CheckedAutoLock auto_lock(lock_);
sequences_.erase(sequence);
}
} // namespace internal
} // namespace base

View file

@ -0,0 +1,59 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_THREAD_POOL_POOLED_PARALLEL_TASK_RUNNER_H_
#define BASE_TASK_THREAD_POOL_POOLED_PARALLEL_TASK_RUNNER_H_
#include "base/base_export.h"
#include "base/callback_forward.h"
#include "base/containers/flat_set.h"
#include "base/location.h"
#include "base/task/common/checked_lock.h"
#include "base/task/task_traits.h"
#include "base/task_runner.h"
#include "base/thread_annotations.h"
#include "base/time/time.h"
namespace base {
namespace internal {
class Sequence;
class PooledTaskRunnerDelegate;
// A task runner that runs tasks in parallel.
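// Each task posted through this runner is wrapped in its own single-task
// Sequence, so tasks posted through the same runner may run concurrently
// with each other.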
class BASE_EXPORT PooledParallelTaskRunner : public TaskRunner {
public:
// Constructs a PooledParallelTaskRunner which can be used to post tasks.
PooledParallelTaskRunner(
const TaskTraits& traits,
PooledTaskRunnerDelegate* pooled_task_runner_delegate);
// TaskRunner:
bool PostDelayedTask(const Location& from_here,
OnceClosure closure,
TimeDelta delay) override;
// Removes |sequence| from |sequences_|.
void UnregisterSequence(Sequence* sequence);
private:
~PooledParallelTaskRunner() override;
const TaskTraits traits_;
PooledTaskRunnerDelegate* const pooled_task_runner_delegate_;
CheckedLock lock_;
// List of alive Sequences instantiated by this PooledParallelTaskRunner.
// Sequences are added when they are instantiated, and removed when they are
// destroyed.
base::flat_set<Sequence*> sequences_ GUARDED_BY(lock_);
DISALLOW_COPY_AND_ASSIGN(PooledParallelTaskRunner);
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_THREAD_POOL_POOLED_PARALLEL_TASK_RUNNER_H_

View file

@ -0,0 +1,53 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/thread_pool/pooled_sequenced_task_runner.h"
#include "base/sequence_token.h"
namespace base {
namespace internal {
PooledSequencedTaskRunner::PooledSequencedTaskRunner(
const TaskTraits& traits,
PooledTaskRunnerDelegate* pooled_task_runner_delegate)
: pooled_task_runner_delegate_(pooled_task_runner_delegate),
sequence_(MakeRefCounted<Sequence>(traits,
this,
TaskSourceExecutionMode::kSequenced)) {
}
PooledSequencedTaskRunner::~PooledSequencedTaskRunner() = default;
bool PooledSequencedTaskRunner::PostDelayedTask(const Location& from_here,
OnceClosure closure,
TimeDelta delay) {
if (!PooledTaskRunnerDelegate::Exists())
return false;
Task task(from_here, std::move(closure), delay);
// Post the task as part of |sequence_|.
return pooled_task_runner_delegate_->PostTaskWithSequence(std::move(task),
sequence_);
}
bool PooledSequencedTaskRunner::PostNonNestableDelayedTask(
const Location& from_here,
OnceClosure closure,
TimeDelta delay) {
// Tasks are never nested within the thread pool.
return PostDelayedTask(from_here, std::move(closure), delay);
}
bool PooledSequencedTaskRunner::RunsTasksInCurrentSequence() const {
return sequence_->token() == SequenceToken::GetForCurrentThread();
}
void PooledSequencedTaskRunner::UpdatePriority(TaskPriority priority) {
pooled_task_runner_delegate_->UpdatePriority(sequence_, priority);
}
} // namespace internal
} // namespace base

View file

@ -0,0 +1,56 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_THREAD_POOL_POOLED_SEQUENCED_TASK_RUNNER_H_
#define BASE_TASK_THREAD_POOL_POOLED_SEQUENCED_TASK_RUNNER_H_
#include "base/base_export.h"
#include "base/callback_forward.h"
#include "base/location.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool/pooled_task_runner_delegate.h"
#include "base/task/thread_pool/sequence.h"
#include "base/time/time.h"
#include "base/updateable_sequenced_task_runner.h"
namespace base {
namespace internal {
// A task runner that runs tasks in sequence.
class BASE_EXPORT PooledSequencedTaskRunner
: public UpdateableSequencedTaskRunner {
public:
// Constructs a PooledSequencedTaskRunner which can be used to post tasks.
PooledSequencedTaskRunner(
const TaskTraits& traits,
PooledTaskRunnerDelegate* pooled_task_runner_delegate);
// UpdateableSequencedTaskRunner:
bool PostDelayedTask(const Location& from_here,
OnceClosure closure,
TimeDelta delay) override;
bool PostNonNestableDelayedTask(const Location& from_here,
OnceClosure closure,
TimeDelta delay) override;
bool RunsTasksInCurrentSequence() const override;
void UpdatePriority(TaskPriority priority) override;
private:
~PooledSequencedTaskRunner() override;
PooledTaskRunnerDelegate* const pooled_task_runner_delegate_;
// Sequence for all Tasks posted through this TaskRunner.
const scoped_refptr<Sequence> sequence_;
DISALLOW_COPY_AND_ASSIGN(PooledSequencedTaskRunner);
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_THREAD_POOL_POOLED_SEQUENCED_TASK_RUNNER_H_

View file

@ -0,0 +1,763 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/thread_pool/pooled_single_thread_task_runner_manager.h"
#include <algorithm>
#include <memory>
#include <string>
#include <utility>
#include "base/bind.h"
#include "base/callback.h"
#include "base/memory/ptr_util.h"
#include "base/single_thread_task_runner.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/atomic_flag.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool/delayed_task_manager.h"
#include "base/task/thread_pool/priority_queue.h"
#include "base/task/thread_pool/sequence.h"
#include "base/task/thread_pool/task.h"
#include "base/task/thread_pool/task_source.h"
#include "base/task/thread_pool/task_tracker.h"
#include "base/task/thread_pool/worker_thread.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#if defined(OS_WIN)
#include <windows.h>
#include "base/win/scoped_com_initializer.h"
#endif // defined(OS_WIN)
namespace base {
namespace internal {
namespace {
// Boolean indicating whether there's a PooledSingleThreadTaskRunnerManager
// instance alive in this process. This variable should only be set when the
// PooledSingleThreadTaskRunnerManager instance is brought up (on the main
thread; before any tasks are posted) and cleared when the instance is
brought down (i.e., only when unit tests tear down the task environment and
never in production). This makes the variable effectively const while worker
threads are up, and as such it doesn't need to be atomic. It is used to tell
when a task
// is posted from the main thread after the task environment was brought down in
// unit tests so that PooledSingleThreadTaskRunnerManager bound TaskRunners
// can return false on PostTask, letting such callers know they should complete
// necessary work synchronously. Note: |!g_manager_is_alive| is generally
// equivalent to |!ThreadPoolInstance::Get()| but has the advantage of being
// valid in thread_pool unit tests that don't instantiate a full
// thread pool.
bool g_manager_is_alive = false;
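// Maps |traits| to an EnvironmentType index. For example (assuming
// ThreadPolicy::PREFER_BACKGROUND), TaskTraits{MayBlock(),
// TaskPriority::BEST_EFFORT} maps to BACKGROUND_BLOCKING when background
// worker threads can be used, and to FOREGROUND_BLOCKING otherwise.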
size_t GetEnvironmentIndexForTraits(const TaskTraits& traits) {
const bool is_background =
traits.priority() == TaskPriority::BEST_EFFORT &&
traits.thread_policy() == ThreadPolicy::PREFER_BACKGROUND &&
CanUseBackgroundPriorityForWorkerThread();
if (traits.may_block() || traits.with_base_sync_primitives())
return is_background ? BACKGROUND_BLOCKING : FOREGROUND_BLOCKING;
return is_background ? BACKGROUND : FOREGROUND;
}
// Allows for checking the PlatformThread::CurrentRef() against a set
// PlatformThreadRef atomically without using locks.
class AtomicThreadRefChecker {
public:
AtomicThreadRefChecker() = default;
~AtomicThreadRefChecker() = default;
void Set() {
thread_ref_ = PlatformThread::CurrentRef();
is_set_.Set();
}
bool IsCurrentThreadSameAsSetThread() {
return is_set_.IsSet() && thread_ref_ == PlatformThread::CurrentRef();
}
private:
AtomicFlag is_set_;
PlatformThreadRef thread_ref_;
DISALLOW_COPY_AND_ASSIGN(AtomicThreadRefChecker);
};
class WorkerThreadDelegate : public WorkerThread::Delegate {
public:
WorkerThreadDelegate(const std::string& thread_name,
WorkerThread::ThreadLabel thread_label,
TrackedRef<TaskTracker> task_tracker)
: task_tracker_(std::move(task_tracker)),
thread_name_(thread_name),
thread_label_(thread_label) {}
void set_worker(WorkerThread* worker) {
DCHECK(!worker_);
worker_ = worker;
}
WorkerThread::ThreadLabel GetThreadLabel() const final {
return thread_label_;
}
void OnMainEntry(const WorkerThread* /* worker */) override {
thread_ref_checker_.Set();
PlatformThread::SetName(thread_name_);
}
RegisteredTaskSource GetWork(WorkerThread* worker) override {
CheckedAutoLock auto_lock(lock_);
DCHECK(worker_awake_);
auto task_source = GetWorkLockRequired(worker);
if (!task_source) {
// The worker will sleep after this returns nullptr.
worker_awake_ = false;
return nullptr;
}
auto run_status = task_source.WillRunTask();
DCHECK_NE(run_status, TaskSource::RunStatus::kDisallowed);
return task_source;
}
void DidProcessTask(RegisteredTaskSource task_source) override {
if (task_source) {
EnqueueTaskSource(TransactionWithRegisteredTaskSource::FromTaskSource(
std::move(task_source)));
}
}
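// A Max() timeout means the worker sleeps until explicitly woken via
// WakeUp() when new work is enqueued.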
TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }
bool PostTaskNow(scoped_refptr<Sequence> sequence, Task task) {
auto transaction = sequence->BeginTransaction();
// |task| will be pushed to |sequence|, and |sequence| will be queued
// to |priority_queue_| iff |sequence_should_be_queued| is true.
const bool sequence_should_be_queued = transaction.WillPushTask();
RegisteredTaskSource task_source;
if (sequence_should_be_queued) {
task_source = task_tracker_->RegisterTaskSource(sequence);
// We shouldn't push |task| if we're not allowed to queue |task_source|.
if (!task_source)
return false;
}
if (!task_tracker_->WillPostTaskNow(task, transaction.traits().priority()))
return false;
transaction.PushTask(std::move(task));
if (task_source) {
bool should_wakeup =
EnqueueTaskSource({std::move(task_source), std::move(transaction)});
if (should_wakeup)
worker_->WakeUp();
}
return true;
}
bool RunsTasksInCurrentSequence() {
// We check the thread ref instead of the sequence for the benefit of COM
// callbacks which may execute without a sequence context.
return thread_ref_checker_.IsCurrentThreadSameAsSetThread();
}
void OnMainExit(WorkerThread* /* worker */) override {}
void DidUpdateCanRunPolicy() {
bool should_wakeup = false;
{
CheckedAutoLock auto_lock(lock_);
if (!worker_awake_ && CanRunNextTaskSource()) {
should_wakeup = true;
worker_awake_ = true;
}
}
if (should_wakeup)
worker_->WakeUp();
}
void EnableFlushPriorityQueueTaskSourcesOnDestroyForTesting() {
CheckedAutoLock auto_lock(lock_);
priority_queue_.EnableFlushTaskSourcesOnDestroyForTesting();
}
protected:
RegisteredTaskSource GetWorkLockRequired(WorkerThread* worker)
EXCLUSIVE_LOCKS_REQUIRED(lock_) {
if (!CanRunNextTaskSource()) {
return nullptr;
}
return priority_queue_.PopTaskSource();
}
const TrackedRef<TaskTracker>& task_tracker() { return task_tracker_; }
CheckedLock lock_;
bool worker_awake_ GUARDED_BY(lock_) = false;
const TrackedRef<TaskTracker> task_tracker_;
private:
// Enqueues a task source in this single-threaded worker's priority queue.
// Returns true iff the worker must wake up, i.e. the task source is allowed
// to run and the worker was not awake.
bool EnqueueTaskSource(
TransactionWithRegisteredTaskSource transaction_with_task_source) {
CheckedAutoLock auto_lock(lock_);
priority_queue_.Push(std::move(transaction_with_task_source));
if (!worker_awake_ && CanRunNextTaskSource()) {
worker_awake_ = true;
return true;
}
return false;
}
bool CanRunNextTaskSource() EXCLUSIVE_LOCKS_REQUIRED(lock_) {
return !priority_queue_.IsEmpty() &&
task_tracker_->CanRunPriority(
priority_queue_.PeekSortKey().priority());
}
const std::string thread_name_;
const WorkerThread::ThreadLabel thread_label_;
// The WorkerThread that has |this| as a delegate. Must be set before
// starting or posting a task to the WorkerThread, because it's used in
// OnMainEntry() and PostTaskNow().
WorkerThread* worker_ = nullptr;
PriorityQueue priority_queue_ GUARDED_BY(lock_);
AtomicThreadRefChecker thread_ref_checker_;
DISALLOW_COPY_AND_ASSIGN(WorkerThreadDelegate);
};
#if defined(OS_WIN)
class WorkerThreadCOMDelegate : public WorkerThreadDelegate {
public:
WorkerThreadCOMDelegate(const std::string& thread_name,
WorkerThread::ThreadLabel thread_label,
TrackedRef<TaskTracker> task_tracker)
: WorkerThreadDelegate(thread_name,
thread_label,
std::move(task_tracker)) {}
~WorkerThreadCOMDelegate() override { DCHECK(!scoped_com_initializer_); }
// WorkerThread::Delegate:
void OnMainEntry(const WorkerThread* worker) override {
WorkerThreadDelegate::OnMainEntry(worker);
scoped_com_initializer_ = std::make_unique<win::ScopedCOMInitializer>();
}
RegisteredTaskSource GetWork(WorkerThread* worker) override {
// The scheme below allows us to cover the following scenarios:
// * Only WorkerThreadDelegate::GetWork() has work:
// Always return the task source from GetWork().
// * Only the Windows Message Queue has work:
// Always return the task source from GetWorkFromWindowsMessageQueue();
// * Both WorkerThreadDelegate::GetWork() and the Windows Message Queue
// have work:
// Process task sources from each source round-robin style.
CheckedAutoLock auto_lock(lock_);
// |worker_awake_| is always set before a call to WakeUp(), but it is
// not set when messages are added to the Windows Message Queue. Ensure that
// it is set before getting work, to avoid unnecessary wake ups.
//
// Note: It wouldn't be sufficient to set |worker_awake_| in WaitForWork()
// when MsgWaitForMultipleObjectsEx() indicates that it was woken up by a
// Windows Message, because of the following scenario:
// T1: PostTask
// Queue task
// Set |worker_awake_| to true
// T2: Woken up by a Windows Message
// Set |worker_awake_| to true
// Run the task posted by T1
// Wait for work
// T1: WakeUp()
// T2: Woken up by Waitable Event
// Does not set |worker_awake_| (wake up not from Windows Message)
// GetWork
// !! Getting work while |worker_awake_| is false !!
worker_awake_ = true;
RegisteredTaskSource task_source;
if (get_work_first_) {
task_source = WorkerThreadDelegate::GetWorkLockRequired(worker);
if (task_source)
get_work_first_ = false;
}
if (!task_source) {
CheckedAutoUnlock auto_unlock(lock_);
task_source = GetWorkFromWindowsMessageQueue();
if (task_source)
get_work_first_ = true;
}
if (!task_source && !get_work_first_) {
// This case is important if we checked the Windows Message Queue first
// and found there was no work. We don't want to return null immediately
// as that could cause the thread to go to sleep while work is waiting via
// WorkerThreadDelegate::GetWork().
task_source = WorkerThreadDelegate::GetWorkLockRequired(worker);
}
if (!task_source) {
// The worker will sleep after this returns nullptr.
worker_awake_ = false;
return nullptr;
}
auto run_status = task_source.WillRunTask();
DCHECK_NE(run_status, TaskSource::RunStatus::kDisallowed);
return task_source;
}
void OnMainExit(WorkerThread* /* worker */) override {
scoped_com_initializer_.reset();
}
void WaitForWork(WaitableEvent* wake_up_event) override {
DCHECK(wake_up_event);
const TimeDelta sleep_time = GetSleepTimeout();
const DWORD milliseconds_wait = checked_cast<DWORD>(
sleep_time.is_max() ? INFINITE : sleep_time.InMilliseconds());
const HANDLE wake_up_event_handle = wake_up_event->handle();
MsgWaitForMultipleObjectsEx(1, &wake_up_event_handle, milliseconds_wait,
QS_ALLINPUT, 0);
}
private:
RegisteredTaskSource GetWorkFromWindowsMessageQueue() {
MSG msg;
if (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE) {
Task pump_message_task(FROM_HERE,
BindOnce(
[](MSG msg) {
TranslateMessage(&msg);
DispatchMessage(&msg);
},
std::move(msg)),
TimeDelta());
if (task_tracker()->WillPostTask(
&pump_message_task, TaskShutdownBehavior::SKIP_ON_SHUTDOWN)) {
auto transaction = message_pump_sequence_->BeginTransaction();
const bool sequence_should_be_queued = transaction.WillPushTask();
DCHECK(sequence_should_be_queued)
<< "GetWorkFromWindowsMessageQueue() does not expect "
"queueing of pump tasks.";
auto registered_task_source = task_tracker_->RegisterTaskSource(
std::move(message_pump_sequence_));
if (!registered_task_source)
return nullptr;
transaction.PushTask(std::move(pump_message_task));
return registered_task_source;
}
}
return nullptr;
}
bool get_work_first_ = true;
const scoped_refptr<Sequence> message_pump_sequence_ =
MakeRefCounted<Sequence>(TaskTraits{MayBlock()},
nullptr,
TaskSourceExecutionMode::kParallel);
std::unique_ptr<win::ScopedCOMInitializer> scoped_com_initializer_;
DISALLOW_COPY_AND_ASSIGN(WorkerThreadCOMDelegate);
};
#endif // defined(OS_WIN)
} // namespace
class PooledSingleThreadTaskRunnerManager::PooledSingleThreadTaskRunner
: public SingleThreadTaskRunner {
public:
// Constructs a PooledSingleThreadTaskRunner that indirectly controls the
// lifetime of a dedicated |worker| for |traits|.
PooledSingleThreadTaskRunner(PooledSingleThreadTaskRunnerManager* const outer,
const TaskTraits& traits,
WorkerThread* worker,
SingleThreadTaskRunnerThreadMode thread_mode)
: outer_(outer),
worker_(worker),
thread_mode_(thread_mode),
sequence_(
MakeRefCounted<Sequence>(traits,
this,
TaskSourceExecutionMode::kSingleThread)) {
DCHECK(outer_);
DCHECK(worker_);
}
// SingleThreadTaskRunner:
bool PostDelayedTask(const Location& from_here,
OnceClosure closure,
TimeDelta delay) override {
if (!g_manager_is_alive)
return false;
Task task(from_here, std::move(closure), delay);
if (!outer_->task_tracker_->WillPostTask(&task,
sequence_->shutdown_behavior())) {
return false;
}
if (task.delayed_run_time.is_null())
return GetDelegate()->PostTaskNow(sequence_, std::move(task));
// Unretained(GetDelegate()) is safe because this TaskRunner and its
// worker are kept alive as long as there are pending Tasks.
outer_->delayed_task_manager_->AddDelayedTask(
std::move(task),
BindOnce(IgnoreResult(&WorkerThreadDelegate::PostTaskNow),
Unretained(GetDelegate()), sequence_),
this);
return true;
}
bool PostNonNestableDelayedTask(const Location& from_here,
OnceClosure closure,
TimeDelta delay) override {
// Tasks are never nested within the thread pool.
return PostDelayedTask(from_here, std::move(closure), delay);
}
bool RunsTasksInCurrentSequence() const override {
if (!g_manager_is_alive)
return false;
return GetDelegate()->RunsTasksInCurrentSequence();
}
private:
~PooledSingleThreadTaskRunner() override {
// Only unregister if this is a DEDICATED SingleThreadTaskRunner. SHARED
// task runner WorkerThreads are managed separately as they are reused.
// |g_manager_is_alive| avoids a use-after-free should this
// PooledSingleThreadTaskRunner outlive its manager. It is safe to access
// |g_manager_is_alive| without synchronization primitives as it is const
// for the lifetime of the manager and ~PooledSingleThreadTaskRunner()
// either happens prior to the end of JoinForTesting() (which happens-before
// manager's destruction) or on main thread after the task environment's
// entire destruction (which happens-after the manager's destruction). Yes,
// there's a theoretical use case where the last ref to this
// PooledSingleThreadTaskRunner is handed to a thread not controlled by
// thread_pool, and this ends up causing
// ~PooledSingleThreadTaskRunner() to race with
// ~PooledSingleThreadTaskRunnerManager(), but this is intentionally not
// supported (and it doesn't matter in production, where we leak the task
// environment for such reasons). TSan should catch this weird paradigm
// should anyone elect to use it in a unit test, and the error would point
// here.
if (g_manager_is_alive &&
thread_mode_ == SingleThreadTaskRunnerThreadMode::DEDICATED) {
outer_->UnregisterWorkerThread(worker_);
}
}
WorkerThreadDelegate* GetDelegate() const {
return static_cast<WorkerThreadDelegate*>(worker_->delegate());
}
PooledSingleThreadTaskRunnerManager* const outer_;
WorkerThread* const worker_;
const SingleThreadTaskRunnerThreadMode thread_mode_;
const scoped_refptr<Sequence> sequence_;
DISALLOW_COPY_AND_ASSIGN(PooledSingleThreadTaskRunner);
};
PooledSingleThreadTaskRunnerManager::PooledSingleThreadTaskRunnerManager(
TrackedRef<TaskTracker> task_tracker,
DelayedTaskManager* delayed_task_manager)
: task_tracker_(std::move(task_tracker)),
delayed_task_manager_(delayed_task_manager) {
DCHECK(task_tracker_);
DCHECK(delayed_task_manager_);
#if defined(OS_WIN)
static_assert(std::extent<decltype(shared_com_worker_threads_)>() ==
std::extent<decltype(shared_worker_threads_)>(),
"The size of |shared_com_worker_threads_| must match "
"|shared_worker_threads_|");
static_assert(
std::extent<
std::remove_reference<decltype(shared_com_worker_threads_[0])>>() ==
std::extent<
std::remove_reference<decltype(shared_worker_threads_[0])>>(),
"The size of |shared_com_worker_threads_| must match "
"|shared_worker_threads_|");
#endif // defined(OS_WIN)
DCHECK(!g_manager_is_alive);
g_manager_is_alive = true;
}
PooledSingleThreadTaskRunnerManager::~PooledSingleThreadTaskRunnerManager() {
DCHECK(g_manager_is_alive);
g_manager_is_alive = false;
}
void PooledSingleThreadTaskRunnerManager::Start(
WorkerThreadObserver* worker_thread_observer) {
DCHECK(!worker_thread_observer_);
worker_thread_observer_ = worker_thread_observer;
decltype(workers_) workers_to_start;
{
CheckedAutoLock auto_lock(lock_);
started_ = true;
workers_to_start = workers_;
}
// Start workers that were created before this method was called.
// Workers that already need to wake up are already signaled as part of
// PooledSingleThreadTaskRunner::PostTaskNow(). As a result, it's
// unnecessary to call WakeUp() for each worker (in fact, an extraneous
// WakeUp() would be racy and wrong - see https://crbug.com/862582).
for (scoped_refptr<WorkerThread> worker : workers_to_start) {
worker->Start(worker_thread_observer_);
}
}
void PooledSingleThreadTaskRunnerManager::DidUpdateCanRunPolicy() {
decltype(workers_) workers_to_update;
{
CheckedAutoLock auto_lock(lock_);
if (!started_)
return;
workers_to_update = workers_;
}
// Any worker created after the lock is released will see the latest
// CanRunPolicy if tasks are posted to it and thus doesn't need a
// DidUpdateCanRunPolicy() notification.
for (auto& worker : workers_to_update) {
static_cast<WorkerThreadDelegate*>(worker->delegate())
->DidUpdateCanRunPolicy();
}
}
scoped_refptr<SingleThreadTaskRunner>
PooledSingleThreadTaskRunnerManager::CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
return CreateTaskRunnerImpl<WorkerThreadDelegate>(traits, thread_mode);
}
#if defined(OS_WIN)
scoped_refptr<SingleThreadTaskRunner>
PooledSingleThreadTaskRunnerManager::CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
return CreateTaskRunnerImpl<WorkerThreadCOMDelegate>(traits, thread_mode);
}
#endif // defined(OS_WIN)
// static
PooledSingleThreadTaskRunnerManager::ContinueOnShutdown
PooledSingleThreadTaskRunnerManager::TraitsToContinueOnShutdown(
const TaskTraits& traits) {
if (traits.shutdown_behavior() == TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN)
return IS_CONTINUE_ON_SHUTDOWN;
return IS_NOT_CONTINUE_ON_SHUTDOWN;
}
template <typename DelegateType>
scoped_refptr<PooledSingleThreadTaskRunnerManager::PooledSingleThreadTaskRunner>
PooledSingleThreadTaskRunnerManager::CreateTaskRunnerImpl(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode) {
DCHECK(thread_mode != SingleThreadTaskRunnerThreadMode::SHARED ||
!traits.with_base_sync_primitives())
<< "Using WithBaseSyncPrimitives() on a shared SingleThreadTaskRunner "
"may cause deadlocks. Either reevaluate your usage (e.g. use "
"SequencedTaskRunner) or use "
"SingleThreadTaskRunnerThreadMode::DEDICATED.";
// To simplify the code, |dedicated_worker| is a local-only variable that
// allows the code to treat both the DEDICATED and SHARED cases similarly for
// SingleThreadTaskRunnerThreadMode. In DEDICATED, the WorkerThread* is backed
// by a local variable; in SHARED, it is backed by a member variable.
WorkerThread* dedicated_worker = nullptr;
WorkerThread*& worker =
thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
? dedicated_worker
: GetSharedWorkerThreadForTraits<DelegateType>(traits);
bool new_worker = false;
bool started;
{
CheckedAutoLock auto_lock(lock_);
if (!worker) {
const auto& environment_params =
kEnvironmentParams[GetEnvironmentIndexForTraits(traits)];
std::string worker_name;
if (thread_mode == SingleThreadTaskRunnerThreadMode::SHARED)
worker_name += "Shared";
worker_name += environment_params.name_suffix;
worker = CreateAndRegisterWorkerThread<DelegateType>(
worker_name, thread_mode,
CanUseBackgroundPriorityForWorkerThread()
? environment_params.priority_hint
: ThreadPriority::NORMAL);
new_worker = true;
}
started = started_;
}
if (new_worker && started)
worker->Start(worker_thread_observer_);
return MakeRefCounted<PooledSingleThreadTaskRunner>(this, traits, worker,
thread_mode);
}
void PooledSingleThreadTaskRunnerManager::JoinForTesting() {
decltype(workers_) local_workers;
{
CheckedAutoLock auto_lock(lock_);
local_workers = std::move(workers_);
}
for (const auto& worker : local_workers) {
static_cast<WorkerThreadDelegate*>(worker->delegate())
->EnableFlushPriorityQueueTaskSourcesOnDestroyForTesting();
worker->JoinForTesting();
}
{
CheckedAutoLock auto_lock(lock_);
DCHECK(workers_.empty())
<< "New worker(s) unexpectedly registered during join.";
workers_ = std::move(local_workers);
}
// Release shared WorkerThreads at the end so they get joined above. If
// this call happens before the joins, the WorkerThreads are effectively
// detached and may outlive the PooledSingleThreadTaskRunnerManager.
ReleaseSharedWorkerThreads();
}
template <>
std::unique_ptr<WorkerThreadDelegate>
PooledSingleThreadTaskRunnerManager::CreateWorkerThreadDelegate<
WorkerThreadDelegate>(const std::string& name,
int id,
SingleThreadTaskRunnerThreadMode thread_mode) {
return std::make_unique<WorkerThreadDelegate>(
StringPrintf("ThreadPoolSingleThread%s%d", name.c_str(), id),
thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
? WorkerThread::ThreadLabel::DEDICATED
: WorkerThread::ThreadLabel::SHARED,
task_tracker_);
}
#if defined(OS_WIN)
template <>
std::unique_ptr<WorkerThreadDelegate>
PooledSingleThreadTaskRunnerManager::CreateWorkerThreadDelegate<
WorkerThreadCOMDelegate>(const std::string& name,
int id,
SingleThreadTaskRunnerThreadMode thread_mode) {
return std::make_unique<WorkerThreadCOMDelegate>(
StringPrintf("ThreadPoolSingleThreadCOMSTA%s%d", name.c_str(), id),
thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
? WorkerThread::ThreadLabel::DEDICATED_COM
: WorkerThread::ThreadLabel::SHARED_COM,
task_tracker_);
}
#endif // defined(OS_WIN)
template <typename DelegateType>
WorkerThread*
PooledSingleThreadTaskRunnerManager::CreateAndRegisterWorkerThread(
const std::string& name,
SingleThreadTaskRunnerThreadMode thread_mode,
ThreadPriority priority_hint) {
int id = next_worker_id_++;
std::unique_ptr<WorkerThreadDelegate> delegate =
CreateWorkerThreadDelegate<DelegateType>(name, id, thread_mode);
WorkerThreadDelegate* delegate_raw = delegate.get();
scoped_refptr<WorkerThread> worker = MakeRefCounted<WorkerThread>(
priority_hint, std::move(delegate), task_tracker_);
delegate_raw->set_worker(worker.get());
workers_.emplace_back(std::move(worker));
return workers_.back().get();
}
template <>
WorkerThread*&
PooledSingleThreadTaskRunnerManager::GetSharedWorkerThreadForTraits<
WorkerThreadDelegate>(const TaskTraits& traits) {
return shared_worker_threads_[GetEnvironmentIndexForTraits(traits)]
[TraitsToContinueOnShutdown(traits)];
}
#if defined(OS_WIN)
template <>
WorkerThread*&
PooledSingleThreadTaskRunnerManager::GetSharedWorkerThreadForTraits<
WorkerThreadCOMDelegate>(const TaskTraits& traits) {
return shared_com_worker_threads_[GetEnvironmentIndexForTraits(traits)]
[TraitsToContinueOnShutdown(traits)];
}
#endif // defined(OS_WIN)
void PooledSingleThreadTaskRunnerManager::UnregisterWorkerThread(
WorkerThread* worker) {
// Cleanup uses a CheckedLock, so call Cleanup() after releasing |lock_|.
scoped_refptr<WorkerThread> worker_to_destroy;
{
CheckedAutoLock auto_lock(lock_);
// Skip when joining (the join logic takes care of the rest).
if (workers_.empty())
return;
auto worker_iter = std::find(workers_.begin(), workers_.end(), worker);
DCHECK(worker_iter != workers_.end());
worker_to_destroy = std::move(*worker_iter);
workers_.erase(worker_iter);
}
worker_to_destroy->Cleanup();
}
void PooledSingleThreadTaskRunnerManager::ReleaseSharedWorkerThreads() {
decltype(shared_worker_threads_) local_shared_worker_threads;
#if defined(OS_WIN)
decltype(shared_com_worker_threads_) local_shared_com_worker_threads;
#endif
{
CheckedAutoLock auto_lock(lock_);
for (size_t i = 0; i < base::size(shared_worker_threads_); ++i) {
for (size_t j = 0; j < base::size(shared_worker_threads_[i]); ++j) {
local_shared_worker_threads[i][j] = shared_worker_threads_[i][j];
shared_worker_threads_[i][j] = nullptr;
#if defined(OS_WIN)
local_shared_com_worker_threads[i][j] =
shared_com_worker_threads_[i][j];
shared_com_worker_threads_[i][j] = nullptr;
#endif
}
}
}
for (size_t i = 0; i < base::size(local_shared_worker_threads); ++i) {
for (size_t j = 0; j < base::size(local_shared_worker_threads[i]); ++j) {
if (local_shared_worker_threads[i][j])
UnregisterWorkerThread(local_shared_worker_threads[i][j]);
#if defined(OS_WIN)
if (local_shared_com_worker_threads[i][j])
UnregisterWorkerThread(local_shared_com_worker_threads[i][j]);
#endif
}
}
}
} // namespace internal
} // namespace base


@@ -0,0 +1,158 @@ base/task/thread_pool/pooled_single_thread_task_runner_manager.h
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_THREAD_POOL_POOLED_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
#define BASE_TASK_THREAD_POOL_POOLED_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
#include <memory>
#include <string>
#include <vector>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/task/common/checked_lock.h"
#include "base/task/single_thread_task_runner_thread_mode.h"
#include "base/task/thread_pool/environment_config.h"
#include "base/task/thread_pool/tracked_ref.h"
#include "base/thread_annotations.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
namespace base {
class TaskTraits;
class WorkerThreadObserver;
class SingleThreadTaskRunner;
namespace internal {
class DelayedTaskManager;
class WorkerThread;
class TaskTracker;
namespace {
class WorkerThreadDelegate;
} // namespace
// Manages a group of threads which are each associated with one or more
// SingleThreadTaskRunners.
//
// SingleThreadTaskRunners using SingleThreadTaskRunnerThreadMode::SHARED are
// backed by shared WorkerThreads for each COM+task environment combination.
// These workers are lazily instantiated and then only reclaimed during
// JoinForTesting().
//
// No threads are created (and hence no tasks can run) before Start() is called.
//
// This class is thread-safe.
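//
// Example usage (an illustrative sketch, not taken from this codebase;
// assumes a |manager| that has been Start()ed, and DoWork() is a
// hypothetical function):
//
//   scoped_refptr<SingleThreadTaskRunner> shared_runner =
//       manager.CreateSingleThreadTaskRunner(
//           {TaskPriority::USER_VISIBLE},
//           SingleThreadTaskRunnerThreadMode::SHARED);
//   scoped_refptr<SingleThreadTaskRunner> dedicated_runner =
//       manager.CreateSingleThreadTaskRunner(
//           {MayBlock()}, SingleThreadTaskRunnerThreadMode::DEDICATED);
//   // |shared_runner| may share its thread with other SHARED runners of the
//   // same environment; |dedicated_runner| always gets its own thread.
//   shared_runner->PostTask(FROM_HERE, BindOnce(&DoWork));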
class BASE_EXPORT PooledSingleThreadTaskRunnerManager final {
public:
PooledSingleThreadTaskRunnerManager(TrackedRef<TaskTracker> task_tracker,
DelayedTaskManager* delayed_task_manager);
~PooledSingleThreadTaskRunnerManager();
// Starts threads for existing SingleThreadTaskRunners and allows threads to
// be started when SingleThreadTaskRunners are created in the future. If
// specified, |worker_thread_observer| will be notified when a worker
// enters and exits its main function. It must not be destroyed before
// JoinForTesting() has returned (must never be destroyed in production).
void Start(WorkerThreadObserver* worker_thread_observer = nullptr);
// Wakes up workers as appropriate for the new CanRunPolicy policy. Must be
// called after an update to CanRunPolicy in TaskTracker.
void DidUpdateCanRunPolicy();
// Creates a SingleThreadTaskRunner which runs tasks with |traits| on a thread
// named "ThreadPoolSingleThread[Shared]" +
// kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
// index.
scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode);
#if defined(OS_WIN)
// Creates a SingleThreadTaskRunner which runs tasks with |traits| on a COM
// STA thread named "ThreadPoolSingleThreadCOMSTA[Shared]" +
// kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
// index.
scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode);
#endif // defined(OS_WIN)
void JoinForTesting();
private:
class PooledSingleThreadTaskRunner;
enum ContinueOnShutdown {
IS_CONTINUE_ON_SHUTDOWN,
IS_NOT_CONTINUE_ON_SHUTDOWN,
CONTINUE_ON_SHUTDOWN_COUNT,
};
static ContinueOnShutdown TraitsToContinueOnShutdown(
const TaskTraits& traits);
template <typename DelegateType>
scoped_refptr<PooledSingleThreadTaskRunner> CreateTaskRunnerImpl(
const TaskTraits& traits,
SingleThreadTaskRunnerThreadMode thread_mode);
template <typename DelegateType>
std::unique_ptr<WorkerThreadDelegate> CreateWorkerThreadDelegate(
const std::string& name,
int id,
SingleThreadTaskRunnerThreadMode thread_mode);
template <typename DelegateType>
WorkerThread* CreateAndRegisterWorkerThread(
const std::string& name,
SingleThreadTaskRunnerThreadMode thread_mode,
ThreadPriority priority_hint) EXCLUSIVE_LOCKS_REQUIRED(lock_);
template <typename DelegateType>
WorkerThread*& GetSharedWorkerThreadForTraits(const TaskTraits& traits);
void UnregisterWorkerThread(WorkerThread* worker);
void ReleaseSharedWorkerThreads();
const TrackedRef<TaskTracker> task_tracker_;
DelayedTaskManager* const delayed_task_manager_;
// Optional observer notified when a worker enters and exits its main
// function. Set in Start() and never modified afterwards.
WorkerThreadObserver* worker_thread_observer_ = nullptr;
CheckedLock lock_;
std::vector<scoped_refptr<WorkerThread>> workers_ GUARDED_BY(lock_);
int next_worker_id_ GUARDED_BY(lock_) = 0;
// Workers for SingleThreadTaskRunnerThreadMode::SHARED tasks. It is
// important to have separate threads for CONTINUE_ON_SHUTDOWN and non-
// CONTINUE_ON_SHUTDOWN to avoid being in a situation where a
// CONTINUE_ON_SHUTDOWN task effectively blocks shutdown by preventing a
// BLOCK_SHUTDOWN task from being scheduled. https://crbug.com/829786
WorkerThread* shared_worker_threads_[ENVIRONMENT_COUNT]
[CONTINUE_ON_SHUTDOWN_COUNT] GUARDED_BY(
lock_) = {};
#if defined(OS_WIN)
WorkerThread* shared_com_worker_threads_
[ENVIRONMENT_COUNT][CONTINUE_ON_SHUTDOWN_COUNT] GUARDED_BY(lock_) = {};
#endif // defined(OS_WIN)
// Set to true when Start() is called.
bool started_ GUARDED_BY(lock_) = false;
DISALLOW_COPY_AND_ASSIGN(PooledSingleThreadTaskRunnerManager);
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_THREAD_POOL_POOLED_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_


@@ -0,0 +1,40 @@ base/task/thread_pool/pooled_task_runner_delegate.cc
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/thread_pool/pooled_task_runner_delegate.h"
namespace base {
namespace internal {
namespace {
// Indicates whether a PooledTaskRunnerDelegate instance exists in the
// process. Used to tell when a task is posted from the main thread after the
// task environment was brought down in unit tests so that TaskRunners can
// return false on PostTask, letting callers know they should complete
// necessary work synchronously. A PooledTaskRunnerDelegate is usually
// instantiated before worker threads are started and deleted after worker
// threads have been joined. This makes the variable effectively const while
// worker threads are up, and as such it doesn't need to be atomic.
bool g_exists = false;
} // namespace
PooledTaskRunnerDelegate::PooledTaskRunnerDelegate() {
DCHECK(!g_exists);
g_exists = true;
}
PooledTaskRunnerDelegate::~PooledTaskRunnerDelegate() {
DCHECK(g_exists);
g_exists = false;
}
// static
bool PooledTaskRunnerDelegate::Exists() {
return g_exists;
}
} // namespace internal
} // namespace base


@@ -0,0 +1,64 @@ base/task/thread_pool/pooled_task_runner_delegate.h
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_THREAD_POOL_POOLED_TASK_RUNNER_DELEGATE_H_
#define BASE_TASK_THREAD_POOL_POOLED_TASK_RUNNER_DELEGATE_H_
#include "base/base_export.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool/job_task_source.h"
#include "base/task/thread_pool/sequence.h"
#include "base/task/thread_pool/task.h"
#include "base/task/thread_pool/task_source.h"
namespace base {
namespace internal {
// Delegate interface for PooledParallelTaskRunner and
// PooledSequencedTaskRunner.
class BASE_EXPORT PooledTaskRunnerDelegate {
public:
PooledTaskRunnerDelegate();
virtual ~PooledTaskRunnerDelegate();
// Returns true if a PooledTaskRunnerDelegate instance exists in the
// process. This is needed for unit tests in which a TaskRunner
// outlives the ThreadPoolInstance that created it.
static bool Exists();
// Returns true if the task currently running in |task_source| must return
// ASAP.
// Thread-safe but may return an outdated result (if a task unnecessarily
// yields due to this, it will simply be re-scheduled).
virtual bool ShouldYield(const TaskSource* task_source) const = 0;
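//
// A long-running task may cooperate with this as follows (illustrative
// sketch; HasMoreWork() and DoSomeWork() are hypothetical helpers, and
// |delegate| / |task_source| are assumed to be in scope):
//
//   while (HasMoreWork()) {
//     DoSomeWork();
//     if (delegate->ShouldYield(task_source))
//       break;  // Yield; the task source will be re-scheduled.
//   }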
// Invoked when a |task| is posted to the PooledParallelTaskRunner or
// PooledSequencedTaskRunner. The implementation must post |task| to
// |sequence| within the appropriate priority queue, depending on
// |sequence|'s traits. Returns true if the task was successfully posted.
virtual bool PostTaskWithSequence(Task task,
scoped_refptr<Sequence> sequence) = 0;
// Invoked when a task is posted as a Job. The implementation must add
// |task_source| to the appropriate priority queue, depending on
// |task_source|'s traits, if it's not there already. Returns true if the
// task source was successfully enqueued or was already enqueued.
virtual bool EnqueueJobTaskSource(
scoped_refptr<JobTaskSource> task_source) = 0;
// Removes |task_source| from the priority queue.
virtual void RemoveJobTaskSource(
scoped_refptr<JobTaskSource> task_source) = 0;
// Invoked when the priority of |task_source|'s TaskRunner is updated. The
// implementation must update |task_source|'s priority to |priority|, then
// place |task_source| in the correct priority-queue position within the
// appropriate thread group.
virtual void UpdatePriority(scoped_refptr<TaskSource> task_source,
TaskPriority priority) = 0;
};
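//
// Example of the Exists() guard (an illustrative sketch; FlushStateToDisk()
// is a hypothetical fallback run synchronously when the thread pool is
// already gone, e.g. during unit-test teardown):
//
//   if (!internal::PooledTaskRunnerDelegate::Exists() ||
//       !task_runner->PostTask(FROM_HERE, BindOnce(&FlushStateToDisk))) {
//     FlushStateToDisk();
//   }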
} // namespace internal
} // namespace base
#endif // BASE_TASK_THREAD_POOL_POOLED_TASK_RUNNER_DELEGATE_H_


@@ -0,0 +1,210 @@ base/task/thread_pool/priority_queue.cc
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/thread_pool/priority_queue.h"
#include <utility>
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/stl_util.h"
namespace base {
namespace internal {
// A class combining a TaskSource and the SequenceSortKey that determines its
// position in a PriorityQueue. Instances are only mutable via
// take_task_source() which can only be called once and renders its instance
// invalid after the call.
class PriorityQueue::TaskSourceAndSortKey {
public:
TaskSourceAndSortKey() = default;
TaskSourceAndSortKey(RegisteredTaskSource task_source,
const SequenceSortKey& sort_key)
: task_source_(std::move(task_source)), sort_key_(sort_key) {
DCHECK(task_source_);
}
// Note: while |task_source_| should always be non-null post-move (i.e. we
// shouldn't be moving an invalid TaskSourceAndSortKey around), there can't be
// a DCHECK(task_source_) on moves: IntrusiveHeap moves elements on pop
// instead of overwriting them, which results in moving a
// TaskSourceAndSortKey with a null |task_source_| in PopTaskSource()'s
// implementation.
TaskSourceAndSortKey(TaskSourceAndSortKey&& other) = default;
TaskSourceAndSortKey& operator=(TaskSourceAndSortKey&& other) = default;
// Extracts |task_source_| from this object. This object is invalid after this
// call.
RegisteredTaskSource take_task_source() {
DCHECK(task_source_);
task_source_->ClearHeapHandle();
return std::move(task_source_);
}
// Compares this TaskSourceAndSortKey to |other| based on their respective
// |sort_key_|. Required by IntrusiveHeap.
bool operator<=(const TaskSourceAndSortKey& other) const {
return sort_key_ <= other.sort_key_;
}
// Required by IntrusiveHeap.
void SetHeapHandle(const HeapHandle& handle) {
DCHECK(task_source_);
task_source_->SetHeapHandle(handle);
}
// Required by IntrusiveHeap.
void ClearHeapHandle() {
// |task_source_| may be nullptr if take_task_source() was called before
// this, in which case there is no heap handle to clear.
if (task_source_)
task_source_->ClearHeapHandle();
}
// Required by IntrusiveHeap.
HeapHandle GetHeapHandle() const {
if (task_source_)
return task_source_->GetHeapHandle();
return HeapHandle::Invalid();
}
const RegisteredTaskSource& task_source() const { return task_source_; }
RegisteredTaskSource& task_source() { return task_source_; }
const SequenceSortKey& sort_key() const { return sort_key_; }
private:
RegisteredTaskSource task_source_;
SequenceSortKey sort_key_;
DISALLOW_COPY_AND_ASSIGN(TaskSourceAndSortKey);
};
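// Illustrative sketch of how the IntrusiveHeap contract above is exercised
// by this file (no additional functionality; |registered| and |key| are
// assumed to exist):
//
//   IntrusiveHeap<TaskSourceAndSortKey> heap;
//   heap.insert(TaskSourceAndSortKey(std::move(registered), key));
//   // insert() invokes SetHeapHandle(), so the element can later be located
//   // in O(1) via TaskSource::heap_handle(), as done in RemoveTaskSource()
//   // and UpdateSortKey().
//   const TaskSourceAndSortKey& top = heap.Min();  // Ordered by operator<=.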
PriorityQueue::PriorityQueue() = default;
PriorityQueue::~PriorityQueue() {
if (!is_flush_task_sources_on_destroy_enabled_)
return;
while (!container_.empty()) {
auto task_source = PopTaskSource();
auto task = task_source.Clear();
std::move(task.task).Run();
}
}
PriorityQueue& PriorityQueue::operator=(PriorityQueue&& other) = default;
void PriorityQueue::Push(
TransactionWithRegisteredTaskSource transaction_with_task_source) {
auto sequence_sort_key =
transaction_with_task_source.transaction.GetSortKey();
container_.insert(TaskSourceAndSortKey(
std::move(transaction_with_task_source.task_source), sequence_sort_key));
IncrementNumTaskSourcesForPriority(sequence_sort_key.priority());
}
const SequenceSortKey& PriorityQueue::PeekSortKey() const {
DCHECK(!IsEmpty());
return container_.Min().sort_key();
}
RegisteredTaskSource& PriorityQueue::PeekTaskSource() const {
DCHECK(!IsEmpty());
// The const_cast on Min() is okay since modifying the TaskSource cannot alter
// the sort order of TaskSourceAndSortKey.
auto& task_source_and_sort_key =
const_cast<PriorityQueue::TaskSourceAndSortKey&>(container_.Min());
return task_source_and_sort_key.task_source();
}
RegisteredTaskSource PriorityQueue::PopTaskSource() {
DCHECK(!IsEmpty());
// The const_cast on Min() is okay since the TaskSourceAndSortKey is
// transactionally being popped from |container_| right after and taking its
// TaskSource does not alter its sort order.
auto& task_source_and_sort_key =
const_cast<TaskSourceAndSortKey&>(container_.Min());
DecrementNumTaskSourcesForPriority(
task_source_and_sort_key.sort_key().priority());
RegisteredTaskSource task_source =
task_source_and_sort_key.take_task_source();
container_.Pop();
return task_source;
}
RegisteredTaskSource PriorityQueue::RemoveTaskSource(
const TaskSource& task_source) {
if (IsEmpty())
return nullptr;
const HeapHandle heap_handle = task_source.heap_handle();
if (!heap_handle.IsValid())
return nullptr;
TaskSourceAndSortKey& task_source_and_sort_key =
const_cast<PriorityQueue::TaskSourceAndSortKey&>(
container_.at(heap_handle));
DCHECK_EQ(task_source_and_sort_key.task_source().get(), &task_source);
RegisteredTaskSource registered_task_source =
task_source_and_sort_key.take_task_source();
DecrementNumTaskSourcesForPriority(
task_source_and_sort_key.sort_key().priority());
container_.erase(heap_handle);
return registered_task_source;
}
void PriorityQueue::UpdateSortKey(TaskSource::Transaction transaction) {
DCHECK(transaction);
if (IsEmpty())
return;
const HeapHandle heap_handle = transaction.task_source()->heap_handle();
if (!heap_handle.IsValid())
return;
auto old_sort_key = container_.at(heap_handle).sort_key();
auto new_sort_key = transaction.GetSortKey();
auto registered_task_source =
const_cast<PriorityQueue::TaskSourceAndSortKey&>(
container_.at(heap_handle))
.take_task_source();
DecrementNumTaskSourcesForPriority(old_sort_key.priority());
IncrementNumTaskSourcesForPriority(new_sort_key.priority());
container_.ChangeKey(
heap_handle,
TaskSourceAndSortKey(std::move(registered_task_source), new_sort_key));
}
bool PriorityQueue::IsEmpty() const {
return container_.empty();
}
size_t PriorityQueue::Size() const {
return container_.size();
}
void PriorityQueue::EnableFlushTaskSourcesOnDestroyForTesting() {
DCHECK(!is_flush_task_sources_on_destroy_enabled_);
is_flush_task_sources_on_destroy_enabled_ = true;
}
void PriorityQueue::DecrementNumTaskSourcesForPriority(TaskPriority priority) {
DCHECK_GT(num_task_sources_per_priority_[static_cast<int>(priority)], 0U);
--num_task_sources_per_priority_[static_cast<int>(priority)];
}
void PriorityQueue::IncrementNumTaskSourcesForPriority(TaskPriority priority) {
++num_task_sources_per_priority_[static_cast<int>(priority)];
}
} // namespace internal
} // namespace base


@@ -0,0 +1,99 @@ base/task/thread_pool/priority_queue.h
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_THREAD_POOL_PRIORITY_QUEUE_H_
#define BASE_TASK_THREAD_POOL_PRIORITY_QUEUE_H_
#include <stddef.h>

#include <array>
#include <memory>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/task/common/checked_lock.h"
#include "base/task/common/intrusive_heap.h"
#include "base/task/thread_pool/sequence_sort_key.h"
#include "base/task/thread_pool/task_source.h"
namespace base {
namespace internal {
// A PriorityQueue holds TaskSources of Tasks. This class is not thread-safe
// (requires external synchronization).
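//
// Typical use under the owner's synchronization (an illustrative sketch;
// |queue| and |transaction_with_task_source| are assumed to exist, and the
// caller is assumed to hold its own lock):
//
//   queue.Push(std::move(transaction_with_task_source));
//   if (!queue.IsEmpty() &&
//       queue.PeekSortKey().priority() == TaskPriority::USER_BLOCKING) {
//     RegisteredTaskSource task_source = queue.PopTaskSource();
//     // Hand |task_source| to a worker outside the owner's lock.
//   }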
class BASE_EXPORT PriorityQueue {
public:
PriorityQueue();
~PriorityQueue();
PriorityQueue& operator=(PriorityQueue&& other);
// Inserts the TaskSource from |transaction_with_task_source| into the
// PriorityQueue, with the sort key obtained from its Transaction.
void Push(TransactionWithRegisteredTaskSource transaction_with_task_source);
// Returns a reference to the SequenceSortKey representing the priority of
// the highest pending task in this PriorityQueue. The reference becomes
// invalid the next time that this PriorityQueue is modified.
// Cannot be called on an empty PriorityQueue.
const SequenceSortKey& PeekSortKey() const;
// Returns a reference to the highest priority TaskSource in this
// PriorityQueue. Cannot be called on an empty PriorityQueue. The returned
// task source may be modified as long as its sort key isn't affected.
RegisteredTaskSource& PeekTaskSource() const;
// Removes and returns the highest priority TaskSource in this PriorityQueue.
// Cannot be called on an empty PriorityQueue.
RegisteredTaskSource PopTaskSource();
// Removes |task_source| from the PriorityQueue. Returns a
// RegisteredTaskSource which evaluates to true if successful, or false if
// |task_source| is not currently in the PriorityQueue or the PriorityQueue is
// empty.
RegisteredTaskSource RemoveTaskSource(const TaskSource& task_source);
// Updates the sort key of the TaskSource in |transaction| to
// match its current traits. No-ops if the TaskSource is not in the
// PriorityQueue or the PriorityQueue is empty.
void UpdateSortKey(TaskSource::Transaction transaction);
// Returns true if the PriorityQueue is empty.
bool IsEmpty() const;
// Returns the number of TaskSources in the PriorityQueue.
size_t Size() const;
// Returns the number of TaskSources with |priority|.
size_t GetNumTaskSourcesWithPriority(TaskPriority priority) const {
return num_task_sources_per_priority_[static_cast<int>(priority)];
}
// Sets the PriorityQueue to flush the Tasks of all its TaskSources when it
// is destroyed; needed to prevent memory leaks caused by a reference cycle
// (TaskSource -> Task -> TaskRunner -> TaskSource...) during test teardown.
void EnableFlushTaskSourcesOnDestroyForTesting();
private:
// A class combining a TaskSource and the SequenceSortKey that determines its
// position in a PriorityQueue.
class TaskSourceAndSortKey;
using ContainerType = IntrusiveHeap<TaskSourceAndSortKey>;
void DecrementNumTaskSourcesForPriority(TaskPriority priority);
void IncrementNumTaskSourcesForPriority(TaskPriority priority);
ContainerType container_;
std::array<size_t, static_cast<int>(TaskPriority::HIGHEST) + 1>
num_task_sources_per_priority_ = {};
// Should only be enabled by EnableFlushTaskSourcesOnDestroyForTesting().
bool is_flush_task_sources_on_destroy_enabled_ = false;
DISALLOW_COPY_AND_ASSIGN(PriorityQueue);
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_THREAD_POOL_PRIORITY_QUEUE_H_

Some files were not shown because too many files have changed in this diff.