Repo created

parent 81b91f4139
commit f8c34fa5ee

22732 changed files with 4815320 additions and 2 deletions
@ -0,0 +1,7 @@
etiennep@chromium.org
fdoray@chromium.org
gab@chromium.org
robliao@chromium.org

# TEAM: scheduler-dev@chromium.org
# COMPONENT: Internals>ThreadPool
@ -0,0 +1,159 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/delayed_task_manager.h"
|
||||
|
||||
#include <algorithm>
|
||||
|
||||
#include "base/bind.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/sequenced_task_runner.h"
|
||||
#include "base/task/post_task.h"
|
||||
#include "base/task/thread_pool/task.h"
|
||||
#include "base/task_runner.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
DelayedTaskManager::DelayedTask::DelayedTask() = default;
|
||||
|
||||
DelayedTaskManager::DelayedTask::DelayedTask(
|
||||
Task task,
|
||||
PostTaskNowCallback callback,
|
||||
scoped_refptr<TaskRunner> task_runner)
|
||||
: task(std::move(task)),
|
||||
callback(std::move(callback)),
|
||||
task_runner(std::move(task_runner)) {}
|
||||
|
||||
DelayedTaskManager::DelayedTask::DelayedTask(
|
||||
DelayedTaskManager::DelayedTask&& other) = default;
|
||||
|
||||
DelayedTaskManager::DelayedTask::~DelayedTask() = default;
|
||||
|
||||
DelayedTaskManager::DelayedTask& DelayedTaskManager::DelayedTask::operator=(
|
||||
DelayedTaskManager::DelayedTask&& other) = default;
|
||||
|
||||
bool DelayedTaskManager::DelayedTask::operator<=(
|
||||
const DelayedTask& other) const {
|
||||
if (task.delayed_run_time == other.task.delayed_run_time) {
|
||||
return task.sequence_num <= other.task.sequence_num;
|
||||
}
|
||||
return task.delayed_run_time < other.task.delayed_run_time;
|
||||
}
|
||||
|
||||
bool DelayedTaskManager::DelayedTask::IsScheduled() const {
|
||||
return scheduled_;
|
||||
}
|
||||
void DelayedTaskManager::DelayedTask::SetScheduled() {
|
||||
DCHECK(!scheduled_);
|
||||
scheduled_ = true;
|
||||
}
|
||||
|
||||
DelayedTaskManager::DelayedTaskManager(const TickClock* tick_clock)
|
||||
: process_ripe_tasks_closure_(
|
||||
BindRepeating(&DelayedTaskManager::ProcessRipeTasks,
|
||||
Unretained(this))),
|
||||
tick_clock_(tick_clock) {
|
||||
DCHECK(tick_clock_);
|
||||
}
|
||||
|
||||
DelayedTaskManager::~DelayedTaskManager() = default;
|
||||
|
||||
void DelayedTaskManager::Start(
|
||||
scoped_refptr<SequencedTaskRunner> service_thread_task_runner) {
|
||||
DCHECK(service_thread_task_runner);
|
||||
|
||||
TimeTicks process_ripe_tasks_time;
|
||||
{
|
||||
CheckedAutoLock auto_lock(queue_lock_);
|
||||
DCHECK(!service_thread_task_runner_);
|
||||
service_thread_task_runner_ = std::move(service_thread_task_runner);
|
||||
process_ripe_tasks_time = GetTimeToScheduleProcessRipeTasksLockRequired();
|
||||
}
|
||||
ScheduleProcessRipeTasksOnServiceThread(process_ripe_tasks_time);
|
||||
}
|
||||
|
||||
void DelayedTaskManager::AddDelayedTask(
|
||||
Task task,
|
||||
PostTaskNowCallback post_task_now_callback,
|
||||
scoped_refptr<TaskRunner> task_runner) {
|
||||
DCHECK(task.task);
|
||||
DCHECK(!task.delayed_run_time.is_null());
|
||||
|
||||
// Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
|
||||
// for details.
|
||||
CHECK(task.task);
|
||||
TimeTicks process_ripe_tasks_time;
|
||||
{
|
||||
CheckedAutoLock auto_lock(queue_lock_);
|
||||
delayed_task_queue_.insert(DelayedTask(std::move(task),
|
||||
std::move(post_task_now_callback),
|
||||
std::move(task_runner)));
|
||||
// Not started yet.
|
||||
if (service_thread_task_runner_ == nullptr)
|
||||
return;
|
||||
process_ripe_tasks_time = GetTimeToScheduleProcessRipeTasksLockRequired();
|
||||
}
|
||||
ScheduleProcessRipeTasksOnServiceThread(process_ripe_tasks_time);
|
||||
}
|
||||
|
||||
void DelayedTaskManager::ProcessRipeTasks() {
|
||||
std::vector<DelayedTask> ripe_delayed_tasks;
|
||||
TimeTicks process_ripe_tasks_time;
|
||||
|
||||
{
|
||||
CheckedAutoLock auto_lock(queue_lock_);
|
||||
const TimeTicks now = tick_clock_->NowTicks();
|
||||
while (!delayed_task_queue_.empty() &&
|
||||
delayed_task_queue_.Min().task.delayed_run_time <= now) {
|
||||
// The const_cast on Min() is okay since the DelayedTask is
|
||||
// transactionally being popped from |delayed_task_queue_| right after
|
||||
// and the move doesn't alter the sort order.
|
||||
ripe_delayed_tasks.push_back(
|
||||
std::move(const_cast<DelayedTask&>(delayed_task_queue_.Min())));
|
||||
delayed_task_queue_.Pop();
|
||||
}
|
||||
process_ripe_tasks_time = GetTimeToScheduleProcessRipeTasksLockRequired();
|
||||
}
|
||||
ScheduleProcessRipeTasksOnServiceThread(process_ripe_tasks_time);
|
||||
|
||||
for (auto& delayed_task : ripe_delayed_tasks) {
|
||||
std::move(delayed_task.callback).Run(std::move(delayed_task.task));
|
||||
}
|
||||
}
|
||||
|
||||
Optional<TimeTicks> DelayedTaskManager::NextScheduledRunTime() const {
|
||||
CheckedAutoLock auto_lock(queue_lock_);
|
||||
if (delayed_task_queue_.empty())
|
||||
return nullopt;
|
||||
return delayed_task_queue_.Min().task.delayed_run_time;
|
||||
}
|
||||
|
||||
TimeTicks DelayedTaskManager::GetTimeToScheduleProcessRipeTasksLockRequired() {
|
||||
queue_lock_.AssertAcquired();
|
||||
if (delayed_task_queue_.empty())
|
||||
return TimeTicks::Max();
|
||||
// The const_cast on Min() is okay since |IsScheduled()| and |SetScheduled()|
|
||||
// don't alter the sort order.
|
||||
DelayedTask& ripest_delayed_task =
|
||||
const_cast<DelayedTask&>(delayed_task_queue_.Min());
|
||||
if (ripest_delayed_task.IsScheduled())
|
||||
return TimeTicks::Max();
|
||||
ripest_delayed_task.SetScheduled();
|
||||
return ripest_delayed_task.task.delayed_run_time;
|
||||
}
|
||||
|
||||
void DelayedTaskManager::ScheduleProcessRipeTasksOnServiceThread(
|
||||
TimeTicks next_delayed_task_run_time) {
|
||||
DCHECK(!next_delayed_task_run_time.is_null());
|
||||
if (next_delayed_task_run_time.is_max())
|
||||
return;
|
||||
const TimeTicks now = tick_clock_->NowTicks();
|
||||
TimeDelta delay = std::max(TimeDelta(), next_delayed_task_run_time - now);
|
||||
service_thread_task_runner_->PostDelayedTask(
|
||||
FROM_HERE, process_ripe_tasks_closure_, delay);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,136 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_DELAYED_TASK_MANAGER_H_
|
||||
#define BASE_TASK_THREAD_POOL_DELAYED_TASK_MANAGER_H_
|
||||
|
||||
#include <memory>
|
||||
#include <utility>
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/callback.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "base/memory/ref_counted.h"
|
||||
#include "base/optional.h"
|
||||
#include "base/synchronization/atomic_flag.h"
|
||||
#include "base/task/common/checked_lock.h"
|
||||
#include "base/task/common/intrusive_heap.h"
|
||||
#include "base/task/thread_pool/task.h"
|
||||
#include "base/thread_annotations.h"
|
||||
#include "base/time/default_tick_clock.h"
|
||||
#include "base/time/tick_clock.h"
|
||||
|
||||
namespace base {
|
||||
|
||||
class SequencedTaskRunner;
|
||||
|
||||
namespace internal {
|
||||
|
||||
// The DelayedTaskManager forwards tasks to post task callbacks when they become
|
||||
// ripe for execution. Tasks are not forwarded before Start() is called. This
|
||||
// class is thread-safe.
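//
// A hypothetical usage sketch (illustrative only; |service_thread_task_runner|,
// |task|, |task_runner| and the PostRipeTaskToPool helper are assumptions, not
// part of this file):
//
//   DelayedTaskManager delayed_task_manager;
//   delayed_task_manager.Start(service_thread_task_runner);
//   delayed_task_manager.AddDelayedTask(
//       std::move(task), BindOnce(&PostRipeTaskToPool), std::move(task_runner));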
|
||||
class BASE_EXPORT DelayedTaskManager {
|
||||
public:
|
||||
// Posts |task| for execution immediately.
|
||||
using PostTaskNowCallback = OnceCallback<void(Task task)>;
|
||||
|
||||
// |tick_clock| can be specified for testing.
|
||||
DelayedTaskManager(
|
||||
const TickClock* tick_clock = DefaultTickClock::GetInstance());
|
||||
~DelayedTaskManager();
|
||||
|
||||
// Starts the delayed task manager, allowing past and future tasks to be
|
||||
// forwarded to their callbacks as they become ripe for execution.
|
||||
// |service_thread_task_runner| posts tasks to the ThreadPool service
|
||||
// thread.
|
||||
void Start(scoped_refptr<SequencedTaskRunner> service_thread_task_runner);
|
||||
|
||||
// Schedules a call to |post_task_now_callback| with |task| as argument when
|
||||
// |task| is ripe for execution. |task_runner| is passed to retain a
|
||||
// reference until |task| is ripe.
|
||||
void AddDelayedTask(Task task,
|
||||
PostTaskNowCallback post_task_now_callback,
|
||||
scoped_refptr<TaskRunner> task_runner);
|
||||
|
||||
// Pop and post all the ripe tasks in the delayed task queue.
|
||||
void ProcessRipeTasks();
|
||||
|
||||
// Returns the |delayed_run_time| of the next scheduled task, if any.
|
||||
Optional<TimeTicks> NextScheduledRunTime() const;
|
||||
|
||||
private:
|
||||
struct DelayedTask {
|
||||
DelayedTask();
|
||||
DelayedTask(Task task,
|
||||
PostTaskNowCallback callback,
|
||||
scoped_refptr<TaskRunner> task_runner);
|
||||
DelayedTask(DelayedTask&& other);
|
||||
~DelayedTask();
|
||||
|
||||
// Required by IntrusiveHeap::insert().
|
||||
DelayedTask& operator=(DelayedTask&& other);
|
||||
|
||||
// Required by IntrusiveHeap.
|
||||
bool operator<=(const DelayedTask& other) const;
|
||||
|
||||
Task task;
|
||||
PostTaskNowCallback callback;
|
||||
scoped_refptr<TaskRunner> task_runner;
|
||||
|
||||
// True iff the delayed task has been marked as scheduled.
|
||||
bool IsScheduled() const;
|
||||
|
||||
// Mark the delayed task as scheduled. Since the sort key is
|
||||
// |task.delayed_run_time|, it does not alter sort order when it is called.
|
||||
void SetScheduled();
|
||||
|
||||
// Required by IntrusiveHeap.
|
||||
void SetHeapHandle(const HeapHandle& handle) {}
|
||||
|
||||
// Required by IntrusiveHeap.
|
||||
void ClearHeapHandle() {}
|
||||
|
||||
// Required by IntrusiveHeap.
|
||||
HeapHandle GetHeapHandle() const { return HeapHandle::Invalid(); }
|
||||
|
||||
private:
|
||||
bool scheduled_ = false;
|
||||
DISALLOW_COPY_AND_ASSIGN(DelayedTask);
|
||||
};
|
||||
|
||||
// Get the time at which to schedule the next |ProcessRipeTasks()| execution,
|
||||
// or TimeTicks::Max() if none needs to be scheduled (i.e. no task, or next
|
||||
// task already scheduled).
|
||||
TimeTicks GetTimeToScheduleProcessRipeTasksLockRequired()
|
||||
EXCLUSIVE_LOCKS_REQUIRED(queue_lock_);
|
||||
|
||||
// Schedule |ProcessRipeTasks()| on the service thread to be executed at the
|
||||
// given |process_ripe_tasks_time|, provided the given time is not
|
||||
// TimeTicks::Max().
|
||||
void ScheduleProcessRipeTasksOnServiceThread(
|
||||
TimeTicks process_ripe_tasks_time);
|
||||
|
||||
const RepeatingClosure process_ripe_tasks_closure_;
|
||||
|
||||
const TickClock* const tick_clock_;
|
||||
|
||||
// Synchronizes access to |delayed_task_queue_| and the setting of
|
||||
// |service_thread_task_runner_|. Once |service_thread_task_runner_| is set,
|
||||
// it is never modified. It is therefore safe to access
|
||||
// |service_thread_task_runner_| without synchronization once it is observed
|
||||
// that it is non-null.
|
||||
mutable CheckedLock queue_lock_;
|
||||
|
||||
scoped_refptr<SequencedTaskRunner> service_thread_task_runner_;
|
||||
|
||||
IntrusiveHeap<DelayedTask> delayed_task_queue_ GUARDED_BY(queue_lock_);
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(DelayedTaskManager);
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_DELAYED_TASK_MANAGER_H_
|
||||
|
|
@ -0,0 +1,47 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/thread_pool/environment_config.h"

#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"

namespace base {
namespace internal {

namespace {

bool CanUseBackgroundPriorityForWorkerThreadImpl() {
  // When Lock doesn't handle multiple thread priorities, run all
  // WorkerThreads with a normal priority to avoid priority inversion when a
  // thread running with a normal priority tries to acquire a lock held by a
  // thread running with a background priority.
  if (!Lock::HandlesMultipleThreadPriorities())
    return false;

#if !defined(OS_ANDROID)
  // When thread priority can't be increased to NORMAL, run all threads with a
  // NORMAL priority to avoid priority inversions on shutdown (ThreadPoolImpl
  // increases the priority of BACKGROUND threads to NORMAL on shutdown while
  // resolving remaining shutdown-blocking tasks).
  //
  // This is ignored on Android, because it doesn't have a clean shutdown
  // phase.
  if (!PlatformThread::CanIncreaseThreadPriority(ThreadPriority::NORMAL))
    return false;
#endif  // !defined(OS_ANDROID)

  return true;
}

}  // namespace

bool CanUseBackgroundPriorityForWorkerThread() {
  static const bool can_use_background_priority_for_worker_thread =
      CanUseBackgroundPriorityForWorkerThreadImpl();
  return can_use_background_priority_for_worker_thread;
}

}  // namespace internal
}  // namespace base
@ -0,0 +1,52 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_ENVIRONMENT_CONFIG_H_
#define BASE_TASK_THREAD_POOL_ENVIRONMENT_CONFIG_H_

#include <stddef.h>

#include "base/base_export.h"
#include "base/task/task_traits.h"
#include "base/threading/thread.h"

namespace base {
namespace internal {

// TODO(etiennep): This is now specific to
// PooledSingleThreadTaskRunnerManager, move it there.
enum EnvironmentType {
  FOREGROUND = 0,
  FOREGROUND_BLOCKING,
  BACKGROUND,
  BACKGROUND_BLOCKING,
  ENVIRONMENT_COUNT  // Always last.
};

// Order must match the EnvironmentType enum.
struct EnvironmentParams {
  // The threads and histograms of this environment will be labeled with
  // the thread pool name concatenated to this.
  const char* name_suffix;

  // Preferred priority for threads in this environment; the actual thread
  // priority depends on shutdown state and platform capabilities.
  ThreadPriority priority_hint;
};

constexpr EnvironmentParams kEnvironmentParams[] = {
    {"Foreground", base::ThreadPriority::NORMAL},
    {"ForegroundBlocking", base::ThreadPriority::NORMAL},
    {"Background", base::ThreadPriority::BACKGROUND},
    {"BackgroundBlocking", base::ThreadPriority::BACKGROUND},
};
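// Illustrative lookup (a sketch, not part of this file): because the array
// order matches EnvironmentType, parameters are read by indexing with the
// enum value, e.g.
//
//   const EnvironmentParams& params = kEnvironmentParams[FOREGROUND_BLOCKING];
//   // params.name_suffix == "ForegroundBlocking",
//   // params.priority_hint == ThreadPriority::NORMAL.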

// Returns true if this platform supports having WorkerThreads running with a
// background priority.
bool BASE_EXPORT CanUseBackgroundPriorityForWorkerThread();

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_ENVIRONMENT_CONFIG_H_
@ -0,0 +1,92 @@
# Historical Histogram Data

This page presents data captured from `base::ThreadPool` histograms at a given
point in time so it can be used in future design decisions.

All data is a 28-day aggregation on the Stable channel.

## Number of tasks between waits

Number of tasks between two waits by a foreground worker thread in a
browser/renderer process.

Histogram name: ThreadPool.NumTasksBetweenWaits.(Browser/Renderer).Foreground
Date: August 2019
Values in the tables below are percentiles. For example, in the Windows table
below, 87% of browser-process samples saw at most 1 task between two waits.

### Windows

| Number of tasks | Browser process | Renderer process |
|-----------------|-----------------|------------------|
| 1               | 87              | 92               |
| 2               | 95              | 98               |
| 5               | 99              | 100              |

### Mac

| Number of tasks | Browser process | Renderer process |
|-----------------|-----------------|------------------|
| 1               | 81              | 90               |
| 2               | 92              | 97               |
| 5               | 98              | 100              |

### Android

| Number of tasks | Browser process | Renderer process |
|-----------------|-----------------|------------------|
| 1               | 92              | 96               |
| 2               | 97              | 98               |
| 5               | 99              | 100              |


## Number of tasks run while queuing

Number of tasks run by the ThreadPool while a task was queuing (from the time
the task was posted until the time it was run). Recorded for dummy heartbeat
tasks in the *browser* process. The heartbeat recording avoids dependencies
between this report and other work in the system.

Histogram name: ThreadPool.NumTasksRunWhileQueuing.Browser.*
Date: September 2019
Values in the tables below are percentiles.

Note: In *renderer* processes, on all platforms/priorities, 0 tasks are run
while queuing at the 99.5th percentile.

### Windows

| Number of tasks | USER_BLOCKING | USER_VISIBLE | BEST_EFFORT |
|-----------------|---------------|--------------|-------------|
| 0               | 95            | 93           | 90          |
| 1               | 98            | 95           | 92          |
| 2               | 99            | 96           | 93          |
| 5               | 100           | 98           | 95          |

### Mac

| Number of tasks | USER_BLOCKING | USER_VISIBLE | BEST_EFFORT |
|-----------------|---------------|--------------|-------------|
| 0               | 100           | 100          | 99          |
| 1               | 100           | 100          | 99          |
| 2               | 100           | 100          | 99          |
| 5               | 100           | 100          | 100         |

### Android

| Number of tasks | USER_BLOCKING | USER_VISIBLE | BEST_EFFORT |
|-----------------|---------------|--------------|-------------|
| 0               | 99            | 98           | 97          |
| 1               | 100           | 99           | 99          |
| 2               | 100           | 99           | 99          |
| 5               | 100           | 100          | 100         |

### Chrome OS

For all priorities, 0 tasks are run while queuing at the 99.5th percentile.

### Analysis

The number of tasks that run while a BEST_EFFORT task is queued is unexpectedly
low. We should explore creating threads less aggressively, at the expense of
keeping BEST_EFFORT tasks in the queue for a longer time. See
[Bug 906079](https://crbug.com/906079).
@ -0,0 +1,23 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/thread_pool/initialization_util.h"

#include <algorithm>
#include <cmath>

#include "base/numerics/ranges.h"
#include "base/system/sys_info.h"

namespace base {

int RecommendedMaxNumberOfThreadsInThreadGroup(int min,
                                               int max,
                                               double cores_multiplier,
                                               int offset) {
  const int num_of_cores = SysInfo::NumberOfProcessors();
  const int threads = std::ceil<int>(num_of_cores * cores_multiplier) + offset;
  return ClampToRange(threads, min, max);
}

}  // namespace base
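// Worked example (a sketch, not part of the CL; the argument values are
// assumptions): on a machine where SysInfo::NumberOfProcessors() returns 8,
//
//   RecommendedMaxNumberOfThreadsInThreadGroup(/*min=*/3, /*max=*/32,
//                                              /*cores_multiplier=*/0.6,
//                                              /*offset=*/0)
//
// computes ceil(8 * 0.6) + 0 = 5; the result already lies in [3, 32], so 5 is
// returned, and a value outside that range would be clamped by ClampToRange().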
@ -0,0 +1,22 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_INITIALIZATION_UTIL_H_
#define BASE_TASK_THREAD_POOL_INITIALIZATION_UTIL_H_

#include "base/base_export.h"

namespace base {

// Computes a value that may be used as the maximum number of threads in a
// ThreadGroup. Developers may use other methods to choose this maximum.
BASE_EXPORT int RecommendedMaxNumberOfThreadsInThreadGroup(
    int min,
    int max,
    double cores_multiplier,
    int offset);

}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_INITIALIZATION_UTIL_H_
@ -0,0 +1,402 @@
|
|||
// Copyright 2019 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/job_task_source.h"
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "base/bind.h"
|
||||
#include "base/bind_helpers.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "base/task/task_features.h"
|
||||
#include "base/task/thread_pool/pooled_task_runner_delegate.h"
|
||||
#include "base/threading/thread_restrictions.h"
|
||||
#include "base/time/time.h"
|
||||
#include "base/time/time_override.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
// Memory ordering on |state_| operations
|
||||
//
|
||||
// The write operation on |state_| in WillRunTask() uses
|
||||
// std::memory_order_release, matched by std::memory_order_acquire on read
|
||||
// operations (in DidProcessTask()) to establish a
|
||||
// Release-Acquire ordering. When a call to WillRunTask() is caused by an
|
||||
// increase of max concurrency followed by an associated
|
||||
// NotifyConcurrencyIncrease(), the priority queue lock guarantees a
|
||||
// happens-after relation with NotifyConcurrencyIncrease(). This ensures that an
|
||||
// increase of max concurrency that happened-before NotifyConcurrencyIncrease()
|
||||
// is visible to a read operation that happens-after WillRunTask().
|
||||
//
|
||||
// In DidProcessTask(), this is necessary to
|
||||
// ensure that the task source is always re-enqueued when it needs to be. When the
|
||||
// task source needs to be queued, either because the current task yielded or
|
||||
// because of NotifyConcurrencyIncrease(), one of the following is true:
|
||||
// A) DidProcessTask() happens-after WillRunTask():
|
||||
// T1: Current task returns (because it is done) or yields.
|
||||
// T2: Increases the value returned by GetMaxConcurrency()
|
||||
// NotifyConcurrencyIncrease() enqueues the task source
|
||||
// T3: WillRunTask(), in response to the concurrency increase - Release
|
||||
// Does not keep the TaskSource in PriorityQueue because it is at max
|
||||
// concurrency
|
||||
// T1: DidProcessTask() - Acquire - Because of memory barrier, sees the same
|
||||
// (or newer) max concurrency as T2
|
||||
// Re-enqueues the TaskSource because no longer at max concurrency
|
||||
// Without the memory barrier, T1 may see an outdated max concurrency that
|
||||
// is lower than the actual max concurrency and won't re-enqueue the
|
||||
// task source, because it thinks it's already saturated.
|
||||
// The task source often needs to be re-enqueued if its task
|
||||
// completed because it yielded and |max_concurrency| wasn't decreased.
|
||||
// B) DidProcessTask() happens-before WillRunTask():
|
||||
// T1: Current task returns (because it is done) or yields
|
||||
// T2: Increases the value returned by GetMaxConcurrency()
|
||||
// NotifyConcurrencyIncrease() enqueues the task source
|
||||
// T1: DidProcessTask() - Acquire (ineffective)
|
||||
// Since the task source is already in the queue, it doesn't matter
|
||||
// whether T1 re-enqueues the task source or not.
|
||||
// Note that stale values the other way around can cause this task_source to
// be re-enqueued unnecessarily, which is not an issue because the queues
// support empty task sources.
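//
// A minimal sketch (not part of this CL) of the release/acquire pairing
// described above, with a bare atomic standing in for |state_|:
//
//   std::atomic<uint32_t> worker_count{0};
//   size_t max_concurrency = 1;  // plain variable, written by T2
//
//   // T3, in WillRunTask(): the increment publishes with release semantics,
//   // so the write to |max_concurrency| that happened-before it is published
//   // along with it.
//   worker_count.fetch_add(1, std::memory_order_release);
//
//   // T1, in DidProcessTask(): the decrement acquires; because it observes
//   // the value written by the release above, the new |max_concurrency| is
//   // also visible here and the task source can be re-enqueued if needed.
//   worker_count.fetch_sub(1, std::memory_order_acquire);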
|
||||
|
||||
JobTaskSource::State::State() = default;
|
||||
JobTaskSource::State::~State() = default;
|
||||
|
||||
JobTaskSource::State::Value JobTaskSource::State::Cancel() {
|
||||
return {value_.fetch_or(kCanceledMask, std::memory_order_relaxed)};
|
||||
}
|
||||
|
||||
JobTaskSource::State::Value
|
||||
JobTaskSource::State::TryIncrementWorkerCountFromWorkerRelease(
|
||||
size_t max_concurrency) {
|
||||
uint32_t value_before_add = value_.load(std::memory_order_relaxed);
|
||||
|
||||
// std::memory_order_release on success to establish Release-Acquire ordering
|
||||
// with DecrementWorkerCountAcquire() (see Memory Ordering comment at top of
|
||||
// the file).
|
||||
while (!(value_before_add & kCanceledMask) &&
|
||||
(value_before_add >> kWorkerCountBitOffset) < max_concurrency &&
|
||||
!value_.compare_exchange_weak(
|
||||
value_before_add, value_before_add + kWorkerCountIncrement,
|
||||
std::memory_order_release, std::memory_order_relaxed)) {
|
||||
}
|
||||
return {value_before_add};
|
||||
}
|
||||
|
||||
JobTaskSource::State::Value
|
||||
JobTaskSource::State::DecrementWorkerCountFromWorkerAcquire() {
|
||||
const size_t value_before_sub =
|
||||
value_.fetch_sub(kWorkerCountIncrement, std::memory_order_acquire);
|
||||
DCHECK((value_before_sub >> kWorkerCountBitOffset) > 0);
|
||||
return {value_before_sub};
|
||||
}
|
||||
|
||||
JobTaskSource::State::Value
|
||||
JobTaskSource::State::IncrementWorkerCountFromJoiningThread() {
|
||||
size_t value_before_add =
|
||||
value_.fetch_add(kWorkerCountIncrement, std::memory_order_relaxed);
|
||||
return {value_before_add};
|
||||
}
|
||||
|
||||
JobTaskSource::State::Value
|
||||
JobTaskSource::State::DecrementWorkerCountFromJoiningThread() {
|
||||
const size_t value_before_sub =
|
||||
value_.fetch_sub(kWorkerCountIncrement, std::memory_order_relaxed);
|
||||
DCHECK((value_before_sub >> kWorkerCountBitOffset) > 0);
|
||||
return {value_before_sub};
|
||||
}
|
||||
|
||||
JobTaskSource::State::Value JobTaskSource::State::Load() const {
|
||||
return {value_.load(std::memory_order_relaxed)};
|
||||
}
|
||||
|
||||
JobTaskSource::JoinFlag::JoinFlag() = default;
|
||||
JobTaskSource::JoinFlag::~JoinFlag() = default;
|
||||
|
||||
void JobTaskSource::JoinFlag::SetWaiting() {
|
||||
const auto previous_value =
|
||||
value_.exchange(kWaitingForWorkerToYield, std::memory_order_relaxed);
|
||||
DCHECK(previous_value == kNotWaiting);
|
||||
}
|
||||
|
||||
bool JobTaskSource::JoinFlag::ShouldWorkerYield() {
|
||||
// The fetch_and() sets the state to kWaitingForWorkerToSignal if it was
|
||||
// previously kWaitingForWorkerToYield, otherwise it leaves it unchanged.
|
||||
return value_.fetch_and(kWaitingForWorkerToSignal,
|
||||
std::memory_order_relaxed) ==
|
||||
kWaitingForWorkerToYield;
|
||||
}
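
// Bit-level illustration (a sketch, not part of the original source) of why
// the single fetch_and() above suffices, using the constants from
// job_task_source.h (kNotWaiting == 0b00, kWaitingForWorkerToSignal == 0b01,
// kWaitingForWorkerToYield == 0b11):
//
//   0b11 & 0b01 == 0b01  // yield requested -> downgraded to "signal"; returns true
//   0b01 & 0b01 == 0b01  // already "signal" -> unchanged; returns false
//   0b00 & 0b01 == 0b00  // not waiting -> unchanged; returns false
//
// so exactly one worker observes the yield request.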
|
||||
|
||||
bool JobTaskSource::JoinFlag::ShouldWorkerSignal() {
|
||||
return value_.exchange(kNotWaiting, std::memory_order_relaxed) != kNotWaiting;
|
||||
}
|
||||
|
||||
JobTaskSource::JobTaskSource(
|
||||
const Location& from_here,
|
||||
const TaskTraits& traits,
|
||||
RepeatingCallback<void(JobDelegate*)> worker_task,
|
||||
RepeatingCallback<size_t()> max_concurrency_callback,
|
||||
PooledTaskRunnerDelegate* delegate)
|
||||
: TaskSource(traits, nullptr, TaskSourceExecutionMode::kJob),
|
||||
from_here_(from_here),
|
||||
max_concurrency_callback_(std::move(max_concurrency_callback)),
|
||||
worker_task_(std::move(worker_task)),
|
||||
primary_task_(base::BindRepeating(
|
||||
[](JobTaskSource* self) {
|
||||
// Each worker task has its own delegate with associated state.
|
||||
JobDelegate job_delegate{self, self->delegate_};
|
||||
self->worker_task_.Run(&job_delegate);
|
||||
},
|
||||
base::Unretained(this))),
|
||||
queue_time_(TimeTicks::Now()),
|
||||
delegate_(delegate) {
|
||||
DCHECK(delegate_);
|
||||
}
|
||||
|
||||
JobTaskSource::~JobTaskSource() {
|
||||
// Make sure there's no outstanding active run operation left.
|
||||
DCHECK_EQ(state_.Load().worker_count(), 0U);
|
||||
}
|
||||
|
||||
ExecutionEnvironment JobTaskSource::GetExecutionEnvironment() {
|
||||
return {SequenceToken::Create(), nullptr};
|
||||
}
|
||||
|
||||
bool JobTaskSource::WillJoin() {
|
||||
{
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
DCHECK(!worker_released_condition_); // This may only be called once.
|
||||
worker_released_condition_ = lock_.CreateConditionVariable();
|
||||
}
|
||||
// std::memory_order_relaxed on |worker_count_| is sufficient because the call to
|
||||
// GetMaxConcurrency() is used for a best effort early exit. Stale values will
|
||||
// only cause WaitForParticipationOpportunity() to be called.
|
||||
const auto state_before_add = state_.IncrementWorkerCountFromJoiningThread();
|
||||
|
||||
if (!state_before_add.is_canceled() &&
|
||||
state_before_add.worker_count() < GetMaxConcurrency()) {
|
||||
return true;
|
||||
}
|
||||
return WaitForParticipationOpportunity();
|
||||
}
|
||||
|
||||
bool JobTaskSource::RunJoinTask() {
|
||||
JobDelegate job_delegate{this, nullptr};
|
||||
worker_task_.Run(&job_delegate);
|
||||
|
||||
// std::memory_order_relaxed on |worker_count_| is sufficient because the call
|
||||
// to GetMaxConcurrency() is used for a best effort early exit. Stale values
|
||||
// will only cause WaitForParticipationOpportunity() to be called.
|
||||
const auto state = state_.Load();
|
||||
if (!state.is_canceled() && state.worker_count() <= GetMaxConcurrency())
|
||||
return true;
|
||||
|
||||
return WaitForParticipationOpportunity();
|
||||
}
|
||||
|
||||
void JobTaskSource::Cancel(TaskSource::Transaction* transaction) {
|
||||
// Sets the kCanceledMask bit on |state_| so that further calls to
|
||||
// WillRunTask() never succeed. std::memory_order_relaxed is sufficient
|
||||
// because this task source never needs to be re-enqueued after Cancel().
|
||||
state_.Cancel();
|
||||
|
||||
#if DCHECK_IS_ON()
|
||||
{
|
||||
AutoLock auto_lock(version_lock_);
|
||||
++increase_version_;
|
||||
version_condition_.Broadcast();
|
||||
}
|
||||
#endif // DCHECK_IS_ON()
|
||||
}
|
||||
|
||||
bool JobTaskSource::WaitForParticipationOpportunity() {
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
|
||||
// std::memory_order_relaxed is sufficient because no other state is
|
||||
// synchronized with |state_| outside of |lock_|.
|
||||
auto state = state_.Load();
|
||||
size_t max_concurrency = GetMaxConcurrency();
|
||||
|
||||
// Wait until either:
|
||||
// A) |worker_count| is below or equal to max concurrency and state is not
|
||||
// canceled.
|
||||
// B) All other workers returned and |worker_count| is 1.
|
||||
while (!((state.worker_count() <= max_concurrency && !state.is_canceled()) ||
|
||||
state.worker_count() == 1)) {
|
||||
// std::memory_order_relaxed is sufficient because no other state is
|
||||
// synchronized with |join_flag_| outside of |lock_|.
|
||||
join_flag_.SetWaiting();
|
||||
|
||||
// To avoid waiting unnecessarily, when either condition A) or B) changes,
// |lock_| is taken and |worker_released_condition_| is signaled if necessary:
|
||||
// 1- In DidProcessTask(), after worker count is decremented.
|
||||
// 2- In NotifyConcurrencyIncrease(), following a max_concurrency increase.
|
||||
worker_released_condition_->Wait();
|
||||
state = state_.Load();
|
||||
max_concurrency = GetMaxConcurrency();
|
||||
}
|
||||
// Case A:
|
||||
if (state.worker_count() <= max_concurrency && !state.is_canceled())
|
||||
return true;
|
||||
// Case B:
|
||||
// Only the joining thread remains.
|
||||
DCHECK_EQ(state.worker_count(), 1U);
|
||||
DCHECK(state.is_canceled() || max_concurrency == 0U);
|
||||
state_.DecrementWorkerCountFromJoiningThread();
|
||||
return false;
|
||||
}
|
||||
|
||||
TaskSource::RunStatus JobTaskSource::WillRunTask() {
|
||||
const size_t max_concurrency = GetMaxConcurrency();
|
||||
// std::memory_order_release on success to establish Release-Acquire ordering
|
||||
// with read operations (see Memory Ordering comment at top of the file).
|
||||
const auto state_before_add =
|
||||
state_.TryIncrementWorkerCountFromWorkerRelease(max_concurrency);
|
||||
|
||||
// Don't allow this worker to run the task if either:
|
||||
// A) |state_| was canceled.
|
||||
// B) |worker_count| is already at |max_concurrency|.
|
||||
// C) |max_concurrency| was lowered below or to |worker_count|.
|
||||
// Case A:
|
||||
if (state_before_add.is_canceled())
|
||||
return RunStatus::kDisallowed;
|
||||
const size_t worker_count_before_add = state_before_add.worker_count();
|
||||
// Case B) or C):
|
||||
if (worker_count_before_add >= max_concurrency)
|
||||
return RunStatus::kDisallowed;
|
||||
|
||||
DCHECK_LT(worker_count_before_add, max_concurrency);
|
||||
return max_concurrency == worker_count_before_add + 1
|
||||
? RunStatus::kAllowedSaturated
|
||||
: RunStatus::kAllowedNotSaturated;
|
||||
}
|
||||
|
||||
size_t JobTaskSource::GetRemainingConcurrency() const {
|
||||
// std::memory_order_relaxed is sufficient because no other state is
|
||||
// synchronized with GetRemainingConcurrency().
|
||||
const auto state = state_.Load();
|
||||
const size_t max_concurrency = GetMaxConcurrency();
|
||||
// Avoid underflows.
|
||||
if (state.is_canceled() || state.worker_count() > max_concurrency)
|
||||
return 0;
|
||||
return max_concurrency - state.worker_count();
|
||||
}
|
||||
|
||||
void JobTaskSource::NotifyConcurrencyIncrease() {
|
||||
#if DCHECK_IS_ON()
|
||||
{
|
||||
AutoLock auto_lock(version_lock_);
|
||||
++increase_version_;
|
||||
version_condition_.Broadcast();
|
||||
}
|
||||
#endif // DCHECK_IS_ON()
|
||||
|
||||
// Avoid unnecessary locks when NotifyConcurrencyIncrease() is spuriously
|
||||
// called.
|
||||
if (GetRemainingConcurrency() == 0)
|
||||
return;
|
||||
|
||||
{
|
||||
// Lock is taken to access |join_flag_| below and signal
|
||||
// |worker_released_condition_|.
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
if (join_flag_.ShouldWorkerSignal())
|
||||
worker_released_condition_->Signal();
|
||||
}
|
||||
|
||||
// Make sure the task source is in the queue if not already.
|
||||
// Caveat: it's possible but unlikely that the task source has already reached
|
||||
// its intended concurrency and doesn't need to be enqueued if there
|
||||
// previously were too many workers. For simplicity, the task source is always
|
||||
// enqueued and will get discarded if already saturated when it is popped from
|
||||
// the priority queue.
|
||||
delegate_->EnqueueJobTaskSource(this);
|
||||
}
|
||||
|
||||
size_t JobTaskSource::GetMaxConcurrency() const {
|
||||
return max_concurrency_callback_.Run();
|
||||
}
|
||||
|
||||
bool JobTaskSource::ShouldYield() {
|
||||
// It is safe to read |join_flag_| without a lock since this
|
||||
// variable is atomic, keeping in mind that threads may not immediately see
|
||||
// the new value when it is updated.
|
||||
return TS_UNCHECKED_READ(join_flag_).ShouldWorkerYield() ||
|
||||
state_.Load().is_canceled();
|
||||
}
|
||||
|
||||
#if DCHECK_IS_ON()
|
||||
|
||||
size_t JobTaskSource::GetConcurrencyIncreaseVersion() const {
|
||||
AutoLock auto_lock(version_lock_);
|
||||
return increase_version_;
|
||||
}
|
||||
|
||||
bool JobTaskSource::WaitForConcurrencyIncreaseUpdate(size_t recorded_version) {
|
||||
AutoLock auto_lock(version_lock_);
|
||||
constexpr TimeDelta timeout = TimeDelta::FromSeconds(1);
|
||||
const base::TimeTicks start_time = subtle::TimeTicksNowIgnoringOverride();
|
||||
do {
|
||||
DCHECK_LE(recorded_version, increase_version_);
|
||||
const auto state = state_.Load();
|
||||
if (recorded_version != increase_version_ || state.is_canceled())
|
||||
return true;
|
||||
// Waiting is acceptable because it is in DCHECK-only code.
|
||||
ScopedAllowBaseSyncPrimitivesOutsideBlockingScope
|
||||
allow_base_sync_primitives;
|
||||
version_condition_.TimedWait(timeout);
|
||||
} while (subtle::TimeTicksNowIgnoringOverride() - start_time < timeout);
|
||||
return false;
|
||||
}
|
||||
|
||||
#endif // DCHECK_IS_ON()
|
||||
|
||||
Task JobTaskSource::TakeTask(TaskSource::Transaction* transaction) {
|
||||
// JobTaskSource members are not lock-protected so no need to acquire a lock
|
||||
// if |transaction| is nullptr.
|
||||
DCHECK_GT(state_.Load().worker_count(), 0U);
|
||||
DCHECK(primary_task_);
|
||||
return Task(from_here_, primary_task_, TimeDelta());
|
||||
}
|
||||
|
||||
bool JobTaskSource::DidProcessTask(TaskSource::Transaction* transaction) {
|
||||
// Lock is needed to access |join_flag_| below and signal
|
||||
// |worker_released_condition_|. If |transaction|, then |lock_| is already
|
||||
// taken.
|
||||
CheckedAutoLockMaybe auto_lock(transaction ? nullptr : &lock_);
|
||||
AnnotateAcquiredLockAlias annotate(lock_, lock_);
|
||||
|
||||
// std::memory_order_acquire to establish Release-Acquire ordering with
|
||||
// WillRunTask() (see Memory Ordering comment at top of the file).
|
||||
const auto state_before_sub = state_.DecrementWorkerCountFromWorkerAcquire();
|
||||
|
||||
if (join_flag_.ShouldWorkerSignal())
|
||||
worker_released_condition_->Signal();
|
||||
|
||||
// A canceled task source should never get re-enqueued.
|
||||
if (state_before_sub.is_canceled())
|
||||
return false;
|
||||
|
||||
DCHECK_GT(state_before_sub.worker_count(), 0U);
|
||||
|
||||
// Re-enqueue the TaskSource if the task ran and the worker count is below the
|
||||
// max concurrency.
|
||||
return state_before_sub.worker_count() <= GetMaxConcurrency();
|
||||
}
|
||||
|
||||
SequenceSortKey JobTaskSource::GetSortKey() const {
|
||||
return SequenceSortKey(traits_.priority(), queue_time_);
|
||||
}
|
||||
|
||||
Task JobTaskSource::Clear(TaskSource::Transaction* transaction) {
|
||||
Cancel();
|
||||
// Nothing is cleared since other workers might still racily run tasks. For
|
||||
// simplicity, the destructor will take care of it once all references are
|
||||
// released.
|
||||
return Task(from_here_, DoNothing(), TimeDelta());
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,224 @@
|
|||
// Copyright 2019 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_
|
||||
#define BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#include <atomic>
|
||||
#include <limits>
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/callback.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/optional.h"
|
||||
#include "base/synchronization/condition_variable.h"
|
||||
#include "base/synchronization/lock.h"
|
||||
#include "base/task/post_job.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task/thread_pool/sequence_sort_key.h"
|
||||
#include "base/task/thread_pool/task.h"
|
||||
#include "base/task/thread_pool/task_source.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
class PooledTaskRunnerDelegate;
|
||||
|
||||
// A JobTaskSource generates many Tasks from a single RepeatingClosure.
|
||||
//
|
||||
// The intended concurrency is controlled by |max_concurrency_callback| (see
// GetMaxConcurrency()).
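//
// A hypothetical construction sketch (illustrative only; |delegate|, the
// lambdas and remaining_chunks() are assumptions, and the plumbing that
// actually enqueues the task source in the ThreadPool is omitted):
//
//   auto task_source = MakeRefCounted<JobTaskSource>(
//       FROM_HERE, TaskTraits(),
//       BindRepeating([](JobDelegate* job) { /* run one chunk of work */ }),
//       BindRepeating([] { return remaining_chunks(); }),  // max concurrency
//       delegate);
//   JobHandle handle = JobTaskSource::CreateJobHandle(std::move(task_source));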
|
||||
class BASE_EXPORT JobTaskSource : public TaskSource {
|
||||
public:
|
||||
JobTaskSource(const Location& from_here,
|
||||
const TaskTraits& traits,
|
||||
RepeatingCallback<void(JobDelegate*)> worker_task,
|
||||
RepeatingCallback<size_t()> max_concurrency_callback,
|
||||
PooledTaskRunnerDelegate* delegate);
|
||||
|
||||
static JobHandle CreateJobHandle(
|
||||
scoped_refptr<internal::JobTaskSource> task_source) {
|
||||
return JobHandle(std::move(task_source));
|
||||
}
|
||||
|
||||
// Notifies this task source that max concurrency was increased, and the
|
||||
// number of workers should be adjusted.
|
||||
void NotifyConcurrencyIncrease();
|
||||
|
||||
// Informs this JobTaskSource that the current thread would like to join and
|
||||
// contribute to running |worker_task|. Returns true if the joining thread can
|
||||
// contribute (RunJoinTask() can be called), or false if joining was completed
|
||||
// and all other workers returned because either there's no work remaining or
|
||||
// Job was cancelled.
|
||||
bool WillJoin();
|
||||
|
||||
// Contributes to running |worker_task| and returns true if the joining thread
|
||||
// can contribute again (RunJoinTask() can be called again), or false if
|
||||
// joining was completed and all other workers returned because either there's
|
||||
// no work remaining or Job was cancelled. This should be called only after
|
||||
// WillJoin() or RunJoinTask() previously returned true.
|
||||
bool RunJoinTask();
|
||||
|
||||
// Cancels this JobTaskSource, causing all workers to yield and WillRunTask()
|
||||
// to return RunStatus::kDisallowed.
|
||||
void Cancel(TaskSource::Transaction* transaction = nullptr);
|
||||
|
||||
// TaskSource:
|
||||
ExecutionEnvironment GetExecutionEnvironment() override;
|
||||
size_t GetRemainingConcurrency() const override;
|
||||
|
||||
// Returns the maximum number of tasks from this TaskSource that can run
|
||||
// concurrently.
|
||||
size_t GetMaxConcurrency() const;
|
||||
|
||||
// Returns true if a worker should return from the worker task on the current
|
||||
// thread ASAP.
|
||||
bool ShouldYield();
|
||||
|
||||
PooledTaskRunnerDelegate* delegate() const { return delegate_; }
|
||||
|
||||
#if DCHECK_IS_ON()
|
||||
size_t GetConcurrencyIncreaseVersion() const;
|
||||
// Returns true if the concurrency version was updated above
|
||||
// |recorded_version|, or false on timeout.
|
||||
bool WaitForConcurrencyIncreaseUpdate(size_t recorded_version);
|
||||
#endif // DCHECK_IS_ON()
|
||||
|
||||
private:
|
||||
// Atomic internal state to track the number of workers running a task from
|
||||
// this JobTaskSource and whether this JobTaskSource is canceled.
|
||||
class State {
|
||||
public:
|
||||
static constexpr size_t kCanceledMask = 1;
|
||||
static constexpr size_t kWorkerCountBitOffset = 1;
|
||||
static constexpr size_t kWorkerCountIncrement = 1 << kWorkerCountBitOffset;
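
// Layout illustration (not part of the original source): bit 0 is the
// canceled bit and the remaining bits hold the worker count, so a raw value
// of 0b101 (5) decodes to worker_count() == 2 and is_canceled() == true.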
|
||||
|
||||
struct Value {
|
||||
size_t worker_count() const { return value >> kWorkerCountBitOffset; }
|
||||
// Returns true if canceled.
|
||||
bool is_canceled() const { return value & kCanceledMask; }
|
||||
|
||||
uint32_t value;
|
||||
};
|
||||
|
||||
State();
|
||||
~State();
|
||||
|
||||
// Sets as canceled using std::memory_order_relaxed. Returns the state
|
||||
// before the operation.
|
||||
Value Cancel();
|
||||
|
||||
// Increments the worker count by 1 if smaller than |max_concurrency| and if
|
||||
// |!is_canceled()|, using std::memory_order_release, and returns the state
|
||||
// before the operation. Equivalent to Load() otherwise.
|
||||
Value TryIncrementWorkerCountFromWorkerRelease(size_t max_concurrency);
|
||||
|
||||
// Decrements the worker count by 1 using std::memory_order_acquire. Returns
|
||||
// the state before the operation.
|
||||
Value DecrementWorkerCountFromWorkerAcquire();
|
||||
|
||||
// Increments the worker count by 1 using std::memory_order_relaxed. Returns
|
||||
// the state before the operation.
|
||||
Value IncrementWorkerCountFromJoiningThread();
|
||||
|
||||
// Decrements the worker count by 1 using std::memory_order_relaxed. Returns
|
||||
// the state before the operation.
|
||||
Value DecrementWorkerCountFromJoiningThread();
|
||||
|
||||
// Loads and returns the state, using std::memory_order_relaxed.
|
||||
Value Load() const;
|
||||
|
||||
private:
|
||||
std::atomic<uint32_t> value_{0};
|
||||
};
|
||||
|
||||
// Atomic flag that indicates if the joining thread is currently waiting on
|
||||
// another worker to yield or to signal.
|
||||
class JoinFlag {
|
||||
public:
|
||||
static constexpr uint32_t kNotWaiting = 0;
|
||||
static constexpr uint32_t kWaitingForWorkerToSignal = 1;
|
||||
static constexpr uint32_t kWaitingForWorkerToYield = 3;
|
||||
// kWaitingForWorkerToYield is 3 because the impl relies on the following
|
||||
// property.
|
||||
static_assert((kWaitingForWorkerToYield & kWaitingForWorkerToSignal) ==
|
||||
kWaitingForWorkerToSignal,
|
||||
"");
|
||||
|
||||
JoinFlag();
|
||||
~JoinFlag();
|
||||
|
||||
// Sets the status as kWaitingForWorkerToYield using
|
||||
// std::memory_order_relaxed.
|
||||
void SetWaiting();
|
||||
|
||||
// If the flag is kWaitingForWorkerToYield, returns true indicating that the
|
||||
// worker should yield, and atomically updates to kWaitingForWorkerToSignal
|
||||
// (using std::memory_order_relaxed) to ensure that a single worker yields
|
||||
// in response to SetWaiting().
|
||||
bool ShouldWorkerYield();
|
||||
|
||||
// If the flag is kWaiting*, returns true indicating that the worker should
|
||||
// signal, and atomically updates to kNotWaiting (using
|
||||
// std::memory_order_relaxed) to ensure that a single worker signals in
|
||||
// response to SetWaiting().
|
||||
bool ShouldWorkerSignal();
|
||||
|
||||
private:
|
||||
std::atomic<uint32_t> value_{kNotWaiting};
|
||||
};
|
||||
|
||||
~JobTaskSource() override;
|
||||
|
||||
// Called from the joining thread. Waits for the worker count to be below or
|
||||
// equal to max concurrency (will happen when a worker calls
|
||||
// DidProcessTask()). Returns true if the joining thread should run a task, or
|
||||
// false if joining was completed and all other workers returned because
|
||||
// either there's no work remaining or Job was cancelled.
|
||||
bool WaitForParticipationOpportunity();
|
||||
|
||||
// TaskSource:
|
||||
RunStatus WillRunTask() override;
|
||||
Task TakeTask(TaskSource::Transaction* transaction) override;
|
||||
Task Clear(TaskSource::Transaction* transaction) override;
|
||||
bool DidProcessTask(TaskSource::Transaction* transaction) override;
|
||||
SequenceSortKey GetSortKey() const override;
|
||||
|
||||
// Current atomic state.
|
||||
State state_;
|
||||
// Normally, |join_flag_| is protected by |lock_|, except in ShouldYield(),
// hence the use of atomics.
|
||||
JoinFlag join_flag_ GUARDED_BY(lock_);
|
||||
// Signaled when |join_flag_| is kWaiting* and a worker returns.
|
||||
std::unique_ptr<ConditionVariable> worker_released_condition_
|
||||
GUARDED_BY(lock_);
|
||||
|
||||
const Location from_here_;
|
||||
RepeatingCallback<size_t()> max_concurrency_callback_;
|
||||
|
||||
// Worker task set by the job owner.
|
||||
RepeatingCallback<void(JobDelegate*)> worker_task_;
|
||||
// Task returned from TakeTask(), that calls |worker_task_| internally.
|
||||
RepeatingClosure primary_task_;
|
||||
|
||||
const TimeTicks queue_time_;
|
||||
PooledTaskRunnerDelegate* delegate_;
|
||||
|
||||
#if DCHECK_IS_ON()
|
||||
// Synchronizes accesses to |increase_version_|.
|
||||
mutable Lock version_lock_;
|
||||
// Signaled whenever increase_version_ is updated.
|
||||
ConditionVariable version_condition_{&version_lock_};
|
||||
// Incremented every time max concurrency is increased.
|
||||
size_t increase_version_ GUARDED_BY(version_lock_) = 0;
|
||||
#endif // DCHECK_IS_ON()
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(JobTaskSource);
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_JOB_TASK_SOURCE_H_
|
||||
|
|
@ -0,0 +1,48 @@
|
|||
// Copyright 2018 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/pooled_parallel_task_runner.h"
|
||||
#include "base/task/thread_pool/pooled_task_runner_delegate.h"
|
||||
|
||||
#include "base/task/thread_pool/sequence.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
PooledParallelTaskRunner::PooledParallelTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
PooledTaskRunnerDelegate* pooled_task_runner_delegate)
|
||||
: traits_(traits),
|
||||
pooled_task_runner_delegate_(pooled_task_runner_delegate) {}
|
||||
|
||||
PooledParallelTaskRunner::~PooledParallelTaskRunner() = default;
|
||||
|
||||
bool PooledParallelTaskRunner::PostDelayedTask(const Location& from_here,
|
||||
OnceClosure closure,
|
||||
TimeDelta delay) {
|
||||
if (!PooledTaskRunnerDelegate::Exists())
|
||||
return false;
|
||||
|
||||
// Post the task as part of a one-off single-task Sequence.
|
||||
scoped_refptr<Sequence> sequence = MakeRefCounted<Sequence>(
|
||||
traits_, this, TaskSourceExecutionMode::kParallel);
|
||||
|
||||
{
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
sequences_.insert(sequence.get());
|
||||
}
|
||||
|
||||
return pooled_task_runner_delegate_->PostTaskWithSequence(
|
||||
Task(from_here, std::move(closure), delay), std::move(sequence));
|
||||
}
|
||||
|
||||
void PooledParallelTaskRunner::UnregisterSequence(Sequence* sequence) {
|
||||
DCHECK(sequence);
|
||||
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
sequences_.erase(sequence);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,59 @@
|
|||
// Copyright 2018 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_POOLED_PARALLEL_TASK_RUNNER_H_
|
||||
#define BASE_TASK_THREAD_POOL_POOLED_PARALLEL_TASK_RUNNER_H_
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/callback_forward.h"
|
||||
#include "base/containers/flat_set.h"
|
||||
#include "base/location.h"
|
||||
#include "base/task/common/checked_lock.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task_runner.h"
|
||||
#include "base/thread_annotations.h"
|
||||
#include "base/time/time.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
class Sequence;
|
||||
class PooledTaskRunnerDelegate;
|
||||
|
||||
// A task runner that runs tasks in parallel.
|
||||
class BASE_EXPORT PooledParallelTaskRunner : public TaskRunner {
|
||||
public:
|
||||
// Constructs a PooledParallelTaskRunner which can be used to post tasks.
|
||||
PooledParallelTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
PooledTaskRunnerDelegate* pooled_task_runner_delegate);
|
||||
|
||||
// TaskRunner:
|
||||
bool PostDelayedTask(const Location& from_here,
|
||||
OnceClosure closure,
|
||||
TimeDelta delay) override;
|
||||
|
||||
// Removes |sequence| from |sequences_|.
|
||||
void UnregisterSequence(Sequence* sequence);
|
||||
|
||||
private:
|
||||
~PooledParallelTaskRunner() override;
|
||||
|
||||
const TaskTraits traits_;
|
||||
PooledTaskRunnerDelegate* const pooled_task_runner_delegate_;
|
||||
|
||||
CheckedLock lock_;
|
||||
|
||||
// List of alive Sequences instantiated by this PooledParallelTaskRunner.
|
||||
// Sequences are added when they are instantiated, and removed when they are
|
||||
// destroyed.
|
||||
base::flat_set<Sequence*> sequences_ GUARDED_BY(lock_);
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(PooledParallelTaskRunner);
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_POOLED_PARALLEL_TASK_RUNNER_H_
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
// Copyright 2018 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/pooled_sequenced_task_runner.h"
|
||||
|
||||
#include "base/sequence_token.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
PooledSequencedTaskRunner::PooledSequencedTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
PooledTaskRunnerDelegate* pooled_task_runner_delegate)
|
||||
: pooled_task_runner_delegate_(pooled_task_runner_delegate),
|
||||
sequence_(MakeRefCounted<Sequence>(traits,
|
||||
this,
|
||||
TaskSourceExecutionMode::kSequenced)) {
|
||||
}
|
||||
|
||||
PooledSequencedTaskRunner::~PooledSequencedTaskRunner() = default;
|
||||
|
||||
bool PooledSequencedTaskRunner::PostDelayedTask(const Location& from_here,
|
||||
OnceClosure closure,
|
||||
TimeDelta delay) {
|
||||
if (!PooledTaskRunnerDelegate::Exists())
|
||||
return false;
|
||||
|
||||
Task task(from_here, std::move(closure), delay);
|
||||
|
||||
// Post the task as part of |sequence_|.
|
||||
return pooled_task_runner_delegate_->PostTaskWithSequence(std::move(task),
|
||||
sequence_);
|
||||
}
|
||||
|
||||
bool PooledSequencedTaskRunner::PostNonNestableDelayedTask(
|
||||
const Location& from_here,
|
||||
OnceClosure closure,
|
||||
TimeDelta delay) {
|
||||
// Tasks are never nested within the thread pool.
|
||||
return PostDelayedTask(from_here, std::move(closure), delay);
|
||||
}
|
||||
|
||||
bool PooledSequencedTaskRunner::RunsTasksInCurrentSequence() const {
|
||||
return sequence_->token() == SequenceToken::GetForCurrentThread();
|
||||
}
|
||||
|
||||
void PooledSequencedTaskRunner::UpdatePriority(TaskPriority priority) {
|
||||
pooled_task_runner_delegate_->UpdatePriority(sequence_, priority);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,56 @@
|
|||
// Copyright 2018 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_POOLED_SEQUENCED_TASK_RUNNER_H_
|
||||
#define BASE_TASK_THREAD_POOL_POOLED_SEQUENCED_TASK_RUNNER_H_
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/callback_forward.h"
|
||||
#include "base/location.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task/thread_pool/pooled_task_runner_delegate.h"
|
||||
#include "base/task/thread_pool/sequence.h"
|
||||
#include "base/time/time.h"
|
||||
#include "base/updateable_sequenced_task_runner.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
// A task runner that runs tasks in sequence.
|
||||
class BASE_EXPORT PooledSequencedTaskRunner
|
||||
: public UpdateableSequencedTaskRunner {
|
||||
public:
|
||||
// Constructs a PooledSequencedTaskRunner which can be used to post tasks.
|
||||
PooledSequencedTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
PooledTaskRunnerDelegate* pooled_task_runner_delegate);
|
||||
|
||||
// UpdateableSequencedTaskRunner:
|
||||
bool PostDelayedTask(const Location& from_here,
|
||||
OnceClosure closure,
|
||||
TimeDelta delay) override;
|
||||
|
||||
bool PostNonNestableDelayedTask(const Location& from_here,
|
||||
OnceClosure closure,
|
||||
TimeDelta delay) override;
|
||||
|
||||
bool RunsTasksInCurrentSequence() const override;
|
||||
|
||||
void UpdatePriority(TaskPriority priority) override;
|
||||
|
||||
private:
|
||||
~PooledSequencedTaskRunner() override;
|
||||
|
||||
PooledTaskRunnerDelegate* const pooled_task_runner_delegate_;
|
||||
|
||||
// Sequence for all Tasks posted through this TaskRunner.
|
||||
const scoped_refptr<Sequence> sequence_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(PooledSequencedTaskRunner);
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_POOLED_SEQUENCED_TASK_RUNNER_H_
|
||||
|
|
@ -0,0 +1,763 @@
|
|||
// Copyright 2017 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/pooled_single_thread_task_runner_manager.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
||||
#include "base/bind.h"
|
||||
#include "base/callback.h"
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "base/single_thread_task_runner.h"
|
||||
#include "base/stl_util.h"
|
||||
#include "base/strings/stringprintf.h"
|
||||
#include "base/synchronization/atomic_flag.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task/thread_pool/delayed_task_manager.h"
|
||||
#include "base/task/thread_pool/priority_queue.h"
|
||||
#include "base/task/thread_pool/sequence.h"
|
||||
#include "base/task/thread_pool/task.h"
|
||||
#include "base/task/thread_pool/task_source.h"
|
||||
#include "base/task/thread_pool/task_tracker.h"
|
||||
#include "base/task/thread_pool/worker_thread.h"
|
||||
#include "base/threading/platform_thread.h"
|
||||
#include "base/time/time.h"
|
||||
|
||||
#if defined(OS_WIN)
|
||||
#include <windows.h>
|
||||
|
||||
#include "base/win/scoped_com_initializer.h"
|
||||
#endif // defined(OS_WIN)
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
namespace {
|
||||
|
||||
// Boolean indicating whether there's a PooledSingleThreadTaskRunnerManager
// instance alive in this process. This variable should only be set when the
// PooledSingleThreadTaskRunnerManager instance is brought up (on the main
// thread; before any tasks are posted) and reset when the instance is brought
// down (i.e., only when unit tests tear down the task environment and never in
// production). This makes the variable const while worker threads are up and
// as such it doesn't need to be atomic. It is used to tell when a task is
// posted from the main thread after the task environment was brought down in
// unit tests so that PooledSingleThreadTaskRunnerManager-bound TaskRunners can
// return false on PostTask, letting such callers know they should complete
// necessary work synchronously. Note: |!g_manager_is_alive| is generally
// equivalent to |!ThreadPoolInstance::Get()| but has the advantage of being
// valid in thread_pool unit tests that don't instantiate a full thread pool.
bool g_manager_is_alive = false;
|
||||
|
||||
size_t GetEnvironmentIndexForTraits(const TaskTraits& traits) {
|
||||
const bool is_background =
|
||||
traits.priority() == TaskPriority::BEST_EFFORT &&
|
||||
traits.thread_policy() == ThreadPolicy::PREFER_BACKGROUND &&
|
||||
CanUseBackgroundPriorityForWorkerThread();
|
||||
if (traits.may_block() || traits.with_base_sync_primitives())
|
||||
return is_background ? BACKGROUND_BLOCKING : FOREGROUND_BLOCKING;
|
||||
return is_background ? BACKGROUND : FOREGROUND;
|
||||
}
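
// Illustration (added sketch, not in the original source): with the mapping
// above, and assuming the platform supports background thread priorities and
// the default ThreadPolicy prefers background threads, traits resolve roughly
// as follows:
//
//   GetEnvironmentIndexForTraits({TaskPriority::USER_VISIBLE});
//   //   -> FOREGROUND
//   GetEnvironmentIndexForTraits({TaskPriority::USER_VISIBLE, MayBlock()});
//   //   -> FOREGROUND_BLOCKING
//   GetEnvironmentIndexForTraits({TaskPriority::BEST_EFFORT});
//   //   -> BACKGROUND
//   GetEnvironmentIndexForTraits({TaskPriority::BEST_EFFORT, MayBlock()});
//   //   -> BACKGROUND_BLOCKING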
|
||||
|
||||
// Allows for checking the PlatformThread::CurrentRef() against a set
|
||||
// PlatformThreadRef atomically without using locks.
|
||||
class AtomicThreadRefChecker {
|
||||
public:
|
||||
AtomicThreadRefChecker() = default;
|
||||
~AtomicThreadRefChecker() = default;
|
||||
|
||||
void Set() {
|
||||
thread_ref_ = PlatformThread::CurrentRef();
|
||||
is_set_.Set();
|
||||
}
|
||||
|
||||
bool IsCurrentThreadSameAsSetThread() {
|
||||
return is_set_.IsSet() && thread_ref_ == PlatformThread::CurrentRef();
|
||||
}
|
||||
|
||||
private:
|
||||
AtomicFlag is_set_;
|
||||
PlatformThreadRef thread_ref_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(AtomicThreadRefChecker);
|
||||
};
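
// Illustrative use of the checker above (a sketch, not original code): the
// delegate calls Set() once from the worker thread in OnMainEntry(), after
// which any thread can cheaply ask whether it is that worker thread:
//
//   AtomicThreadRefChecker checker;
//   // On the worker thread, before any queries:
//   checker.Set();
//   // From any thread afterwards, lock-free:
//   bool is_worker_thread = checker.IsCurrentThreadSameAsSetThread();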
|
||||
|
||||
class WorkerThreadDelegate : public WorkerThread::Delegate {
|
||||
public:
|
||||
WorkerThreadDelegate(const std::string& thread_name,
|
||||
WorkerThread::ThreadLabel thread_label,
|
||||
TrackedRef<TaskTracker> task_tracker)
|
||||
: task_tracker_(std::move(task_tracker)),
|
||||
thread_name_(thread_name),
|
||||
thread_label_(thread_label) {}
|
||||
|
||||
void set_worker(WorkerThread* worker) {
|
||||
DCHECK(!worker_);
|
||||
worker_ = worker;
|
||||
}
|
||||
|
||||
WorkerThread::ThreadLabel GetThreadLabel() const final {
|
||||
return thread_label_;
|
||||
}
|
||||
|
||||
void OnMainEntry(const WorkerThread* /* worker */) override {
|
||||
thread_ref_checker_.Set();
|
||||
PlatformThread::SetName(thread_name_);
|
||||
}
|
||||
|
||||
RegisteredTaskSource GetWork(WorkerThread* worker) override {
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
DCHECK(worker_awake_);
|
||||
auto task_source = GetWorkLockRequired(worker);
|
||||
if (!task_source) {
|
||||
// The worker will sleep after this returns nullptr.
|
||||
worker_awake_ = false;
|
||||
return nullptr;
|
||||
}
|
||||
auto run_status = task_source.WillRunTask();
|
||||
DCHECK_NE(run_status, TaskSource::RunStatus::kDisallowed);
|
||||
return task_source;
|
||||
}
|
||||
|
||||
void DidProcessTask(RegisteredTaskSource task_source) override {
|
||||
if (task_source) {
|
||||
EnqueueTaskSource(TransactionWithRegisteredTaskSource::FromTaskSource(
|
||||
std::move(task_source)));
|
||||
}
|
||||
}
|
||||
|
||||
TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }
|
||||
|
||||
bool PostTaskNow(scoped_refptr<Sequence> sequence, Task task) {
|
||||
auto transaction = sequence->BeginTransaction();
|
||||
|
||||
// |task| will be pushed to |sequence|, and |sequence| will be queued
|
||||
// to |priority_queue_| iff |sequence_should_be_queued| is true.
|
||||
const bool sequence_should_be_queued = transaction.WillPushTask();
|
||||
RegisteredTaskSource task_source;
|
||||
if (sequence_should_be_queued) {
|
||||
task_source = task_tracker_->RegisterTaskSource(sequence);
|
||||
// We shouldn't push |task| if we're not allowed to queue |task_source|.
|
||||
if (!task_source)
|
||||
return false;
|
||||
}
|
||||
if (!task_tracker_->WillPostTaskNow(task, transaction.traits().priority()))
|
||||
return false;
|
||||
transaction.PushTask(std::move(task));
|
||||
if (task_source) {
|
||||
bool should_wakeup =
|
||||
EnqueueTaskSource({std::move(task_source), std::move(transaction)});
|
||||
if (should_wakeup)
|
||||
worker_->WakeUp();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool RunsTasksInCurrentSequence() {
|
||||
// We check the thread ref instead of the sequence for the benefit of COM
|
||||
// callbacks which may execute without a sequence context.
|
||||
return thread_ref_checker_.IsCurrentThreadSameAsSetThread();
|
||||
}
|
||||
|
||||
void OnMainExit(WorkerThread* /* worker */) override {}
|
||||
|
||||
void DidUpdateCanRunPolicy() {
|
||||
bool should_wakeup = false;
|
||||
{
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
if (!worker_awake_ && CanRunNextTaskSource()) {
|
||||
should_wakeup = true;
|
||||
worker_awake_ = true;
|
||||
}
|
||||
}
|
||||
if (should_wakeup)
|
||||
worker_->WakeUp();
|
||||
}
|
||||
|
||||
void EnableFlushPriorityQueueTaskSourcesOnDestroyForTesting() {
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
priority_queue_.EnableFlushTaskSourcesOnDestroyForTesting();
|
||||
}
|
||||
|
||||
protected:
|
||||
RegisteredTaskSource GetWorkLockRequired(WorkerThread* worker)
|
||||
EXCLUSIVE_LOCKS_REQUIRED(lock_) {
|
||||
if (!CanRunNextTaskSource()) {
|
||||
return nullptr;
|
||||
}
|
||||
return priority_queue_.PopTaskSource();
|
||||
}
|
||||
|
||||
const TrackedRef<TaskTracker>& task_tracker() { return task_tracker_; }
|
||||
|
||||
CheckedLock lock_;
|
||||
bool worker_awake_ GUARDED_BY(lock_) = false;
|
||||
|
||||
const TrackedRef<TaskTracker> task_tracker_;
|
||||
|
||||
private:
|
||||
// Enqueues a task source in this single-threaded worker's priority queue.
// Returns true iff the worker must wake up, i.e. the task source is allowed to
// run and the worker was not awake.
|
||||
bool EnqueueTaskSource(
|
||||
TransactionWithRegisteredTaskSource transaction_with_task_source) {
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
priority_queue_.Push(std::move(transaction_with_task_source));
|
||||
if (!worker_awake_ && CanRunNextTaskSource()) {
|
||||
worker_awake_ = true;
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
|
||||
bool CanRunNextTaskSource() EXCLUSIVE_LOCKS_REQUIRED(lock_) {
|
||||
return !priority_queue_.IsEmpty() &&
|
||||
task_tracker_->CanRunPriority(
|
||||
priority_queue_.PeekSortKey().priority());
|
||||
}
|
||||
|
||||
const std::string thread_name_;
|
||||
const WorkerThread::ThreadLabel thread_label_;
|
||||
|
||||
// The WorkerThread that has |this| as a delegate. Must be set before
|
||||
// starting or posting a task to the WorkerThread, because it's used in
|
||||
// OnMainEntry() and PostTaskNow().
|
||||
WorkerThread* worker_ = nullptr;
|
||||
|
||||
PriorityQueue priority_queue_ GUARDED_BY(lock_);
|
||||
|
||||
AtomicThreadRefChecker thread_ref_checker_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(WorkerThreadDelegate);
|
||||
};
|
||||
|
||||
#if defined(OS_WIN)
|
||||
|
||||
class WorkerThreadCOMDelegate : public WorkerThreadDelegate {
|
||||
public:
|
||||
WorkerThreadCOMDelegate(const std::string& thread_name,
|
||||
WorkerThread::ThreadLabel thread_label,
|
||||
TrackedRef<TaskTracker> task_tracker)
|
||||
: WorkerThreadDelegate(thread_name,
|
||||
thread_label,
|
||||
std::move(task_tracker)) {}
|
||||
|
||||
~WorkerThreadCOMDelegate() override { DCHECK(!scoped_com_initializer_); }
|
||||
|
||||
// WorkerThread::Delegate:
|
||||
void OnMainEntry(const WorkerThread* worker) override {
|
||||
WorkerThreadDelegate::OnMainEntry(worker);
|
||||
|
||||
scoped_com_initializer_ = std::make_unique<win::ScopedCOMInitializer>();
|
||||
}
|
||||
|
||||
RegisteredTaskSource GetWork(WorkerThread* worker) override {
|
||||
// This scheme below allows us to cover the following scenarios:
|
||||
// * Only WorkerThreadDelegate::GetWork() has work:
|
||||
// Always return the task source from GetWork().
|
||||
// * Only the Windows Message Queue has work:
|
||||
// Always return the task source from GetWorkFromWindowsMessageQueue();
|
||||
// * Both WorkerThreadDelegate::GetWork() and the Windows Message Queue
|
||||
// have work:
|
||||
// Process task sources from each source round-robin style.
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
|
||||
// |worker_awake_| is always set before a call to WakeUp(), but it is
|
||||
// not set when messages are added to the Windows Message Queue. Ensure that
|
||||
// it is set before getting work, to avoid unnecessary wake ups.
|
||||
//
|
||||
// Note: It wouldn't be sufficient to set |worker_awake_| in WaitForWork()
|
||||
// when MsgWaitForMultipleObjectsEx() indicates that it was woken up by a
|
||||
// Windows Message, because of the following scenario:
|
||||
// T1: PostTask
|
||||
// Queue task
|
||||
// Set |worker_awake_| to true
|
||||
// T2: Woken up by a Windows Message
|
||||
// Set |worker_awake_| to true
|
||||
// Run the task posted by T1
|
||||
// Wait for work
|
||||
// T1: WakeUp()
|
||||
// T2: Woken up by Waitable Event
|
||||
// Does not set |worker_awake_| (wake up not from Windows Message)
|
||||
// GetWork
|
||||
// !! Getting work while |worker_awake_| is false !!
|
||||
worker_awake_ = true;
|
||||
RegisteredTaskSource task_source;
|
||||
if (get_work_first_) {
|
||||
task_source = WorkerThreadDelegate::GetWorkLockRequired(worker);
|
||||
if (task_source)
|
||||
get_work_first_ = false;
|
||||
}
|
||||
|
||||
if (!task_source) {
|
||||
CheckedAutoUnlock auto_unlock(lock_);
|
||||
task_source = GetWorkFromWindowsMessageQueue();
|
||||
if (task_source)
|
||||
get_work_first_ = true;
|
||||
}
|
||||
|
||||
if (!task_source && !get_work_first_) {
|
||||
// This case is important if we checked the Windows Message Queue first
|
||||
// and found there was no work. We don't want to return null immediately
|
||||
// as that could cause the thread to go to sleep while work is waiting via
|
||||
// WorkerThreadDelegate::GetWork().
|
||||
task_source = WorkerThreadDelegate::GetWorkLockRequired(worker);
|
||||
}
|
||||
if (!task_source) {
|
||||
// The worker will sleep after this returns nullptr.
|
||||
worker_awake_ = false;
|
||||
return nullptr;
|
||||
}
|
||||
auto run_status = task_source.WillRunTask();
|
||||
DCHECK_NE(run_status, TaskSource::RunStatus::kDisallowed);
|
||||
return task_source;
|
||||
}
|
||||
|
||||
void OnMainExit(WorkerThread* /* worker */) override {
|
||||
scoped_com_initializer_.reset();
|
||||
}
|
||||
|
||||
void WaitForWork(WaitableEvent* wake_up_event) override {
|
||||
DCHECK(wake_up_event);
|
||||
const TimeDelta sleep_time = GetSleepTimeout();
|
||||
const DWORD milliseconds_wait = checked_cast<DWORD>(
|
||||
sleep_time.is_max() ? INFINITE : sleep_time.InMilliseconds());
|
||||
const HANDLE wake_up_event_handle = wake_up_event->handle();
|
||||
MsgWaitForMultipleObjectsEx(1, &wake_up_event_handle, milliseconds_wait,
|
||||
QS_ALLINPUT, 0);
|
||||
}
|
||||
|
||||
private:
|
||||
RegisteredTaskSource GetWorkFromWindowsMessageQueue() {
|
||||
MSG msg;
|
||||
if (PeekMessage(&msg, nullptr, 0, 0, PM_REMOVE) != FALSE) {
|
||||
Task pump_message_task(FROM_HERE,
|
||||
BindOnce(
|
||||
[](MSG msg) {
|
||||
TranslateMessage(&msg);
|
||||
DispatchMessage(&msg);
|
||||
},
|
||||
std::move(msg)),
|
||||
TimeDelta());
|
||||
if (task_tracker()->WillPostTask(
|
||||
&pump_message_task, TaskShutdownBehavior::SKIP_ON_SHUTDOWN)) {
|
||||
auto transaction = message_pump_sequence_->BeginTransaction();
|
||||
const bool sequence_should_be_queued = transaction.WillPushTask();
|
||||
DCHECK(sequence_should_be_queued)
|
||||
<< "GetWorkFromWindowsMessageQueue() does not expect "
|
||||
"queueing of pump tasks.";
|
||||
auto registered_task_source = task_tracker_->RegisterTaskSource(
|
||||
std::move(message_pump_sequence_));
|
||||
if (!registered_task_source)
|
||||
return nullptr;
|
||||
transaction.PushTask(std::move(pump_message_task));
|
||||
return registered_task_source;
|
||||
}
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool get_work_first_ = true;
|
||||
const scoped_refptr<Sequence> message_pump_sequence_ =
|
||||
MakeRefCounted<Sequence>(TaskTraits{MayBlock()},
|
||||
nullptr,
|
||||
TaskSourceExecutionMode::kParallel);
|
||||
std::unique_ptr<win::ScopedCOMInitializer> scoped_com_initializer_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(WorkerThreadCOMDelegate);
|
||||
};
|
||||
|
||||
#endif // defined(OS_WIN)
|
||||
|
||||
} // namespace
|
||||
|
||||
class PooledSingleThreadTaskRunnerManager::PooledSingleThreadTaskRunner
|
||||
: public SingleThreadTaskRunner {
|
||||
public:
|
||||
// Constructs a PooledSingleThreadTaskRunner that indirectly controls the
|
||||
// lifetime of a dedicated |worker| for |traits|.
|
||||
PooledSingleThreadTaskRunner(PooledSingleThreadTaskRunnerManager* const outer,
|
||||
const TaskTraits& traits,
|
||||
WorkerThread* worker,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode)
|
||||
: outer_(outer),
|
||||
worker_(worker),
|
||||
thread_mode_(thread_mode),
|
||||
sequence_(
|
||||
MakeRefCounted<Sequence>(traits,
|
||||
this,
|
||||
TaskSourceExecutionMode::kSingleThread)) {
|
||||
DCHECK(outer_);
|
||||
DCHECK(worker_);
|
||||
}
|
||||
|
||||
// SingleThreadTaskRunner:
|
||||
bool PostDelayedTask(const Location& from_here,
|
||||
OnceClosure closure,
|
||||
TimeDelta delay) override {
|
||||
if (!g_manager_is_alive)
|
||||
return false;
|
||||
|
||||
Task task(from_here, std::move(closure), delay);
|
||||
|
||||
if (!outer_->task_tracker_->WillPostTask(&task,
|
||||
sequence_->shutdown_behavior())) {
|
||||
return false;
|
||||
}
|
||||
|
||||
if (task.delayed_run_time.is_null())
|
||||
return GetDelegate()->PostTaskNow(sequence_, std::move(task));
|
||||
|
||||
// Unretained(GetDelegate()) is safe because this TaskRunner and its
|
||||
// worker are kept alive as long as there are pending Tasks.
|
||||
outer_->delayed_task_manager_->AddDelayedTask(
|
||||
std::move(task),
|
||||
BindOnce(IgnoreResult(&WorkerThreadDelegate::PostTaskNow),
|
||||
Unretained(GetDelegate()), sequence_),
|
||||
this);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool PostNonNestableDelayedTask(const Location& from_here,
|
||||
OnceClosure closure,
|
||||
TimeDelta delay) override {
|
||||
// Tasks are never nested within the thread pool.
|
||||
return PostDelayedTask(from_here, std::move(closure), delay);
|
||||
}
|
||||
|
||||
bool RunsTasksInCurrentSequence() const override {
|
||||
if (!g_manager_is_alive)
|
||||
return false;
|
||||
return GetDelegate()->RunsTasksInCurrentSequence();
|
||||
}
|
||||
|
||||
private:
|
||||
~PooledSingleThreadTaskRunner() override {
|
||||
// Only unregister if this is a DEDICATED SingleThreadTaskRunner. SHARED
|
||||
// task runner WorkerThreads are managed separately as they are reused.
|
||||
// |g_manager_is_alive| avoids a use-after-free should this
|
||||
// PooledSingleThreadTaskRunner outlive its manager. It is safe to access
|
||||
// |g_manager_is_alive| without synchronization primitives as it is const
|
||||
// for the lifetime of the manager and ~PooledSingleThreadTaskRunner()
|
||||
// either happens prior to the end of JoinForTesting() (which happens-before
|
||||
// manager's destruction) or on main thread after the task environment's
|
||||
// entire destruction (which happens-after the manager's destruction). Yes,
|
||||
// there's a theoretical use case where the last ref to this
|
||||
// PooledSingleThreadTaskRunner is handed to a thread not controlled by
|
||||
// thread_pool and that this ends up causing
|
||||
// ~PooledSingleThreadTaskRunner() to race with
|
||||
// ~PooledSingleThreadTaskRunnerManager() but this is intentionally not
|
||||
// supported (and it doesn't matter in production where we leak the task
|
||||
// environment for such reasons). TSan should catch this weird paradigm
|
||||
// should anyone elect to use it in a unit test and the error would point
|
||||
// here.
|
||||
if (g_manager_is_alive &&
|
||||
thread_mode_ == SingleThreadTaskRunnerThreadMode::DEDICATED) {
|
||||
outer_->UnregisterWorkerThread(worker_);
|
||||
}
|
||||
}
|
||||
|
||||
WorkerThreadDelegate* GetDelegate() const {
|
||||
return static_cast<WorkerThreadDelegate*>(worker_->delegate());
|
||||
}
|
||||
|
||||
PooledSingleThreadTaskRunnerManager* const outer_;
|
||||
WorkerThread* const worker_;
|
||||
const SingleThreadTaskRunnerThreadMode thread_mode_;
|
||||
const scoped_refptr<Sequence> sequence_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(PooledSingleThreadTaskRunner);
|
||||
};
|
||||
|
||||
PooledSingleThreadTaskRunnerManager::PooledSingleThreadTaskRunnerManager(
|
||||
TrackedRef<TaskTracker> task_tracker,
|
||||
DelayedTaskManager* delayed_task_manager)
|
||||
: task_tracker_(std::move(task_tracker)),
|
||||
delayed_task_manager_(delayed_task_manager) {
|
||||
DCHECK(task_tracker_);
|
||||
DCHECK(delayed_task_manager_);
|
||||
#if defined(OS_WIN)
|
||||
static_assert(std::extent<decltype(shared_com_worker_threads_)>() ==
|
||||
std::extent<decltype(shared_worker_threads_)>(),
|
||||
"The size of |shared_com_worker_threads_| must match "
|
||||
"|shared_worker_threads_|");
|
||||
static_assert(
|
||||
std::extent<
|
||||
std::remove_reference<decltype(shared_com_worker_threads_[0])>>() ==
|
||||
std::extent<
|
||||
std::remove_reference<decltype(shared_worker_threads_[0])>>(),
|
||||
"The size of |shared_com_worker_threads_| must match "
|
||||
"|shared_worker_threads_|");
|
||||
#endif // defined(OS_WIN)
|
||||
DCHECK(!g_manager_is_alive);
|
||||
g_manager_is_alive = true;
|
||||
}
|
||||
|
||||
PooledSingleThreadTaskRunnerManager::~PooledSingleThreadTaskRunnerManager() {
|
||||
DCHECK(g_manager_is_alive);
|
||||
g_manager_is_alive = false;
|
||||
}
|
||||
|
||||
void PooledSingleThreadTaskRunnerManager::Start(
|
||||
WorkerThreadObserver* worker_thread_observer) {
|
||||
DCHECK(!worker_thread_observer_);
|
||||
worker_thread_observer_ = worker_thread_observer;
|
||||
|
||||
decltype(workers_) workers_to_start;
|
||||
{
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
started_ = true;
|
||||
workers_to_start = workers_;
|
||||
}
|
||||
|
||||
// Start workers that were created before this method was called.
|
||||
// Workers that already need to wake up are already signaled as part of
|
||||
// PooledSingleThreadTaskRunner::PostTaskNow(). As a result, it's
|
||||
// unnecessary to call WakeUp() for each worker (in fact, an extraneous
|
||||
// WakeUp() would be racy and wrong - see https://crbug.com/862582).
|
||||
for (scoped_refptr<WorkerThread> worker : workers_to_start) {
|
||||
worker->Start(worker_thread_observer_);
|
||||
}
|
||||
}
|
||||
|
||||
void PooledSingleThreadTaskRunnerManager::DidUpdateCanRunPolicy() {
|
||||
decltype(workers_) workers_to_update;
|
||||
|
||||
{
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
if (!started_)
|
||||
return;
|
||||
workers_to_update = workers_;
|
||||
}
|
||||
// Any worker created after the lock is released will see the latest
|
||||
// CanRunPolicy if tasks are posted to it and thus doesn't need a
|
||||
// DidUpdateCanRunPolicy() notification.
|
||||
for (auto& worker : workers_to_update) {
|
||||
static_cast<WorkerThreadDelegate*>(worker->delegate())
|
||||
->DidUpdateCanRunPolicy();
|
||||
}
|
||||
}
|
||||
|
||||
scoped_refptr<SingleThreadTaskRunner>
|
||||
PooledSingleThreadTaskRunnerManager::CreateSingleThreadTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode) {
|
||||
return CreateTaskRunnerImpl<WorkerThreadDelegate>(traits, thread_mode);
|
||||
}
|
||||
|
||||
#if defined(OS_WIN)
|
||||
scoped_refptr<SingleThreadTaskRunner>
|
||||
PooledSingleThreadTaskRunnerManager::CreateCOMSTATaskRunner(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode) {
|
||||
return CreateTaskRunnerImpl<WorkerThreadCOMDelegate>(traits, thread_mode);
|
||||
}
|
||||
#endif // defined(OS_WIN)
|
||||
|
||||
// static
|
||||
PooledSingleThreadTaskRunnerManager::ContinueOnShutdown
|
||||
PooledSingleThreadTaskRunnerManager::TraitsToContinueOnShutdown(
|
||||
const TaskTraits& traits) {
|
||||
if (traits.shutdown_behavior() == TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN)
|
||||
return IS_CONTINUE_ON_SHUTDOWN;
|
||||
return IS_NOT_CONTINUE_ON_SHUTDOWN;
|
||||
}
|
||||
|
||||
template <typename DelegateType>
|
||||
scoped_refptr<PooledSingleThreadTaskRunnerManager::PooledSingleThreadTaskRunner>
|
||||
PooledSingleThreadTaskRunnerManager::CreateTaskRunnerImpl(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode) {
|
||||
DCHECK(thread_mode != SingleThreadTaskRunnerThreadMode::SHARED ||
|
||||
!traits.with_base_sync_primitives())
|
||||
<< "Using WithBaseSyncPrimitives() on a shared SingleThreadTaskRunner "
|
||||
"may cause deadlocks. Either reevaluate your usage (e.g. use "
|
||||
"SequencedTaskRunner) or use "
|
||||
"SingleThreadTaskRunnerThreadMode::DEDICATED.";
|
||||
// To simplify the code, |dedicated_worker| is a local only variable that
|
||||
// allows the code to treat both the DEDICATED and SHARED cases similarly for
|
||||
// SingleThreadTaskRunnerThreadMode. In DEDICATED, the scoped_refptr is backed
|
||||
// by a local variable and in SHARED, the scoped_refptr is backed by a member
|
||||
// variable.
|
||||
WorkerThread* dedicated_worker = nullptr;
|
||||
WorkerThread*& worker =
|
||||
thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
|
||||
? dedicated_worker
|
||||
: GetSharedWorkerThreadForTraits<DelegateType>(traits);
|
||||
bool new_worker = false;
|
||||
bool started;
|
||||
{
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
if (!worker) {
|
||||
const auto& environment_params =
|
||||
kEnvironmentParams[GetEnvironmentIndexForTraits(traits)];
|
||||
std::string worker_name;
|
||||
if (thread_mode == SingleThreadTaskRunnerThreadMode::SHARED)
|
||||
worker_name += "Shared";
|
||||
worker_name += environment_params.name_suffix;
|
||||
worker = CreateAndRegisterWorkerThread<DelegateType>(
|
||||
worker_name, thread_mode,
|
||||
CanUseBackgroundPriorityForWorkerThread()
|
||||
? environment_params.priority_hint
|
||||
: ThreadPriority::NORMAL);
|
||||
new_worker = true;
|
||||
}
|
||||
started = started_;
|
||||
}
|
||||
|
||||
if (new_worker && started)
|
||||
worker->Start(worker_thread_observer_);
|
||||
|
||||
return MakeRefCounted<PooledSingleThreadTaskRunner>(this, traits, worker,
|
||||
thread_mode);
|
||||
}
|
||||
|
||||
void PooledSingleThreadTaskRunnerManager::JoinForTesting() {
|
||||
decltype(workers_) local_workers;
|
||||
{
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
local_workers = std::move(workers_);
|
||||
}
|
||||
|
||||
for (const auto& worker : local_workers) {
|
||||
static_cast<WorkerThreadDelegate*>(worker->delegate())
|
||||
->EnableFlushPriorityQueueTaskSourcesOnDestroyForTesting();
|
||||
worker->JoinForTesting();
|
||||
}
|
||||
|
||||
{
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
DCHECK(workers_.empty())
|
||||
<< "New worker(s) unexpectedly registered during join.";
|
||||
workers_ = std::move(local_workers);
|
||||
}
|
||||
|
||||
// Release shared WorkerThreads at the end so they get joined above. If
|
||||
// this call happens before the joins, the WorkerThreads are effectively
|
||||
// detached and may outlive the PooledSingleThreadTaskRunnerManager.
|
||||
ReleaseSharedWorkerThreads();
|
||||
}
|
||||
|
||||
template <>
|
||||
std::unique_ptr<WorkerThreadDelegate>
|
||||
PooledSingleThreadTaskRunnerManager::CreateWorkerThreadDelegate<
|
||||
WorkerThreadDelegate>(const std::string& name,
|
||||
int id,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode) {
|
||||
return std::make_unique<WorkerThreadDelegate>(
|
||||
StringPrintf("ThreadPoolSingleThread%s%d", name.c_str(), id),
|
||||
thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
|
||||
? WorkerThread::ThreadLabel::DEDICATED
|
||||
: WorkerThread::ThreadLabel::SHARED,
|
||||
task_tracker_);
|
||||
}
|
||||
|
||||
#if defined(OS_WIN)
|
||||
template <>
|
||||
std::unique_ptr<WorkerThreadDelegate>
|
||||
PooledSingleThreadTaskRunnerManager::CreateWorkerThreadDelegate<
|
||||
WorkerThreadCOMDelegate>(const std::string& name,
|
||||
int id,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode) {
|
||||
return std::make_unique<WorkerThreadCOMDelegate>(
|
||||
StringPrintf("ThreadPoolSingleThreadCOMSTA%s%d", name.c_str(), id),
|
||||
thread_mode == SingleThreadTaskRunnerThreadMode::DEDICATED
|
||||
? WorkerThread::ThreadLabel::DEDICATED_COM
|
||||
: WorkerThread::ThreadLabel::SHARED_COM,
|
||||
task_tracker_);
|
||||
}
|
||||
#endif // defined(OS_WIN)
|
||||
|
||||
template <typename DelegateType>
|
||||
WorkerThread*
|
||||
PooledSingleThreadTaskRunnerManager::CreateAndRegisterWorkerThread(
|
||||
const std::string& name,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode,
|
||||
ThreadPriority priority_hint) {
|
||||
int id = next_worker_id_++;
|
||||
std::unique_ptr<WorkerThreadDelegate> delegate =
|
||||
CreateWorkerThreadDelegate<DelegateType>(name, id, thread_mode);
|
||||
WorkerThreadDelegate* delegate_raw = delegate.get();
|
||||
scoped_refptr<WorkerThread> worker = MakeRefCounted<WorkerThread>(
|
||||
priority_hint, std::move(delegate), task_tracker_);
|
||||
delegate_raw->set_worker(worker.get());
|
||||
workers_.emplace_back(std::move(worker));
|
||||
return workers_.back().get();
|
||||
}
|
||||
|
||||
template <>
|
||||
WorkerThread*&
|
||||
PooledSingleThreadTaskRunnerManager::GetSharedWorkerThreadForTraits<
|
||||
WorkerThreadDelegate>(const TaskTraits& traits) {
|
||||
return shared_worker_threads_[GetEnvironmentIndexForTraits(traits)]
|
||||
[TraitsToContinueOnShutdown(traits)];
|
||||
}
|
||||
|
||||
#if defined(OS_WIN)
|
||||
template <>
|
||||
WorkerThread*&
|
||||
PooledSingleThreadTaskRunnerManager::GetSharedWorkerThreadForTraits<
|
||||
WorkerThreadCOMDelegate>(const TaskTraits& traits) {
|
||||
return shared_com_worker_threads_[GetEnvironmentIndexForTraits(traits)]
|
||||
[TraitsToContinueOnShutdown(traits)];
|
||||
}
|
||||
#endif // defined(OS_WIN)
|
||||
|
||||
void PooledSingleThreadTaskRunnerManager::UnregisterWorkerThread(
|
||||
WorkerThread* worker) {
|
||||
// Cleanup uses a CheckedLock, so call Cleanup() after releasing |lock_|.
|
||||
scoped_refptr<WorkerThread> worker_to_destroy;
|
||||
{
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
|
||||
// Skip when joining (the join logic takes care of the rest).
|
||||
if (workers_.empty())
|
||||
return;
|
||||
|
||||
auto worker_iter = std::find(workers_.begin(), workers_.end(), worker);
|
||||
DCHECK(worker_iter != workers_.end());
|
||||
worker_to_destroy = std::move(*worker_iter);
|
||||
workers_.erase(worker_iter);
|
||||
}
|
||||
worker_to_destroy->Cleanup();
|
||||
}
|
||||
|
||||
void PooledSingleThreadTaskRunnerManager::ReleaseSharedWorkerThreads() {
|
||||
decltype(shared_worker_threads_) local_shared_worker_threads;
|
||||
#if defined(OS_WIN)
|
||||
decltype(shared_com_worker_threads_) local_shared_com_worker_threads;
|
||||
#endif
|
||||
{
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
for (size_t i = 0; i < base::size(shared_worker_threads_); ++i) {
|
||||
for (size_t j = 0; j < base::size(shared_worker_threads_[i]); ++j) {
|
||||
local_shared_worker_threads[i][j] = shared_worker_threads_[i][j];
|
||||
shared_worker_threads_[i][j] = nullptr;
|
||||
#if defined(OS_WIN)
|
||||
local_shared_com_worker_threads[i][j] =
|
||||
shared_com_worker_threads_[i][j];
|
||||
shared_com_worker_threads_[i][j] = nullptr;
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (size_t i = 0; i < base::size(local_shared_worker_threads); ++i) {
|
||||
for (size_t j = 0; j < base::size(local_shared_worker_threads[i]); ++j) {
|
||||
if (local_shared_worker_threads[i][j])
|
||||
UnregisterWorkerThread(local_shared_worker_threads[i][j]);
|
||||
#if defined(OS_WIN)
|
||||
if (local_shared_com_worker_threads[i][j])
|
||||
UnregisterWorkerThread(local_shared_com_worker_threads[i][j]);
|
||||
#endif
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,158 @@
|
|||
// Copyright 2017 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_POOLED_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
|
||||
#define BASE_TASK_THREAD_POOL_POOLED_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_
|
||||
|
||||
#include <memory>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/memory/ref_counted.h"
|
||||
#include "base/task/common/checked_lock.h"
|
||||
#include "base/task/single_thread_task_runner_thread_mode.h"
|
||||
#include "base/task/thread_pool/environment_config.h"
|
||||
#include "base/task/thread_pool/tracked_ref.h"
|
||||
#include "base/thread_annotations.h"
|
||||
#include "base/threading/platform_thread.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
namespace base {
|
||||
|
||||
class TaskTraits;
|
||||
class WorkerThreadObserver;
|
||||
class SingleThreadTaskRunner;
|
||||
|
||||
namespace internal {
|
||||
|
||||
class DelayedTaskManager;
|
||||
class WorkerThread;
|
||||
class TaskTracker;
|
||||
|
||||
namespace {
|
||||
|
||||
class WorkerThreadDelegate;
|
||||
|
||||
} // namespace
|
||||
|
||||
// Manages a group of threads which are each associated with one or more
|
||||
// SingleThreadTaskRunners.
|
||||
//
|
||||
// SingleThreadTaskRunners using SingleThreadTaskRunnerThreadMode::SHARED are
|
||||
// backed by shared WorkerThreads for each COM+task environment combination.
|
||||
// These workers are lazily instantiated and then only reclaimed during
// JoinForTesting().
|
||||
//
|
||||
// No threads are created (and hence no tasks can run) before Start() is called.
|
||||
//
|
||||
// This class is thread-safe.
|
||||
class BASE_EXPORT PooledSingleThreadTaskRunnerManager final {
|
||||
public:
|
||||
PooledSingleThreadTaskRunnerManager(TrackedRef<TaskTracker> task_tracker,
|
||||
DelayedTaskManager* delayed_task_manager);
|
||||
~PooledSingleThreadTaskRunnerManager();
|
||||
|
||||
// Starts threads for existing SingleThreadTaskRunners and allows threads to
|
||||
// be started when SingleThreadTaskRunners are created in the future. If
|
||||
// specified, |worker_thread_observer| will be notified when a worker
|
||||
// enters and exits its main function. It must not be destroyed before
|
||||
// JoinForTesting() has returned (must never be destroyed in production).
|
||||
void Start(WorkerThreadObserver* worker_thread_observer = nullptr);
|
||||
|
||||
// Wakes up workers as appropriate for the new CanRunPolicy policy. Must be
|
||||
// called after an update to CanRunPolicy in TaskTracker.
|
||||
void DidUpdateCanRunPolicy();
|
||||
|
||||
// Creates a SingleThreadTaskRunner which runs tasks with |traits| on a thread
|
||||
// named "ThreadPoolSingleThread[Shared]" +
|
||||
// kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
|
||||
// index.
|
||||
scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode);
|
||||
|
||||
#if defined(OS_WIN)
|
||||
// Creates a SingleThreadTaskRunner which runs tasks with |traits| on a COM
|
||||
// STA thread named "ThreadPoolSingleThreadCOMSTA[Shared]" +
|
||||
// kEnvironmentParams[GetEnvironmentIndexForTraits(traits)].name_suffix +
|
||||
// index.
|
||||
scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode);
|
||||
#endif // defined(OS_WIN)
|
||||
|
||||
void JoinForTesting();
|
||||
|
||||
private:
|
||||
class PooledSingleThreadTaskRunner;
|
||||
|
||||
enum ContinueOnShutdown {
|
||||
IS_CONTINUE_ON_SHUTDOWN,
|
||||
IS_NOT_CONTINUE_ON_SHUTDOWN,
|
||||
CONTINUE_ON_SHUTDOWN_COUNT,
|
||||
};
|
||||
|
||||
static ContinueOnShutdown TraitsToContinueOnShutdown(
|
||||
const TaskTraits& traits);
|
||||
|
||||
template <typename DelegateType>
|
||||
scoped_refptr<PooledSingleThreadTaskRunner> CreateTaskRunnerImpl(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode);
|
||||
|
||||
template <typename DelegateType>
|
||||
std::unique_ptr<WorkerThreadDelegate> CreateWorkerThreadDelegate(
|
||||
const std::string& name,
|
||||
int id,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode);
|
||||
|
||||
template <typename DelegateType>
|
||||
WorkerThread* CreateAndRegisterWorkerThread(
|
||||
const std::string& name,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode,
|
||||
ThreadPriority priority_hint) EXCLUSIVE_LOCKS_REQUIRED(lock_);
|
||||
|
||||
template <typename DelegateType>
|
||||
WorkerThread*& GetSharedWorkerThreadForTraits(const TaskTraits& traits);
|
||||
|
||||
void UnregisterWorkerThread(WorkerThread* worker);
|
||||
|
||||
void ReleaseSharedWorkerThreads();
|
||||
|
||||
const TrackedRef<TaskTracker> task_tracker_;
|
||||
DelayedTaskManager* const delayed_task_manager_;
|
||||
|
||||
// Optional observer notified when a worker enters and exits its main
|
||||
// function. Set in Start() and never modified afterwards.
|
||||
WorkerThreadObserver* worker_thread_observer_ = nullptr;
|
||||
|
||||
CheckedLock lock_;
|
||||
std::vector<scoped_refptr<WorkerThread>> workers_ GUARDED_BY(lock_);
|
||||
int next_worker_id_ GUARDED_BY(lock_) = 0;
|
||||
|
||||
// Workers for SingleThreadTaskRunnerThreadMode::SHARED tasks. It is
// important to have separate threads for CONTINUE_ON_SHUTDOWN and non-
// CONTINUE_ON_SHUTDOWN to avoid being in a situation where a
// CONTINUE_ON_SHUTDOWN task effectively blocks shutdown by preventing a
// BLOCK_SHUTDOWN task from being scheduled. https://crbug.com/829786
|
||||
WorkerThread* shared_worker_threads_[ENVIRONMENT_COUNT]
|
||||
[CONTINUE_ON_SHUTDOWN_COUNT] GUARDED_BY(
|
||||
lock_) = {};
|
||||
#if defined(OS_WIN)
|
||||
WorkerThread* shared_com_worker_threads_
|
||||
[ENVIRONMENT_COUNT][CONTINUE_ON_SHUTDOWN_COUNT] GUARDED_BY(lock_) = {};
|
||||
#endif // defined(OS_WIN)
|
||||
|
||||
// Set to true when Start() is called.
|
||||
bool started_ GUARDED_BY(lock_) = false;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(PooledSingleThreadTaskRunnerManager);
|
||||
};
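
// Typical interaction (a hedged sketch; the manager is owned by the thread
// pool implementation and the setup around it is assumed):
//
//   PooledSingleThreadTaskRunnerManager manager(task_tracker_ref,
//                                               &delayed_task_manager);
//   manager.Start();
//   // SHARED mode may reuse one worker per environment/shutdown-behavior.
//   auto shared_runner = manager.CreateSingleThreadTaskRunner(
//       {TaskPriority::USER_VISIBLE},
//       SingleThreadTaskRunnerThreadMode::SHARED);
//   // DEDICATED mode always gets its own WorkerThread.
//   auto dedicated_runner = manager.CreateSingleThreadTaskRunner(
//       {MayBlock()}, SingleThreadTaskRunnerThreadMode::DEDICATED);
//   shared_runner->PostTask(FROM_HERE, BindOnce(&DoWork));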
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_POOLED_SINGLE_THREAD_TASK_RUNNER_MANAGER_H_

@ -0,0 +1,40 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/thread_pool/pooled_task_runner_delegate.h"

namespace base {
namespace internal {

namespace {

// Indicates whether a PooledTaskRunnerDelegate instance exists in the
// process. Used to tell when a task is posted from the main thread after the
// task environment was brought down in unit tests so that TaskRunners can
// return false on PostTask, letting callers know they should complete
// necessary work synchronously. A PooledTaskRunnerDelegate is usually
// instantiated before worker threads are started and deleted after worker
// threads have been joined. This makes the variable const while worker threads
// are up and as such it doesn't need to be atomic.
bool g_exists = false;

}  // namespace

PooledTaskRunnerDelegate::PooledTaskRunnerDelegate() {
  DCHECK(!g_exists);
  g_exists = true;
}

PooledTaskRunnerDelegate::~PooledTaskRunnerDelegate() {
  DCHECK(g_exists);
  g_exists = false;
}

// static
bool PooledTaskRunnerDelegate::Exists() {
  return g_exists;
}

}  // namespace internal
}  // namespace base

@ -0,0 +1,64 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_POOLED_TASK_RUNNER_DELEGATE_H_
#define BASE_TASK_THREAD_POOL_POOLED_TASK_RUNNER_DELEGATE_H_

#include "base/base_export.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool/job_task_source.h"
#include "base/task/thread_pool/sequence.h"
#include "base/task/thread_pool/task.h"
#include "base/task/thread_pool/task_source.h"

namespace base {
namespace internal {

// Delegate interface for PooledParallelTaskRunner and
// PooledSequencedTaskRunner.
class BASE_EXPORT PooledTaskRunnerDelegate {
 public:
  PooledTaskRunnerDelegate();
  virtual ~PooledTaskRunnerDelegate();

  // Returns true if a PooledTaskRunnerDelegate instance exists in the
  // process. This is needed in case of unit tests wherein a TaskRunner
  // outlives the ThreadPoolInstance that created it.
  static bool Exists();

  // Returns true if the currently running |task_source| must return ASAP.
  // Thread-safe but may return an outdated result (if a task unnecessarily
  // yields due to this, it will simply be re-scheduled).
  virtual bool ShouldYield(const TaskSource* task_source) const = 0;

  // Invoked when a |task| is posted to the PooledParallelTaskRunner or
  // PooledSequencedTaskRunner. The implementation must post |task| to
  // |sequence| within the appropriate priority queue, depending on
  // |sequence|'s traits. Returns true if the task was successfully posted.
  virtual bool PostTaskWithSequence(Task task,
                                    scoped_refptr<Sequence> sequence) = 0;

  // Invoked when a task is posted as a Job. The implementation must add
  // |task_source| to the appropriate priority queue, depending on
  // |task_source|'s traits, if it's not there already. Returns true if the
  // task source was successfully enqueued or was already enqueued.
  virtual bool EnqueueJobTaskSource(
      scoped_refptr<JobTaskSource> task_source) = 0;

  // Removes |task_source| from the priority queue.
  virtual void RemoveJobTaskSource(
      scoped_refptr<JobTaskSource> task_source) = 0;

  // Invoked when the priority of |task_source|'s TaskRunner is updated. The
  // implementation must update |task_source|'s priority to |priority| and then
  // place |task_source| in the correct priority-queue position within the
  // appropriate thread group.
  virtual void UpdatePriority(scoped_refptr<TaskSource> task_source,
                              TaskPriority priority) = 0;
};
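
// Illustration (a sketch, not part of this header): pooled task runners use
// Exists() to fail gracefully when a TaskRunner outlives the
// ThreadPoolInstance in unit tests, along the lines of:
//
//   bool SomePooledTaskRunner::PostDelayedTask(const Location& from_here,
//                                              OnceClosure closure,
//                                              TimeDelta delay) {
//     if (!PooledTaskRunnerDelegate::Exists())
//       return false;  // Thread pool already torn down; caller handles it.
//     // ... hand the task off to the delegate ...
//   }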

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_POOLED_TASK_RUNNER_DELEGATE_H_

@ -0,0 +1,210 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/priority_queue.h"
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "base/logging.h"
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "base/stl_util.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
// A class combining a TaskSource and the SequenceSortKey that determines its
|
||||
// position in a PriorityQueue. Instances are only mutable via
|
||||
// take_task_source() which can only be called once and renders its instance
|
||||
// invalid after the call.
|
||||
class PriorityQueue::TaskSourceAndSortKey {
|
||||
public:
|
||||
TaskSourceAndSortKey() = default;
|
||||
TaskSourceAndSortKey(RegisteredTaskSource task_source,
|
||||
const SequenceSortKey& sort_key)
|
||||
: task_source_(std::move(task_source)), sort_key_(sort_key) {
|
||||
DCHECK(task_source_);
|
||||
}
|
||||
|
||||
// Note: while |task_source_| should always be non-null post-move (i.e. we
|
||||
// shouldn't be moving an invalid TaskSourceAndSortKey around), there can't be
|
||||
// a DCHECK(task_source_) on moves as IntrusiveHeap moves elements on pop
|
||||
// instead of overwriting them: resulting in the move of a
|
||||
// TaskSourceAndSortKey with a null |task_source_| in Transaction::Pop()'s
|
||||
// implementation.
|
||||
TaskSourceAndSortKey(TaskSourceAndSortKey&& other) = default;
|
||||
TaskSourceAndSortKey& operator=(TaskSourceAndSortKey&& other) = default;
|
||||
|
||||
// Extracts |task_source_| from this object. This object is invalid after this
|
||||
// call.
|
||||
RegisteredTaskSource take_task_source() {
|
||||
DCHECK(task_source_);
|
||||
task_source_->ClearHeapHandle();
|
||||
return std::move(task_source_);
|
||||
}
|
||||
|
||||
// Compares this TaskSourceAndSortKey to |other| based on their respective
|
||||
// |sort_key_|. Required by IntrusiveHeap.
|
||||
bool operator<=(const TaskSourceAndSortKey& other) const {
|
||||
return sort_key_ <= other.sort_key_;
|
||||
}
|
||||
|
||||
// Required by IntrusiveHeap.
|
||||
void SetHeapHandle(const HeapHandle& handle) {
|
||||
DCHECK(task_source_);
|
||||
task_source_->SetHeapHandle(handle);
|
||||
}
|
||||
|
||||
// Required by IntrusiveHeap.
|
||||
void ClearHeapHandle() {
|
||||
// Ensure |task_source_| is not nullptr, which may be the case if
|
||||
// take_task_source() was called before this.
|
||||
if (task_source_)
|
||||
task_source_->ClearHeapHandle();
|
||||
}
|
||||
|
||||
// Required by IntrusiveHeap.
|
||||
HeapHandle GetHeapHandle() const {
|
||||
if (task_source_)
|
||||
return task_source_->GetHeapHandle();
|
||||
return HeapHandle::Invalid();
|
||||
}
|
||||
|
||||
const RegisteredTaskSource& task_source() const { return task_source_; }
|
||||
RegisteredTaskSource& task_source() { return task_source_; }
|
||||
|
||||
const SequenceSortKey& sort_key() const { return sort_key_; }
|
||||
|
||||
private:
|
||||
RegisteredTaskSource task_source_;
|
||||
SequenceSortKey sort_key_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(TaskSourceAndSortKey);
|
||||
};
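
// For reference (added sketch, not original code): IntrusiveHeap requires its
// element type to provide exactly the operations implemented above, i.e. an
// interface of the form:
//
//   class Element {
//    public:
//     bool operator<=(const Element& other) const;   // heap ordering
//     void SetHeapHandle(const HeapHandle& handle);  // element (re)positioned
//     void ClearHeapHandle();                        // element removed
//     HeapHandle GetHeapHandle() const;              // current position
//   };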
|
||||
|
||||
PriorityQueue::PriorityQueue() = default;
|
||||
|
||||
PriorityQueue::~PriorityQueue() {
|
||||
if (!is_flush_task_sources_on_destroy_enabled_)
|
||||
return;
|
||||
|
||||
while (!container_.empty()) {
|
||||
auto task_source = PopTaskSource();
|
||||
auto task = task_source.Clear();
|
||||
std::move(task.task).Run();
|
||||
}
|
||||
}
|
||||
|
||||
PriorityQueue& PriorityQueue::operator=(PriorityQueue&& other) = default;
|
||||
|
||||
void PriorityQueue::Push(
|
||||
TransactionWithRegisteredTaskSource transaction_with_task_source) {
|
||||
auto sequence_sort_key =
|
||||
transaction_with_task_source.transaction.GetSortKey();
|
||||
container_.insert(TaskSourceAndSortKey(
|
||||
std::move(transaction_with_task_source.task_source), sequence_sort_key));
|
||||
IncrementNumTaskSourcesForPriority(sequence_sort_key.priority());
|
||||
}
|
||||
|
||||
const SequenceSortKey& PriorityQueue::PeekSortKey() const {
|
||||
DCHECK(!IsEmpty());
|
||||
return container_.Min().sort_key();
|
||||
}
|
||||
|
||||
RegisteredTaskSource& PriorityQueue::PeekTaskSource() const {
|
||||
DCHECK(!IsEmpty());
|
||||
|
||||
// The const_cast on Min() is okay since modifying the TaskSource cannot alter
|
||||
// the sort order of TaskSourceAndSortKey.
|
||||
auto& task_source_and_sort_key =
|
||||
const_cast<PriorityQueue::TaskSourceAndSortKey&>(container_.Min());
|
||||
return task_source_and_sort_key.task_source();
|
||||
}
|
||||
|
||||
RegisteredTaskSource PriorityQueue::PopTaskSource() {
|
||||
DCHECK(!IsEmpty());
|
||||
|
||||
// The const_cast on Min() is okay since the TaskSourceAndSortKey is
|
||||
// transactionally being popped from |container_| right after and taking its
|
||||
// TaskSource does not alter its sort order.
|
||||
auto& task_source_and_sort_key =
|
||||
const_cast<TaskSourceAndSortKey&>(container_.Min());
|
||||
DecrementNumTaskSourcesForPriority(
|
||||
task_source_and_sort_key.sort_key().priority());
|
||||
RegisteredTaskSource task_source =
|
||||
task_source_and_sort_key.take_task_source();
|
||||
container_.Pop();
|
||||
return task_source;
|
||||
}
|
||||
|
||||
RegisteredTaskSource PriorityQueue::RemoveTaskSource(
|
||||
const TaskSource& task_source) {
|
||||
if (IsEmpty())
|
||||
return nullptr;
|
||||
|
||||
const HeapHandle heap_handle = task_source.heap_handle();
|
||||
if (!heap_handle.IsValid())
|
||||
return nullptr;
|
||||
|
||||
TaskSourceAndSortKey& task_source_and_sort_key =
|
||||
const_cast<PriorityQueue::TaskSourceAndSortKey&>(
|
||||
container_.at(heap_handle));
|
||||
DCHECK_EQ(task_source_and_sort_key.task_source().get(), &task_source);
|
||||
RegisteredTaskSource registered_task_source =
|
||||
task_source_and_sort_key.take_task_source();
|
||||
|
||||
DecrementNumTaskSourcesForPriority(
|
||||
task_source_and_sort_key.sort_key().priority());
|
||||
container_.erase(heap_handle);
|
||||
return registered_task_source;
|
||||
}
|
||||
|
||||
void PriorityQueue::UpdateSortKey(TaskSource::Transaction transaction) {
|
||||
DCHECK(transaction);
|
||||
|
||||
if (IsEmpty())
|
||||
return;
|
||||
|
||||
const HeapHandle heap_handle = transaction.task_source()->heap_handle();
|
||||
if (!heap_handle.IsValid())
|
||||
return;
|
||||
|
||||
auto old_sort_key = container_.at(heap_handle).sort_key();
|
||||
auto new_sort_key = transaction.GetSortKey();
|
||||
auto registered_task_source =
|
||||
const_cast<PriorityQueue::TaskSourceAndSortKey&>(
|
||||
container_.at(heap_handle))
|
||||
.take_task_source();
|
||||
|
||||
DecrementNumTaskSourcesForPriority(old_sort_key.priority());
|
||||
IncrementNumTaskSourcesForPriority(new_sort_key.priority());
|
||||
|
||||
container_.ChangeKey(
|
||||
heap_handle,
|
||||
TaskSourceAndSortKey(std::move(registered_task_source), new_sort_key));
|
||||
}
|
||||
|
||||
bool PriorityQueue::IsEmpty() const {
|
||||
return container_.empty();
|
||||
}
|
||||
|
||||
size_t PriorityQueue::Size() const {
|
||||
return container_.size();
|
||||
}
|
||||
|
||||
void PriorityQueue::EnableFlushTaskSourcesOnDestroyForTesting() {
|
||||
DCHECK(!is_flush_task_sources_on_destroy_enabled_);
|
||||
is_flush_task_sources_on_destroy_enabled_ = true;
|
||||
}
|
||||
|
||||
void PriorityQueue::DecrementNumTaskSourcesForPriority(TaskPriority priority) {
|
||||
DCHECK_GT(num_task_sources_per_priority_[static_cast<int>(priority)], 0U);
|
||||
--num_task_sources_per_priority_[static_cast<int>(priority)];
|
||||
}
|
||||
|
||||
void PriorityQueue::IncrementNumTaskSourcesForPriority(TaskPriority priority) {
|
||||
++num_task_sources_per_priority_[static_cast<int>(priority)];
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,99 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_PRIORITY_QUEUE_H_
|
||||
#define BASE_TASK_THREAD_POOL_PRIORITY_QUEUE_H_
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/memory/ref_counted.h"
|
||||
#include "base/task/common/checked_lock.h"
|
||||
#include "base/task/common/intrusive_heap.h"
|
||||
#include "base/task/thread_pool/sequence_sort_key.h"
|
||||
#include "base/task/thread_pool/task_source.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
// A PriorityQueue holds TaskSources of Tasks. This class is not thread-safe
|
||||
// (requires external synchronization).
|
||||
class BASE_EXPORT PriorityQueue {
|
||||
public:
|
||||
PriorityQueue();
|
||||
~PriorityQueue();
|
||||
|
||||
PriorityQueue& operator=(PriorityQueue&& other);
|
||||
|
||||
// Inserts |task_source| in the PriorityQueue with |sequence_sort_key|.
|
||||
void Push(TransactionWithRegisteredTaskSource transaction_with_task_source);
|
||||
|
||||
// Returns a reference to the SequenceSortKey representing the priority of
|
||||
// the highest pending task in this PriorityQueue. The reference becomes
|
||||
// invalid the next time that this PriorityQueue is modified.
|
||||
// Cannot be called on an empty PriorityQueue.
|
||||
const SequenceSortKey& PeekSortKey() const;
|
||||
|
||||
// Returns a reference to the highest priority TaskSource in this
|
||||
// PriorityQueue. Cannot be called on an empty PriorityQueue. The returned
|
||||
// task source may be modified as long as its sort key isn't affected.
|
||||
RegisteredTaskSource& PeekTaskSource() const;
|
||||
|
||||
// Removes and returns the highest priority TaskSource in this PriorityQueue.
|
||||
// Cannot be called on an empty PriorityQueue.
|
||||
RegisteredTaskSource PopTaskSource();
|
||||
|
||||
// Removes |task_source| from the PriorityQueue. Returns a
|
||||
// RegisteredTaskSource which evaluates to true if successful, or false if
|
||||
// |task_source| is not currently in the PriorityQueue or the PriorityQueue is
|
||||
// empty.
|
||||
RegisteredTaskSource RemoveTaskSource(const TaskSource& task_source);
|
||||
|
||||
// Updates the sort key of the TaskSource in |transaction| to
|
||||
// match its current traits. No-ops if the TaskSource is not in the
|
||||
// PriorityQueue or the PriorityQueue is empty.
|
||||
void UpdateSortKey(TaskSource::Transaction transaction);
|
||||
|
||||
// Returns true if the PriorityQueue is empty.
|
||||
bool IsEmpty() const;
|
||||
|
||||
// Returns the number of TaskSources in the PriorityQueue.
|
||||
size_t Size() const;
|
||||
|
||||
// Returns the number of TaskSources with |priority|.
|
||||
size_t GetNumTaskSourcesWithPriority(TaskPriority priority) const {
|
||||
return num_task_sources_per_priority_[static_cast<int>(priority)];
|
||||
}
|
||||
|
||||
// Set the PriorityQueue to empty all its TaskSources of Tasks when it is
|
||||
// destroyed; needed to prevent memory leaks caused by a reference cycle
|
||||
// (TaskSource -> Task -> TaskRunner -> TaskSource...) during test teardown.
|
||||
void EnableFlushTaskSourcesOnDestroyForTesting();
|
||||
|
||||
private:
|
||||
// A class combining a TaskSource and the SequenceSortKey that determines its
|
||||
// position in a PriorityQueue.
|
||||
class TaskSourceAndSortKey;
|
||||
|
||||
using ContainerType = IntrusiveHeap<TaskSourceAndSortKey>;
|
||||
|
||||
void DecrementNumTaskSourcesForPriority(TaskPriority priority);
|
||||
void IncrementNumTaskSourcesForPriority(TaskPriority priority);
|
||||
|
||||
ContainerType container_;
|
||||
|
||||
std::array<size_t, static_cast<int>(TaskPriority::HIGHEST) + 1>
|
||||
num_task_sources_per_priority_ = {};
|
||||
|
||||
// Should only be enabled by EnableFlushTaskSourcesOnDestroyForTesting().
|
||||
bool is_flush_task_sources_on_destroy_enabled_ = false;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(PriorityQueue);
|
||||
};
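
// Minimal usage sketch (illustrative; obtaining the registered task source and
// its transaction from the TaskTracker, and the CanRunPolicy check, are
// assumed to happen as in the thread group code):
//
//   PriorityQueue queue;
//   queue.Push(std::move(transaction_with_task_source));
//   if (!queue.IsEmpty() &&
//       task_tracker->CanRunPriority(queue.PeekSortKey().priority())) {
//     RegisteredTaskSource next = queue.PopTaskSource();
//     // ... run a task from |next| ...
//   }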
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_PRIORITY_QUEUE_H_
|
||||
153  TMessagesProj/jni/voip/webrtc/base/task/thread_pool/sequence.cc  Normal file

@ -0,0 +1,153 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/sequence.h"
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "base/bind.h"
|
||||
#include "base/critical_closure.h"
|
||||
#include "base/feature_list.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "base/task/task_features.h"
|
||||
#include "base/time/time.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
Sequence::Transaction::Transaction(Sequence* sequence)
|
||||
: TaskSource::Transaction(sequence) {}
|
||||
|
||||
Sequence::Transaction::Transaction(Sequence::Transaction&& other) = default;
|
||||
|
||||
Sequence::Transaction::~Transaction() = default;
|
||||
|
||||
bool Sequence::Transaction::WillPushTask() const {
|
||||
// If the sequence is empty before a Task is inserted into it and the pool is
|
||||
// not running any task from this sequence, it should be queued.
|
||||
// Otherwise, one of these must be true:
|
||||
// - The Sequence is already queued, or,
|
||||
// - A thread is running a Task from the Sequence. It is expected to reenqueue
|
||||
// the Sequence once it's done running the Task.
|
||||
return sequence()->queue_.empty() && !sequence()->has_worker_;
|
||||
}
|
||||
|
||||
void Sequence::Transaction::PushTask(Task task) {
|
||||
// Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
|
||||
// for details.
|
||||
CHECK(task.task);
|
||||
DCHECK(task.queue_time.is_null());
|
||||
|
||||
bool should_be_queued = WillPushTask();
|
||||
task.queue_time = TimeTicks::Now();
|
||||
|
||||
task.task = sequence()->traits_.shutdown_behavior() ==
|
||||
TaskShutdownBehavior::BLOCK_SHUTDOWN
|
||||
? MakeCriticalClosure(std::move(task.task))
|
||||
: std::move(task.task);
|
||||
|
||||
sequence()->queue_.push(std::move(task));
|
||||
|
||||
// AddRef() matched by manual Release() when the sequence has no more tasks
|
||||
// to run (in DidProcessTask() or Clear()).
|
||||
if (should_be_queued && sequence()->task_runner())
|
||||
sequence()->task_runner()->AddRef();
|
||||
}
|
||||
|
||||
TaskSource::RunStatus Sequence::WillRunTask() {
|
||||
// There should never be a second call to WillRunTask() before
// DidProcessTask() since the RunStatus is always marked as saturated.
|
||||
DCHECK(!has_worker_);
|
||||
|
||||
// It's ok to access |has_worker_| outside of a Transaction since
|
||||
// WillRunTask() is externally synchronized, always called in sequence with
|
||||
// TakeTask() and DidProcessTask() and only called if |!queue_.empty()|, which
|
||||
// means it won't race with WillPushTask()/PushTask().
|
||||
has_worker_ = true;
|
||||
return RunStatus::kAllowedSaturated;
|
||||
}
|
||||
|
||||
size_t Sequence::GetRemainingConcurrency() const {
|
||||
return 1;
|
||||
}
|
||||
|
||||
Task Sequence::TakeTask(TaskSource::Transaction* transaction) {
|
||||
CheckedAutoLockMaybe auto_lock(transaction ? nullptr : &lock_);
|
||||
|
||||
DCHECK(has_worker_);
|
||||
DCHECK(!queue_.empty());
|
||||
DCHECK(queue_.front().task);
|
||||
|
||||
auto next_task = std::move(queue_.front());
|
||||
queue_.pop();
|
||||
return next_task;
|
||||
}
|
||||
|
||||
bool Sequence::DidProcessTask(TaskSource::Transaction* transaction) {
|
||||
CheckedAutoLockMaybe auto_lock(transaction ? nullptr : &lock_);
|
||||
// There should never be a call to DidProcessTask without an associated
|
||||
// WillRunTask().
|
||||
DCHECK(has_worker_);
|
||||
has_worker_ = false;
|
||||
// See comment on TaskSource::task_runner_ for lifetime management details.
|
||||
if (queue_.empty()) {
|
||||
ReleaseTaskRunner();
|
||||
return false;
|
||||
}
|
||||
// Let the caller re-enqueue this non-empty Sequence regardless of
|
||||
// |run_result| so it can continue churning through this Sequence's tasks and
|
||||
// skip/delete them in the proper scope.
|
||||
return true;
|
||||
}
|
||||
|
||||
SequenceSortKey Sequence::GetSortKey() const {
|
||||
DCHECK(!queue_.empty());
|
||||
return SequenceSortKey(traits_.priority(), queue_.front().queue_time);
|
||||
}
|
||||
|
||||
Task Sequence::Clear(TaskSource::Transaction* transaction) {
|
||||
CheckedAutoLockMaybe auto_lock(transaction ? nullptr : &lock_);
|
||||
// See comment on TaskSource::task_runner_ for lifetime management details.
|
||||
if (!queue_.empty() && !has_worker_)
|
||||
ReleaseTaskRunner();
|
||||
return Task(FROM_HERE,
|
||||
base::BindOnce(
|
||||
[](base::queue<Task> queue) {
|
||||
while (!queue.empty())
|
||||
queue.pop();
|
||||
},
|
||||
std::move(queue_)),
|
||||
TimeDelta());
|
||||
}
|
||||
|
||||
void Sequence::ReleaseTaskRunner() {
|
||||
if (!task_runner())
|
||||
return;
|
||||
if (execution_mode() == TaskSourceExecutionMode::kParallel) {
|
||||
static_cast<PooledParallelTaskRunner*>(task_runner())
|
||||
->UnregisterSequence(this);
|
||||
}
|
||||
// No member access after this point, releasing |task_runner()| might delete
|
||||
// |this|.
|
||||
task_runner()->Release();
|
||||
}
|
||||
|
||||
Sequence::Sequence(const TaskTraits& traits,
|
||||
TaskRunner* task_runner,
|
||||
TaskSourceExecutionMode execution_mode)
|
||||
: TaskSource(traits, task_runner, execution_mode) {}
|
||||
|
||||
Sequence::~Sequence() = default;
|
||||
|
||||
Sequence::Transaction Sequence::BeginTransaction() {
|
||||
return Transaction(this);
|
||||
}
|
||||
|
||||
ExecutionEnvironment Sequence::GetExecutionEnvironment() {
|
||||
return {token_, &sequence_local_storage_};
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
TMessagesProj/jni/voip/webrtc/base/task/thread_pool/sequence.h
|
|
@ -0,0 +1,127 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_SEQUENCE_H_
|
||||
#define BASE_TASK_THREAD_POOL_SEQUENCE_H_
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/compiler_specific.h"
|
||||
#include "base/containers/queue.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/optional.h"
|
||||
#include "base/sequence_token.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task/thread_pool/pooled_parallel_task_runner.h"
|
||||
#include "base/task/thread_pool/sequence_sort_key.h"
|
||||
#include "base/task/thread_pool/task.h"
|
||||
#include "base/task/thread_pool/task_source.h"
|
||||
#include "base/threading/sequence_local_storage_map.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
// A Sequence holds slots each containing up to a single Task that must be
|
||||
// executed in posting order.
|
||||
//
|
||||
// In comments below, an "empty Sequence" is a Sequence with no slot.
|
||||
//
|
||||
// Note: there is a known refcounted-ownership cycle in the Scheduler
|
||||
// architecture: Sequence -> Task -> TaskRunner -> Sequence -> ...
|
||||
// This is okay so long as the other owners of Sequence (PriorityQueue and
|
||||
// WorkerThread in alternation and
|
||||
// ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork()
|
||||
// temporarily) keep running it (and taking Tasks from it as a result). A
|
||||
// dangling reference cycle would only occur should they release their reference
|
||||
// to it while it's not empty. In other words, it is only correct for them to
|
||||
// release it after DidProcessTask() returns false to indicate it was made
// empty by that call (in which case the next WillPushTask() will return true
// to indicate to the caller that the Sequence should be re-enqueued for
// execution).
|
||||
//
|
||||
// This class is thread-safe.
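//
// Illustrative sketch (editor's addition, not part of the original file) of
// the posting protocol documented above, using only the Transaction API
// declared below; |sequence| and |DoWork| are hypothetical:
//
//   Sequence::Transaction transaction(sequence->BeginTransaction());
//   const bool should_be_queued = transaction.WillPushTask();
//   transaction.PushTask(Task(FROM_HERE, BindOnce(&DoWork), TimeDelta()));
//   if (should_be_queued) {
//     // The Sequence went from "empty with no worker" to non-empty: it must
//     // now be (re)enqueued so a worker eventually picks it up.
//   }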
|
||||
class BASE_EXPORT Sequence : public TaskSource {
|
||||
public:
|
||||
// A Transaction can perform multiple operations atomically on a
|
||||
// Sequence. While a Transaction is alive, it is guaranteed that nothing
|
||||
// else will access the Sequence; the Sequence's lock is held for the
|
||||
// lifetime of the Transaction.
|
||||
class BASE_EXPORT Transaction : public TaskSource::Transaction {
|
||||
public:
|
||||
Transaction(Transaction&& other);
|
||||
~Transaction();
|
||||
|
||||
// Returns true if the sequence would need to be queued after receiving a
|
||||
// new Task.
|
||||
bool WillPushTask() const WARN_UNUSED_RESULT;
|
||||
|
||||
// Adds |task| in a new slot at the end of the Sequence. This must only be
|
||||
// called after invoking WillPushTask().
|
||||
void PushTask(Task task);
|
||||
|
||||
Sequence* sequence() const { return static_cast<Sequence*>(task_source()); }
|
||||
|
||||
private:
|
||||
friend class Sequence;
|
||||
|
||||
explicit Transaction(Sequence* sequence);
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(Transaction);
|
||||
};
|
||||
|
||||
// |traits| is metadata that applies to all Tasks in the Sequence.
|
||||
// |task_runner| is a reference to the TaskRunner feeding this TaskSource.
|
||||
// |task_runner| can be nullptr only for tasks with no TaskRunner, in which
|
||||
// case |execution_mode| must be kParallel. Otherwise, |execution_mode| is the
|
||||
// execution mode of |task_runner|.
|
||||
Sequence(const TaskTraits& traits,
|
||||
TaskRunner* task_runner,
|
||||
TaskSourceExecutionMode execution_mode);
|
||||
|
||||
// Begins a Transaction. This method cannot be called on a thread which has an
|
||||
// active Sequence::Transaction.
|
||||
Transaction BeginTransaction() WARN_UNUSED_RESULT;
|
||||
|
||||
// TaskSource:
|
||||
ExecutionEnvironment GetExecutionEnvironment() override;
|
||||
size_t GetRemainingConcurrency() const override;
|
||||
|
||||
// Returns a token that uniquely identifies this Sequence.
|
||||
const SequenceToken& token() const { return token_; }
|
||||
|
||||
SequenceLocalStorageMap* sequence_local_storage() {
|
||||
return &sequence_local_storage_;
|
||||
}
|
||||
|
||||
private:
|
||||
~Sequence() override;
|
||||
|
||||
// TaskSource:
|
||||
RunStatus WillRunTask() override;
|
||||
Task TakeTask(TaskSource::Transaction* transaction) override;
|
||||
Task Clear(TaskSource::Transaction* transaction) override;
|
||||
bool DidProcessTask(TaskSource::Transaction* transaction) override;
|
||||
SequenceSortKey GetSortKey() const override;
|
||||
|
||||
// Releases reference to TaskRunner.
|
||||
void ReleaseTaskRunner();
|
||||
|
||||
const SequenceToken token_ = SequenceToken::Create();
|
||||
|
||||
// Queue of tasks to execute.
|
||||
base::queue<Task> queue_;
|
||||
|
||||
// True if a worker is currently associated with a Task from this Sequence.
|
||||
bool has_worker_ = false;
|
||||
|
||||
// Holds data stored through the SequenceLocalStorageSlot API.
|
||||
SequenceLocalStorageMap sequence_local_storage_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(Sequence);
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_SEQUENCE_H_
|
||||
|
|
@ -0,0 +1,29 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/sequence_sort_key.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
SequenceSortKey::SequenceSortKey(TaskPriority priority,
|
||||
TimeTicks next_task_sequenced_time)
|
||||
: priority_(priority),
|
||||
next_task_sequenced_time_(next_task_sequenced_time) {}
|
||||
|
||||
bool SequenceSortKey::operator<=(const SequenceSortKey& other) const {
|
||||
// This SequenceSortKey is considered more important than |other| if it has a
|
||||
// higher priority or if it has the same priority but its next task was
|
||||
// posted sooner than |other|'s.
|
||||
const int priority_diff =
|
||||
static_cast<int>(priority_) - static_cast<int>(other.priority_);
|
||||
if (priority_diff > 0)
|
||||
return true;
|
||||
if (priority_diff < 0)
|
||||
return false;
|
||||
return next_task_sequenced_time_ <= other.next_task_sequenced_time_;
|
||||
}
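
// Worked example (editor's note, illustrative only): given
//   SequenceSortKey a(TaskPriority::USER_BLOCKING, /*next_task_sequenced_time=*/t1);
//   SequenceSortKey b(TaskPriority::BEST_EFFORT, /*next_task_sequenced_time=*/t0);
// with t0 earlier than t1, |a <= b| is true because a higher priority always
// wins, regardless of posting time. Only when priorities are equal does the
// key whose next task was posted sooner compare as more important.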
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,53 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_SEQUENCE_SORT_KEY_H_
|
||||
#define BASE_TASK_THREAD_POOL_SEQUENCE_SORT_KEY_H_
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/time/time.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
// An immutable but assignable representation of the priority of a Sequence.
|
||||
class BASE_EXPORT SequenceSortKey final {
|
||||
public:
|
||||
SequenceSortKey() = default;
|
||||
SequenceSortKey(TaskPriority priority, TimeTicks next_task_sequenced_time);
|
||||
|
||||
TaskPriority priority() const { return priority_; }
|
||||
TimeTicks next_task_sequenced_time() const {
|
||||
return next_task_sequenced_time_;
|
||||
}
|
||||
|
||||
// Lower sort key means more important.
|
||||
bool operator<=(const SequenceSortKey& other) const;
|
||||
|
||||
bool operator==(const SequenceSortKey& other) const {
|
||||
return priority_ == other.priority_ &&
|
||||
next_task_sequenced_time_ == other.next_task_sequenced_time_;
|
||||
}
|
||||
bool operator!=(const SequenceSortKey& other) const {
|
||||
return !(other == *this);
|
||||
}
|
||||
|
||||
private:
|
||||
// The private section allows this class to keep its immutable property while
|
||||
// being copy-assignable (i.e. instead of making its members const).
|
||||
|
||||
// Highest task priority in the sequence at the time this sort key was
|
||||
// created.
|
||||
TaskPriority priority_;
|
||||
|
||||
// Sequenced time of the next task to run in the sequence at the time this
|
||||
// sort key was created.
|
||||
TimeTicks next_task_sequenced_time_;
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_SEQUENCE_SORT_KEY_H_
|
||||
|
|
@ -0,0 +1,99 @@
|
|||
// Copyright 2018 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/service_thread.h"
|
||||
|
||||
#include "base/bind.h"
|
||||
#include "base/bind_helpers.h"
|
||||
#include "base/debug/alias.h"
|
||||
#include "base/rand_util.h"
|
||||
#include "base/stl_util.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task/thread_pool.h"
|
||||
#include "base/task/thread_pool/task_tracker.h"
|
||||
#include "base/task/thread_pool/thread_pool_instance.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
namespace {
|
||||
|
||||
TimeDelta g_heartbeat_for_testing = TimeDelta();
|
||||
|
||||
} // namespace
|
||||
|
||||
ServiceThread::ServiceThread(const TaskTracker* task_tracker,
|
||||
RepeatingClosure report_heartbeat_metrics_callback)
|
||||
: Thread("ThreadPoolServiceThread"),
|
||||
task_tracker_(task_tracker),
|
||||
report_heartbeat_metrics_callback_(
|
||||
std::move(report_heartbeat_metrics_callback)) {}
|
||||
|
||||
ServiceThread::~ServiceThread() = default;
|
||||
|
||||
// static
|
||||
void ServiceThread::SetHeartbeatIntervalForTesting(TimeDelta heartbeat) {
|
||||
g_heartbeat_for_testing = heartbeat;
|
||||
}
|
||||
|
||||
void ServiceThread::Init() {
|
||||
// In unit tests we sometimes do not have a fully functional thread pool
|
||||
// environment; do not perform the heartbeat report in that case since it
|
||||
// relies on such an environment.
|
||||
if (ThreadPoolInstance::Get()) {
|
||||
// Compute the histogram every hour (with a slight offset to drift if that
|
||||
// hour tick happens to line up with specific events). Once per hour per
|
||||
// user was deemed sufficient to gather a reliable metric.
|
||||
constexpr TimeDelta kHeartbeat = TimeDelta::FromMinutes(59);
|
||||
|
||||
heartbeat_metrics_timer_.Start(
|
||||
FROM_HERE,
|
||||
g_heartbeat_for_testing.is_zero() ? kHeartbeat
|
||||
: g_heartbeat_for_testing,
|
||||
BindRepeating(&ServiceThread::ReportHeartbeatMetrics,
|
||||
Unretained(this)));
|
||||
}
|
||||
}
|
||||
|
||||
NOINLINE void ServiceThread::Run(RunLoop* run_loop) {
|
||||
const int line_number = __LINE__;
|
||||
Thread::Run(run_loop);
|
||||
base::debug::Alias(&line_number);
|
||||
}
|
||||
|
||||
void ServiceThread::ReportHeartbeatMetrics() const {
|
||||
report_heartbeat_metrics_callback_.Run();
|
||||
PerformHeartbeatLatencyReport();
|
||||
}
|
||||
|
||||
void ServiceThread::PerformHeartbeatLatencyReport() const {
|
||||
if (!task_tracker_)
|
||||
return;
|
||||
|
||||
// Only record latency for one TaskPriority per report to avoid bias in the
|
||||
// order in which tasks are posted (were we to record all at once) as well as
|
||||
// to avoid spinning up many worker threads to process this report if the
|
||||
// thread pool is currently idle (each thread group keeps at least one idle
|
||||
// thread so a single task isn't an issue).
|
||||
|
||||
// Invoke RandInt() out-of-line to ensure it's obtained before
|
||||
// TimeTicks::Now().
|
||||
const TaskPriority profiled_priority = static_cast<TaskPriority>(
|
||||
RandInt(static_cast<int>(TaskPriority::LOWEST),
|
||||
static_cast<int>(TaskPriority::HIGHEST)));
|
||||
|
||||
// Post through the static API to time the full stack. Use a new Now() for
|
||||
// every set of traits in case PostTask() itself is slow.
|
||||
// Bonus: this approach also includes the overhead of BindOnce() in the
|
||||
// reported latency.
|
||||
ThreadPool::PostTask(
|
||||
FROM_HERE, {profiled_priority},
|
||||
BindOnce(
|
||||
&TaskTracker::RecordHeartbeatLatencyAndTasksRunWhileQueuingHistograms,
|
||||
Unretained(task_tracker_), profiled_priority, TimeTicks::Now(),
|
||||
task_tracker_->GetNumTasksRun()));
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,68 @@
|
|||
// Copyright 2018 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_SERVICE_THREAD_H_
|
||||
#define BASE_TASK_THREAD_POOL_SERVICE_THREAD_H_
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/threading/thread.h"
|
||||
#include "base/time/time.h"
|
||||
#include "base/timer/timer.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
class TaskTracker;
|
||||
|
||||
// The ThreadPool's ServiceThread is a mostly idle thread that is responsible
|
||||
// for handling async events (e.g. delayed tasks and async I/O). Its role is to
|
||||
// merely forward such events to their destination (hence staying mostly idle
|
||||
// and highly responsive).
|
||||
// It aliases Thread::Run() to enforce that ServiceThread::Run() be on the stack
|
||||
// and make it easier to identify the service thread in stack traces.
|
||||
class BASE_EXPORT ServiceThread : public Thread {
|
||||
public:
|
||||
// Constructs a ServiceThread which will record heartbeat metrics. This
|
||||
// includes metrics recorded through |report_heartbeat_metrics_callback|,
|
||||
// in addition to latency metrics through |task_tracker| if non-null. In that
|
||||
// case, this ServiceThread will assume a registered ThreadPool instance
|
||||
// and that |task_tracker| will outlive this ServiceThread.
|
||||
explicit ServiceThread(const TaskTracker* task_tracker,
|
||||
RepeatingClosure report_heartbeat_metrics_callback);
|
||||
|
||||
~ServiceThread() override;
|
||||
|
||||
// Overrides the default interval at which |heartbeat_metrics_timer_| fires.
|
||||
// Call this with a |heartbeat| of zero to undo the override.
|
||||
// Must not be called while the ServiceThread is running.
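//
// Illustrative test usage (editor's sketch; the surrounding test code is
// assumed, not part of this file):
//
//   ServiceThread::SetHeartbeatIntervalForTesting(
//       TimeDelta::FromMilliseconds(1));  // Fire heartbeats quickly.
//   // ... create the ServiceThread and wait for heartbeat metrics ...
//   ServiceThread::SetHeartbeatIntervalForTesting(TimeDelta());  // Undo.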
|
||||
static void SetHeartbeatIntervalForTesting(TimeDelta heartbeat);
|
||||
|
||||
private:
|
||||
// Thread:
|
||||
void Init() override;
|
||||
void Run(RunLoop* run_loop) override;
|
||||
|
||||
void ReportHeartbeatMetrics() const;
|
||||
|
||||
// Kicks off a single async task which will record a histogram on the latency
|
||||
// of a randomly chosen set of TaskTraits.
|
||||
void PerformHeartbeatLatencyReport() const;
|
||||
|
||||
const TaskTracker* const task_tracker_;
|
||||
|
||||
// Fires a recurring heartbeat task to record metrics which are independent
|
||||
// from any execution sequence. This is done on the service thread to avoid
|
||||
// all external dependencies (even main thread).
|
||||
base::RepeatingTimer heartbeat_metrics_timer_;
|
||||
|
||||
RepeatingClosure report_heartbeat_metrics_callback_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(ServiceThread);
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_SERVICE_THREAD_H_
|
||||
TMessagesProj/jni/voip/webrtc/base/task/thread_pool/task.cc
|
|
@ -0,0 +1,45 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/task.h"
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "base/atomic_sequence_num.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
namespace {
|
||||
|
||||
AtomicSequenceNumber g_sequence_nums_for_tracing;
|
||||
|
||||
} // namespace
|
||||
|
||||
Task::Task() = default;
|
||||
|
||||
Task::Task(const Location& posted_from, OnceClosure task, TimeDelta delay)
|
||||
: PendingTask(posted_from,
|
||||
std::move(task),
|
||||
delay.is_zero() ? TimeTicks() : TimeTicks::Now() + delay,
|
||||
Nestable::kNonNestable) {
|
||||
// ThreadPoolImpl doesn't use |sequence_num| but tracing (toplevel.flow)
|
||||
// relies on it being unique. While this subtle dependency is a bit
|
||||
// overreaching, ThreadPoolImpl is the only task system that doesn't use
|
||||
// |sequence_num| and the dependent code rarely changes so this isn't worth a
|
||||
// big change and faking it here isn't too bad for now (posting tasks is full
|
||||
// of atomic ops already).
|
||||
this->sequence_num = g_sequence_nums_for_tracing.GetNext();
|
||||
}
|
||||
|
||||
// This should be "= default" but MSVC has trouble with "noexcept = default"
// in this case.
|
||||
Task::Task(Task&& other) noexcept : PendingTask(std::move(other)) {}
|
||||
|
||||
Task::~Task() = default;
|
||||
|
||||
Task& Task::operator=(Task&& other) = default;
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
TMessagesProj/jni/voip/webrtc/base/task/thread_pool/task.h
|
|
@ -0,0 +1,46 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_TASK_H_
|
||||
#define BASE_TASK_THREAD_POOL_TASK_H_
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/callback.h"
|
||||
#include "base/location.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/memory/ref_counted.h"
|
||||
#include "base/pending_task.h"
|
||||
#include "base/sequenced_task_runner.h"
|
||||
#include "base/single_thread_task_runner.h"
|
||||
#include "base/time/time.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
// A task is a unit of work inside the thread pool. Support for tracing and
|
||||
// profiling inherited from PendingTask.
|
||||
// TODO(etiennep): This class is now equivalent to PendingTask, remove it.
|
||||
struct BASE_EXPORT Task : public PendingTask {
|
||||
Task();
|
||||
|
||||
// |posted_from| is the site the task was posted from. |task| is the closure
|
||||
// to run. |delay| is a delay that must expire before the Task runs.
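//
// Illustrative (editor's sketch; |DoWork| is a hypothetical closure):
//
//   // An immediate task:
//   Task immediate_task(FROM_HERE, BindOnce(&DoWork), TimeDelta());
//   // A task that may only run once ~100ms have elapsed:
//   Task delayed_task(FROM_HERE, BindOnce(&DoWork),
//                     TimeDelta::FromMilliseconds(100));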
|
||||
Task(const Location& posted_from, OnceClosure task, TimeDelta delay);
|
||||
|
||||
// Task is move-only to avoid mistakes that cause reference counts to be
|
||||
// accidentally bumped.
|
||||
Task(Task&& other) noexcept;
|
||||
|
||||
~Task();
|
||||
|
||||
Task& operator=(Task&& other);
|
||||
|
||||
private:
|
||||
DISALLOW_COPY_AND_ASSIGN(Task);
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_TASK_H_
|
||||
|
|
@ -0,0 +1,175 @@
|
|||
// Copyright 2019 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/task_source.h"
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "base/feature_list.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "base/task/task_features.h"
|
||||
#include "base/task/thread_pool/task_tracker.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
TaskSource::Transaction::Transaction(TaskSource* task_source)
|
||||
: task_source_(task_source) {
|
||||
task_source->lock_.Acquire();
|
||||
}
|
||||
|
||||
TaskSource::Transaction::Transaction(TaskSource::Transaction&& other)
|
||||
: task_source_(other.task_source()) {
|
||||
other.task_source_ = nullptr;
|
||||
}
|
||||
|
||||
TaskSource::Transaction::~Transaction() {
|
||||
if (task_source_) {
|
||||
task_source_->lock_.AssertAcquired();
|
||||
task_source_->lock_.Release();
|
||||
}
|
||||
}
|
||||
|
||||
SequenceSortKey TaskSource::Transaction::GetSortKey() const {
|
||||
return task_source_->GetSortKey();
|
||||
}
|
||||
|
||||
void TaskSource::Transaction::UpdatePriority(TaskPriority priority) {
|
||||
if (FeatureList::IsEnabled(kAllTasksUserBlocking))
|
||||
return;
|
||||
task_source_->traits_.UpdatePriority(priority);
|
||||
task_source_->priority_racy_.store(task_source_->traits_.priority(),
|
||||
std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
void TaskSource::SetHeapHandle(const HeapHandle& handle) {
|
||||
heap_handle_ = handle;
|
||||
}
|
||||
|
||||
void TaskSource::ClearHeapHandle() {
|
||||
heap_handle_ = HeapHandle();
|
||||
}
|
||||
|
||||
TaskSource::TaskSource(const TaskTraits& traits,
|
||||
TaskRunner* task_runner,
|
||||
TaskSourceExecutionMode execution_mode)
|
||||
: traits_(traits),
|
||||
priority_racy_(traits.priority()),
|
||||
task_runner_(task_runner),
|
||||
execution_mode_(execution_mode) {
|
||||
DCHECK(task_runner_ ||
|
||||
execution_mode_ == TaskSourceExecutionMode::kParallel ||
|
||||
execution_mode_ == TaskSourceExecutionMode::kJob);
|
||||
}
|
||||
|
||||
TaskSource::~TaskSource() = default;
|
||||
|
||||
TaskSource::Transaction TaskSource::BeginTransaction() {
|
||||
return Transaction(this);
|
||||
}
|
||||
|
||||
RegisteredTaskSource::RegisteredTaskSource() = default;
|
||||
|
||||
RegisteredTaskSource::RegisteredTaskSource(std::nullptr_t)
|
||||
: RegisteredTaskSource() {}
|
||||
|
||||
RegisteredTaskSource::RegisteredTaskSource(
|
||||
RegisteredTaskSource&& other) noexcept
|
||||
:
|
||||
#if DCHECK_IS_ON()
|
||||
run_step_{std::exchange(other.run_step_, State::kInitial)},
|
||||
#endif // DCHECK_IS_ON()
|
||||
task_source_{std::move(other.task_source_)},
|
||||
task_tracker_{std::exchange(other.task_tracker_, nullptr)} {
|
||||
}
|
||||
|
||||
RegisteredTaskSource::~RegisteredTaskSource() {
|
||||
Unregister();
|
||||
}
|
||||
|
||||
// static
|
||||
RegisteredTaskSource RegisteredTaskSource::CreateForTesting(
|
||||
scoped_refptr<TaskSource> task_source,
|
||||
TaskTracker* task_tracker) {
|
||||
return RegisteredTaskSource(std::move(task_source), task_tracker);
|
||||
}
|
||||
|
||||
scoped_refptr<TaskSource> RegisteredTaskSource::Unregister() {
|
||||
#if DCHECK_IS_ON()
|
||||
DCHECK_EQ(run_step_, State::kInitial);
|
||||
#endif // DCHECK_IS_ON()
|
||||
if (task_source_ && task_tracker_)
|
||||
return task_tracker_->UnregisterTaskSource(std::move(task_source_));
|
||||
return std::move(task_source_);
|
||||
}
|
||||
|
||||
RegisteredTaskSource& RegisteredTaskSource::operator=(
|
||||
RegisteredTaskSource&& other) {
|
||||
Unregister();
|
||||
#if DCHECK_IS_ON()
|
||||
run_step_ = std::exchange(other.run_step_, State::kInitial);
|
||||
#endif // DCHECK_IS_ON()
|
||||
task_source_ = std::move(other.task_source_);
|
||||
task_tracker_ = std::exchange(other.task_tracker_, nullptr);
|
||||
return *this;
|
||||
}
|
||||
|
||||
TaskSource::RunStatus RegisteredTaskSource::WillRunTask() {
|
||||
TaskSource::RunStatus run_status = task_source_->WillRunTask();
|
||||
#if DCHECK_IS_ON()
|
||||
DCHECK_EQ(run_step_, State::kInitial);
|
||||
if (run_status != TaskSource::RunStatus::kDisallowed)
|
||||
run_step_ = State::kReady;
|
||||
#endif // DCHECK_IS_ON()
|
||||
return run_status;
|
||||
}
|
||||
|
||||
Task RegisteredTaskSource::TakeTask(TaskSource::Transaction* transaction) {
|
||||
DCHECK(!transaction || transaction->task_source() == get());
|
||||
#if DCHECK_IS_ON()
|
||||
DCHECK_EQ(State::kReady, run_step_);
|
||||
#endif // DCHECK_IS_ON()
|
||||
return task_source_->TakeTask(transaction);
|
||||
}
|
||||
|
||||
Task RegisteredTaskSource::Clear(TaskSource::Transaction* transaction) {
|
||||
DCHECK(!transaction || transaction->task_source() == get());
|
||||
return task_source_->Clear(transaction);
|
||||
}
|
||||
|
||||
bool RegisteredTaskSource::DidProcessTask(
|
||||
TaskSource::Transaction* transaction) {
|
||||
DCHECK(!transaction || transaction->task_source() == get());
|
||||
#if DCHECK_IS_ON()
|
||||
DCHECK_EQ(State::kReady, run_step_);
|
||||
run_step_ = State::kInitial;
|
||||
#endif // DCHECK_IS_ON()
|
||||
return task_source_->DidProcessTask(transaction);
|
||||
}
|
||||
|
||||
RegisteredTaskSource::RegisteredTaskSource(
|
||||
scoped_refptr<TaskSource> task_source,
|
||||
TaskTracker* task_tracker)
|
||||
: task_source_(std::move(task_source)), task_tracker_(task_tracker) {}
|
||||
|
||||
TransactionWithRegisteredTaskSource::TransactionWithRegisteredTaskSource(
|
||||
RegisteredTaskSource task_source_in,
|
||||
TaskSource::Transaction transaction_in)
|
||||
: task_source(std::move(task_source_in)),
|
||||
transaction(std::move(transaction_in)) {
|
||||
DCHECK_EQ(task_source.get(), transaction.task_source());
|
||||
}
|
||||
|
||||
// static
|
||||
TransactionWithRegisteredTaskSource
|
||||
TransactionWithRegisteredTaskSource::FromTaskSource(
|
||||
RegisteredTaskSource task_source_in) {
|
||||
auto transaction = task_source_in->BeginTransaction();
|
||||
return TransactionWithRegisteredTaskSource(std::move(task_source_in),
|
||||
std::move(transaction));
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,327 @@
|
|||
// Copyright 2019 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_TASK_SOURCE_H_
|
||||
#define BASE_TASK_THREAD_POOL_TASK_SOURCE_H_
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/compiler_specific.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/memory/ref_counted.h"
|
||||
#include "base/optional.h"
|
||||
#include "base/sequence_token.h"
|
||||
#include "base/task/common/checked_lock.h"
|
||||
#include "base/task/common/intrusive_heap.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task/thread_pool/sequence_sort_key.h"
|
||||
#include "base/task/thread_pool/task.h"
|
||||
#include "base/threading/sequence_local_storage_map.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
class TaskTracker;
|
||||
|
||||
enum class TaskSourceExecutionMode {
|
||||
kParallel,
|
||||
kSequenced,
|
||||
kSingleThread,
|
||||
kJob,
|
||||
kMax = kJob,
|
||||
};
|
||||
|
||||
struct BASE_EXPORT ExecutionEnvironment {
|
||||
SequenceToken token;
|
||||
SequenceLocalStorageMap* sequence_local_storage;
|
||||
};
|
||||
|
||||
// A TaskSource is a virtual class that provides a series of Tasks that must be
|
||||
// executed.
|
||||
//
|
||||
// A task source is registered when it's ready to be queued. A task source is
|
||||
// ready to be queued when either:
|
||||
// 1- It has new tasks that can run concurrently as a result of external
|
||||
// operations, e.g. posting a new task to an empty Sequence or increasing
|
||||
// max concurrency of a JobTaskSource;
|
||||
// 2- A worker finished running a task from it and DidProcessTask() returned
|
||||
// true; or
|
||||
// 3- A worker is about to run a task from it and WillRunTask() returned
|
||||
// kAllowedNotSaturated.
|
||||
//
|
||||
// A worker may perform the following sequence of operations on a
|
||||
// RegisteredTaskSource after obtaining it from the queue:
|
||||
// 1- Check whether a task can run with WillRunTask() (and register/enqueue the
|
||||
// task source again if not saturated).
|
||||
// 2- (optional) Iff (1) determined that a task can run, access the next task
|
||||
// with TakeTask().
|
||||
// 3- (optional) Execute the task.
|
||||
// 4- Inform the task source that a task was processed with DidProcessTask(),
|
||||
// and re-enqueue the task source iff requested.
|
||||
// When a task source is registered multiple times, many overlapping chains of
|
||||
// operations may run concurrently, as permitted by WillRunTask(). This allows
|
||||
// tasks from the same task source to run in parallel.
|
||||
// However, the following invariants are kept:
|
||||
// - The number of workers concurrently running tasks never goes over the
|
||||
// intended concurrency.
|
||||
// - If the task source has more tasks that can run concurrently, it must be
|
||||
// queued.
|
||||
//
|
||||
// Note: there is a known refcounted-ownership cycle in the ThreadPool
|
||||
// architecture: TaskSource -> TaskRunner -> TaskSource -> ... This is okay so
|
||||
// long as the other owners of TaskSource (PriorityQueue and WorkerThread in
|
||||
// alternation and ThreadGroupImpl::WorkerThreadDelegateImpl::GetWork()
|
||||
// temporarily) keep running it (and taking Tasks from it as a result). A
|
||||
// dangling reference cycle would only occur should they release their reference
|
||||
// to it while it's not empty. In other words, it is only correct for them to
|
||||
// release it when DidProcessTask() returns false.
|
||||
//
|
||||
// This class is thread-safe.
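//
// Editor's sketch (illustrative, not part of the original file) of the
// worker-side sequence of operations described above, expressed with the
// RegisteredTaskSource wrapper declared later in this file.
// |registered_task_source| is assumed to have been dequeued from a
// PriorityQueue; |ReEnqueue| and |RunTask| are hypothetical helpers:
//
//   TaskSource::RunStatus run_status = registered_task_source.WillRunTask();
//   if (run_status == TaskSource::RunStatus::kDisallowed)
//     return;  // Nothing may run right now.
//   if (run_status == TaskSource::RunStatus::kAllowedNotSaturated)
//     ReEnqueue(/* another registration of the same task source */);
//   Task task = registered_task_source.TakeTask();
//   RunTask(std::move(task));
//   if (registered_task_source.DidProcessTask())
//     ReEnqueue(std::move(registered_task_source));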
|
||||
class BASE_EXPORT TaskSource : public RefCountedThreadSafe<TaskSource> {
|
||||
public:
|
||||
// Indicates whether WillRunTask() allows TakeTask() to be called on a
|
||||
// RegisteredTaskSource.
|
||||
enum class RunStatus {
|
||||
// TakeTask() cannot be called.
|
||||
kDisallowed,
|
||||
// TakeTask() may be called, and the TaskSource has not reached its maximum
|
||||
// concurrency (i.e. the TaskSource still needs to be queued).
|
||||
kAllowedNotSaturated,
|
||||
// TakeTask() may be called, and the TaskSource has reached its maximum
|
||||
// concurrency (i.e. the TaskSource no longer needs to be queued).
|
||||
kAllowedSaturated,
|
||||
};
|
||||
|
||||
// A Transaction can perform multiple operations atomically on a
|
||||
// TaskSource. While a Transaction is alive, it is guaranteed that nothing
|
||||
// else will access the TaskSource; the TaskSource's lock is held for the
|
||||
// lifetime of the Transaction.
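//
// Illustrative (editor's sketch): a caller that wants to read the sort key
// and update the priority atomically might write (assuming the TaskSource is
// not empty):
//
//   TaskSource::Transaction transaction(task_source->BeginTransaction());
//   const SequenceSortKey sort_key = transaction.GetSortKey();
//   transaction.UpdatePriority(TaskPriority::USER_BLOCKING);
//   // The TaskSource's lock is released when |transaction| is destroyed.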
|
||||
class BASE_EXPORT Transaction {
|
||||
public:
|
||||
Transaction(Transaction&& other);
|
||||
~Transaction();
|
||||
|
||||
operator bool() const { return !!task_source_; }
|
||||
|
||||
// Returns a SequenceSortKey representing the priority of the TaskSource.
|
||||
// Cannot be called on an empty TaskSource.
|
||||
SequenceSortKey GetSortKey() const;
|
||||
|
||||
// Sets TaskSource priority to |priority|.
|
||||
void UpdatePriority(TaskPriority priority);
|
||||
|
||||
// Returns the traits of all Tasks in the TaskSource.
|
||||
TaskTraits traits() const { return task_source_->traits_; }
|
||||
|
||||
TaskSource* task_source() const { return task_source_; }
|
||||
|
||||
protected:
|
||||
explicit Transaction(TaskSource* task_source);
|
||||
|
||||
private:
|
||||
friend class TaskSource;
|
||||
|
||||
TaskSource* task_source_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(Transaction);
|
||||
};
|
||||
|
||||
// |traits| is metadata that applies to all Tasks in the TaskSource.
|
||||
// |task_runner| is a reference to the TaskRunner feeding this TaskSource.
|
||||
// |task_runner| can be nullptr only for tasks with no TaskRunner, in which
|
||||
// case |execution_mode| must be kParallel. Otherwise, |execution_mode| is the
|
||||
// execution mode of |task_runner|.
|
||||
TaskSource(const TaskTraits& traits,
|
||||
TaskRunner* task_runner,
|
||||
TaskSourceExecutionMode execution_mode);
|
||||
|
||||
// Begins a Transaction. This method cannot be called on a thread which has an
|
||||
// active TaskSource::Transaction.
|
||||
Transaction BeginTransaction() WARN_UNUSED_RESULT;
|
||||
|
||||
virtual ExecutionEnvironment GetExecutionEnvironment() = 0;
|
||||
|
||||
// Thread-safe but the returned value may immediately be obsolete. As such
|
||||
// this should only be used as a best-effort guess of how many more workers
|
||||
// are needed. This may be called on an empty task source.
|
||||
virtual size_t GetRemainingConcurrency() const = 0;
|
||||
|
||||
// Support for IntrusiveHeap.
|
||||
void SetHeapHandle(const HeapHandle& handle);
|
||||
void ClearHeapHandle();
|
||||
HeapHandle GetHeapHandle() const { return heap_handle_; }
|
||||
|
||||
HeapHandle heap_handle() const { return heap_handle_; }
|
||||
|
||||
// Returns the shutdown behavior of all Tasks in the TaskSource. Can be
|
||||
// accessed without a Transaction because it is never mutated.
|
||||
TaskShutdownBehavior shutdown_behavior() const {
|
||||
return traits_.shutdown_behavior();
|
||||
}
|
||||
// Returns a racy priority of the TaskSource. Can be accessed without a
|
||||
// Transaction but may return an outdated result.
|
||||
TaskPriority priority_racy() const {
|
||||
return priority_racy_.load(std::memory_order_relaxed);
|
||||
}
|
||||
// Returns the thread policy of the TaskSource. Can be accessed without a
|
||||
// Transaction because it is never mutated.
|
||||
ThreadPolicy thread_policy() const { return traits_.thread_policy(); }
|
||||
|
||||
// A reference to TaskRunner is only retained between PushTask() and when
|
||||
// DidProcessTask() returns false, guaranteeing it is safe to dereference this
|
||||
// pointer. Otherwise, the caller should guarantee such TaskRunner still
|
||||
// exists before dereferencing.
|
||||
TaskRunner* task_runner() const { return task_runner_; }
|
||||
|
||||
TaskSourceExecutionMode execution_mode() const { return execution_mode_; }
|
||||
|
||||
protected:
|
||||
virtual ~TaskSource();
|
||||
|
||||
virtual RunStatus WillRunTask() = 0;
|
||||
|
||||
// Implementations of TakeTask(), DidProcessTask() and Clear() must ensure
|
||||
// proper synchronization iff |transaction| is nullptr.
|
||||
virtual Task TakeTask(TaskSource::Transaction* transaction) = 0;
|
||||
virtual bool DidProcessTask(TaskSource::Transaction* transaction) = 0;
|
||||
|
||||
// This may be called for each outstanding RegisteredTaskSource that's ready.
|
||||
// The implementation needs to support this being called multiple times,
// unless it guarantees never to hand out multiple RegisteredTaskSources that
|
||||
// are concurrently ready.
|
||||
virtual Task Clear(TaskSource::Transaction* transaction) = 0;
|
||||
|
||||
virtual SequenceSortKey GetSortKey() const = 0;
|
||||
|
||||
// Sets TaskSource priority to |priority|.
|
||||
void UpdatePriority(TaskPriority priority);
|
||||
|
||||
// The TaskTraits of all Tasks in the TaskSource.
|
||||
TaskTraits traits_;
|
||||
|
||||
// The cached priority for atomic access.
|
||||
std::atomic<TaskPriority> priority_racy_;
|
||||
|
||||
// Synchronizes access to all members.
|
||||
mutable CheckedLock lock_{UniversalPredecessor()};
|
||||
|
||||
private:
|
||||
friend class RefCountedThreadSafe<TaskSource>;
|
||||
friend class RegisteredTaskSource;
|
||||
|
||||
// The TaskSource's position in its current PriorityQueue. Access is protected
|
||||
// by the PriorityQueue's lock.
|
||||
HeapHandle heap_handle_;
|
||||
|
||||
// A pointer to the TaskRunner that posts to this TaskSource, if any. The
|
||||
// derived class is responsible for calling AddRef() when a TaskSource from
|
||||
// which no Task is executing becomes non-empty and Release() when
|
||||
// it becomes empty again (e.g. when DidProcessTask() returns false).
|
||||
TaskRunner* task_runner_;
|
||||
|
||||
TaskSourceExecutionMode execution_mode_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(TaskSource);
|
||||
};
|
||||
|
||||
// Wrapper around TaskSource to signify the intent to queue and run it.
|
||||
// RegisteredTaskSource can only be created with TaskTracker and may only be
|
||||
// used by a single worker at a time. However, the same task source may be
|
||||
// registered several times, spawning multiple RegisteredTaskSources. A
|
||||
// RegisteredTaskSource resets to its initial state when WillRunTask() fails
|
||||
// or after DidProcessTask(), so it can be used again.
|
||||
class BASE_EXPORT RegisteredTaskSource {
|
||||
public:
|
||||
RegisteredTaskSource();
|
||||
RegisteredTaskSource(std::nullptr_t);
|
||||
RegisteredTaskSource(RegisteredTaskSource&& other) noexcept;
|
||||
~RegisteredTaskSource();
|
||||
|
||||
RegisteredTaskSource& operator=(RegisteredTaskSource&& other);
|
||||
|
||||
operator bool() const { return task_source_ != nullptr; }
|
||||
TaskSource* operator->() const { return task_source_.get(); }
|
||||
TaskSource* get() const { return task_source_.get(); }
|
||||
|
||||
static RegisteredTaskSource CreateForTesting(
|
||||
scoped_refptr<TaskSource> task_source,
|
||||
TaskTracker* task_tracker = nullptr);
|
||||
|
||||
// Can only be called if this RegisteredTaskSource is in its initial state.
|
||||
// Returns the underlying task source. An Optional is used in preparation for
|
||||
// the merge between ThreadPool and TaskQueueManager (in Blink).
|
||||
// https://crbug.com/783309
|
||||
scoped_refptr<TaskSource> Unregister();
|
||||
|
||||
// Informs this TaskSource that the current worker would like to run a Task
|
||||
// from it. Can only be called if in its initial state. Returns a RunStatus
|
||||
// that indicates if the operation is allowed (TakeTask() can be called).
|
||||
TaskSource::RunStatus WillRunTask();
|
||||
|
||||
// Returns the next task to run from this TaskSource. This should be called
|
||||
// only after WillRunTask() returned RunStatus::kAllowed*. |transaction| is
|
||||
// optional and should only be provided if this operation is already part of
|
||||
// a transaction.
|
||||
Task TakeTask(TaskSource::Transaction* transaction = nullptr)
|
||||
WARN_UNUSED_RESULT;
|
||||
|
||||
// Must be called after WillRunTask() or once the task was run if TakeTask()
|
||||
// was called. This resets this RegisteredTaskSource to its initial state so
|
||||
// that WillRunTask() may be called again. |transaction| is optional and
|
||||
// should only be provided if this operation is already part of a transaction.
|
||||
// Returns true if the TaskSource should be queued after this operation.
|
||||
bool DidProcessTask(TaskSource::Transaction* transaction = nullptr);
|
||||
|
||||
// Returns a task that clears this TaskSource to make it empty. |transaction|
|
||||
// is optional and should only be provided if this operation is already part
|
||||
// of a transaction.
|
||||
Task Clear(TaskSource::Transaction* transaction = nullptr) WARN_UNUSED_RESULT;
|
||||
|
||||
private:
|
||||
friend class TaskTracker;
|
||||
RegisteredTaskSource(scoped_refptr<TaskSource> task_source,
|
||||
TaskTracker* task_tracker);
|
||||
|
||||
#if DCHECK_IS_ON()
|
||||
// Indicates the step of a task execution chain.
|
||||
enum class State {
|
||||
kInitial, // WillRunTask() may be called.
|
||||
kReady, // After WillRunTask() returned a valid RunStatus.
|
||||
};
|
||||
|
||||
State run_step_ = State::kInitial;
|
||||
#endif // DCHECK_IS_ON()
|
||||
|
||||
scoped_refptr<TaskSource> task_source_;
|
||||
TaskTracker* task_tracker_ = nullptr;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(RegisteredTaskSource);
|
||||
};
|
||||
|
||||
// A pair of Transaction and RegisteredTaskSource. Useful to carry a
|
||||
// RegisteredTaskSource with an associated Transaction.
|
||||
// TODO(crbug.com/839091): Rename to RegisteredTaskSourceAndTransaction.
|
||||
struct BASE_EXPORT TransactionWithRegisteredTaskSource {
|
||||
public:
|
||||
TransactionWithRegisteredTaskSource(RegisteredTaskSource task_source_in,
|
||||
TaskSource::Transaction transaction_in);
|
||||
|
||||
TransactionWithRegisteredTaskSource(
|
||||
TransactionWithRegisteredTaskSource&& other) = default;
|
||||
~TransactionWithRegisteredTaskSource() = default;
|
||||
|
||||
static TransactionWithRegisteredTaskSource FromTaskSource(
|
||||
RegisteredTaskSource task_source_in);
|
||||
|
||||
RegisteredTaskSource task_source;
|
||||
TaskSource::Transaction transaction;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(TransactionWithRegisteredTaskSource);
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_TASK_SOURCE_H_
|
||||
|
|
@ -0,0 +1,797 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/task_tracker.h"
|
||||
|
||||
#include <atomic>
|
||||
#include <string>
|
||||
#include <vector>
|
||||
|
||||
#include "base/base_switches.h"
|
||||
#include "base/callback.h"
|
||||
#include "base/command_line.h"
|
||||
#include "base/compiler_specific.h"
|
||||
#include "base/debug/alias.h"
|
||||
#include "base/json/json_writer.h"
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "base/metrics/histogram_macros.h"
|
||||
#include "base/optional.h"
|
||||
#include "base/sequence_token.h"
|
||||
#include "base/synchronization/condition_variable.h"
|
||||
#include "base/task/scoped_set_task_priority_for_current_thread.h"
|
||||
#include "base/task/task_executor.h"
|
||||
#include "base/threading/sequence_local_storage_map.h"
|
||||
#include "base/threading/sequenced_task_runner_handle.h"
|
||||
#include "base/threading/thread_restrictions.h"
|
||||
#include "base/threading/thread_task_runner_handle.h"
|
||||
#include "base/time/time.h"
|
||||
#include "base/trace_event/trace_event.h"
|
||||
#include "base/values.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
namespace {
|
||||
|
||||
constexpr const char* kExecutionModeString[] = {"parallel", "sequenced",
|
||||
"single thread", "job"};
|
||||
static_assert(
|
||||
size(kExecutionModeString) ==
|
||||
static_cast<size_t>(TaskSourceExecutionMode::kMax) + 1,
|
||||
"Array kExecutionModeString is out of sync with TaskSourceExecutionMode.");
|
||||
|
||||
// An immutable copy of a thread pool task's info required by tracing.
|
||||
class TaskTracingInfo : public trace_event::ConvertableToTraceFormat {
|
||||
public:
|
||||
TaskTracingInfo(const TaskTraits& task_traits,
|
||||
const char* execution_mode,
|
||||
const SequenceToken& sequence_token)
|
||||
: task_traits_(task_traits),
|
||||
execution_mode_(execution_mode),
|
||||
sequence_token_(sequence_token) {}
|
||||
|
||||
// trace_event::ConvertableToTraceFormat implementation.
|
||||
void AppendAsTraceFormat(std::string* out) const override;
|
||||
|
||||
private:
|
||||
const TaskTraits task_traits_;
|
||||
const char* const execution_mode_;
|
||||
const SequenceToken sequence_token_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(TaskTracingInfo);
|
||||
};
|
||||
|
||||
void TaskTracingInfo::AppendAsTraceFormat(std::string* out) const {
|
||||
DictionaryValue dict;
|
||||
|
||||
dict.SetStringKey("task_priority",
|
||||
base::TaskPriorityToString(task_traits_.priority()));
|
||||
dict.SetStringKey("execution_mode", execution_mode_);
|
||||
if (sequence_token_.IsValid())
|
||||
dict.SetIntKey("sequence_token", sequence_token_.ToInternalValue());
|
||||
|
||||
std::string tmp;
|
||||
JSONWriter::Write(dict, &tmp);
|
||||
out->append(tmp);
|
||||
}
|
||||
|
||||
// Constructs a histogram to track latency which logs to
|
||||
// "ThreadPool.{histogram_name}.{histogram_label}.{task_type_suffix}".
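// For example (illustrative; "Foo" is a made-up label), histogram_name
// "TaskLatencyMicroseconds", histogram_label "Foo" and task_type_suffix
// "UserBlockingTaskPriority" yield
// "ThreadPool.TaskLatencyMicroseconds.Foo.UserBlockingTaskPriority".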
|
||||
HistogramBase* GetLatencyHistogram(StringPiece histogram_name,
|
||||
StringPiece histogram_label,
|
||||
StringPiece task_type_suffix) {
|
||||
DCHECK(!histogram_name.empty());
|
||||
DCHECK(!task_type_suffix.empty());
|
||||
|
||||
if (histogram_label.empty())
|
||||
return nullptr;
|
||||
|
||||
// Mimics the UMA_HISTOGRAM_HIGH_RESOLUTION_CUSTOM_TIMES macro. The minimums
|
||||
// and maximums were chosen to place the 1ms mark at around the 70% range
|
||||
// coverage for buckets giving us good info for tasks that have a latency
|
||||
// below 1ms (most of them) and enough info to assess how bad the latency is
|
||||
// for tasks that exceed this threshold.
|
||||
const std::string histogram = JoinString(
|
||||
{"ThreadPool", histogram_name, histogram_label, task_type_suffix}, ".");
|
||||
return Histogram::FactoryMicrosecondsTimeGet(
|
||||
histogram, TimeDelta::FromMicroseconds(1),
|
||||
TimeDelta::FromMilliseconds(20), 50,
|
||||
HistogramBase::kUmaTargetedHistogramFlag);
|
||||
}
|
||||
|
||||
// Constructs a histogram to track task count which logs to
|
||||
// "ThreadPool.{histogram_name}.{histogram_label}.{task_type_suffix}".
|
||||
HistogramBase* GetCountHistogram(StringPiece histogram_name,
|
||||
StringPiece histogram_label,
|
||||
StringPiece task_type_suffix) {
|
||||
DCHECK(!histogram_name.empty());
|
||||
DCHECK(!task_type_suffix.empty());
|
||||
|
||||
if (histogram_label.empty())
|
||||
return nullptr;
|
||||
|
||||
// Mimics the UMA_HISTOGRAM_CUSTOM_COUNTS macro.
|
||||
const std::string histogram = JoinString(
|
||||
{"ThreadPool", histogram_name, histogram_label, task_type_suffix}, ".");
|
||||
// 500 was chosen as the maximum number of tasks run while queuing because
|
||||
// values this high would likely indicate an error, beyond which knowing the
|
||||
// actual number of tasks is not informative.
|
||||
return Histogram::FactoryGet(histogram, 1, 500, 50,
|
||||
HistogramBase::kUmaTargetedHistogramFlag);
|
||||
}
|
||||
|
||||
// Returns a histogram stored in an array indexed by task priority.
|
||||
// TODO(jessemckenna): use the STATIC_HISTOGRAM_POINTER_GROUP macro from
|
||||
// histogram_macros.h instead.
|
||||
HistogramBase* GetHistogramForTaskPriority(TaskPriority task_priority,
|
||||
HistogramBase* const histograms[3]) {
|
||||
return histograms[static_cast<int>(task_priority)];
|
||||
}
|
||||
|
||||
bool HasLogBestEffortTasksSwitch() {
|
||||
// The CommandLine might not be initialized if ThreadPool is initialized in a
|
||||
// dynamic library which doesn't have access to argc/argv.
|
||||
return CommandLine::InitializedForCurrentProcess() &&
|
||||
CommandLine::ForCurrentProcess()->HasSwitch(
|
||||
switches::kLogBestEffortTasks);
|
||||
}
|
||||
|
||||
// Needed for PostTaskHere and CurrentThread. This executor lives for the
|
||||
// duration of a threadpool task invocation.
|
||||
class EphemeralTaskExecutor : public TaskExecutor {
|
||||
public:
|
||||
// |sequenced_task_runner| and |single_thread_task_runner| must outlive this
|
||||
// EphemeralTaskExecutor.
|
||||
EphemeralTaskExecutor(SequencedTaskRunner* sequenced_task_runner,
|
||||
SingleThreadTaskRunner* single_thread_task_runner,
|
||||
const TaskTraits* sequence_traits)
|
||||
: sequenced_task_runner_(sequenced_task_runner),
|
||||
single_thread_task_runner_(single_thread_task_runner),
|
||||
sequence_traits_(sequence_traits) {
|
||||
SetTaskExecutorForCurrentThread(this);
|
||||
}
|
||||
|
||||
~EphemeralTaskExecutor() override {
|
||||
SetTaskExecutorForCurrentThread(nullptr);
|
||||
}
|
||||
|
||||
// TaskExecutor:
|
||||
bool PostDelayedTask(const Location& from_here,
|
||||
const TaskTraits& traits,
|
||||
OnceClosure task,
|
||||
TimeDelta delay) override {
|
||||
CheckTraitsCompatibleWithSequenceTraits(traits);
|
||||
return sequenced_task_runner_->PostDelayedTask(from_here, std::move(task),
|
||||
delay);
|
||||
}
|
||||
|
||||
scoped_refptr<TaskRunner> CreateTaskRunner(
|
||||
const TaskTraits& traits) override {
|
||||
CheckTraitsCompatibleWithSequenceTraits(traits);
|
||||
return sequenced_task_runner_;
|
||||
}
|
||||
|
||||
scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
|
||||
const TaskTraits& traits) override {
|
||||
CheckTraitsCompatibleWithSequenceTraits(traits);
|
||||
return sequenced_task_runner_;
|
||||
}
|
||||
|
||||
scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode) override {
|
||||
CheckTraitsCompatibleWithSequenceTraits(traits);
|
||||
return single_thread_task_runner_;
|
||||
}
|
||||
|
||||
#if defined(OS_WIN)
|
||||
scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode) override {
|
||||
CheckTraitsCompatibleWithSequenceTraits(traits);
|
||||
return single_thread_task_runner_;
|
||||
}
|
||||
#endif // defined(OS_WIN)
|
||||
|
||||
private:
|
||||
// Currently ignores |traits.priority()|.
|
||||
void CheckTraitsCompatibleWithSequenceTraits(const TaskTraits& traits) {
|
||||
if (traits.shutdown_behavior_set_explicitly()) {
|
||||
DCHECK_EQ(traits.shutdown_behavior(),
|
||||
sequence_traits_->shutdown_behavior());
|
||||
}
|
||||
|
||||
DCHECK(!traits.may_block() ||
|
||||
traits.may_block() == sequence_traits_->may_block());
|
||||
|
||||
DCHECK(!traits.with_base_sync_primitives() ||
|
||||
traits.with_base_sync_primitives() ==
|
||||
sequence_traits_->with_base_sync_primitives());
|
||||
}
|
||||
|
||||
SequencedTaskRunner* const sequenced_task_runner_;
|
||||
SingleThreadTaskRunner* const single_thread_task_runner_;
|
||||
const TaskTraits* const sequence_traits_;
|
||||
};
|
||||
|
||||
} // namespace
|
||||
|
||||
// Atomic internal state used by TaskTracker to track items that are blocking
|
||||
// Shutdown. An "item" consists of either:
|
||||
// - A running SKIP_ON_SHUTDOWN task
|
||||
// - A queued/running BLOCK_SHUTDOWN TaskSource.
|
||||
// Sequential consistency shouldn't be assumed from these calls (i.e. a thread
|
||||
// reading |HasShutdownStarted() == true| isn't guaranteed to see all writes
|
||||
// made before |StartShutdown()| on the thread that invoked it).
|
||||
class TaskTracker::State {
|
||||
public:
|
||||
State() = default;
|
||||
|
||||
// Sets a flag indicating that shutdown has started. Returns true if there are
|
||||
// items blocking shutdown. Can only be called once.
|
||||
bool StartShutdown() {
|
||||
const auto new_value =
|
||||
subtle::NoBarrier_AtomicIncrement(&bits_, kShutdownHasStartedMask);
|
||||
|
||||
// Check that the "shutdown has started" bit isn't zero. This would happen
|
||||
// if it was incremented twice.
|
||||
DCHECK(new_value & kShutdownHasStartedMask);
|
||||
|
||||
const auto num_items_blocking_shutdown =
|
||||
new_value >> kNumItemsBlockingShutdownBitOffset;
|
||||
return num_items_blocking_shutdown != 0;
|
||||
}
|
||||
|
||||
// Returns true if shutdown has started.
|
||||
bool HasShutdownStarted() const {
|
||||
return subtle::NoBarrier_Load(&bits_) & kShutdownHasStartedMask;
|
||||
}
|
||||
|
||||
// Returns true if there are items blocking shutdown.
|
||||
bool AreItemsBlockingShutdown() const {
|
||||
const auto num_items_blocking_shutdown =
|
||||
subtle::NoBarrier_Load(&bits_) >> kNumItemsBlockingShutdownBitOffset;
|
||||
DCHECK_GE(num_items_blocking_shutdown, 0);
|
||||
return num_items_blocking_shutdown != 0;
|
||||
}
|
||||
|
||||
// Increments the number of items blocking shutdown. Returns true if
|
||||
// shutdown has started.
|
||||
bool IncrementNumItemsBlockingShutdown() {
|
||||
#if DCHECK_IS_ON()
|
||||
// Verify that no overflow will occur.
|
||||
const auto num_items_blocking_shutdown =
|
||||
subtle::NoBarrier_Load(&bits_) >> kNumItemsBlockingShutdownBitOffset;
|
||||
DCHECK_LT(num_items_blocking_shutdown,
|
||||
std::numeric_limits<subtle::Atomic32>::max() -
|
||||
kNumItemsBlockingShutdownIncrement);
|
||||
#endif
|
||||
|
||||
const auto new_bits = subtle::NoBarrier_AtomicIncrement(
|
||||
&bits_, kNumItemsBlockingShutdownIncrement);
|
||||
return new_bits & kShutdownHasStartedMask;
|
||||
}
|
||||
|
||||
// Decrements the number of items blocking shutdown. Returns true if shutdown
|
||||
// has started and the number of tasks blocking shutdown becomes zero.
|
||||
bool DecrementNumItemsBlockingShutdown() {
|
||||
const auto new_bits = subtle::NoBarrier_AtomicIncrement(
|
||||
&bits_, -kNumItemsBlockingShutdownIncrement);
|
||||
const bool shutdown_has_started = new_bits & kShutdownHasStartedMask;
|
||||
const auto num_items_blocking_shutdown =
|
||||
new_bits >> kNumItemsBlockingShutdownBitOffset;
|
||||
DCHECK_GE(num_items_blocking_shutdown, 0);
|
||||
return shutdown_has_started && num_items_blocking_shutdown == 0;
|
||||
}
|
||||
|
||||
private:
|
||||
static constexpr subtle::Atomic32 kShutdownHasStartedMask = 1;
|
||||
static constexpr subtle::Atomic32 kNumItemsBlockingShutdownBitOffset = 1;
|
||||
static constexpr subtle::Atomic32 kNumItemsBlockingShutdownIncrement =
|
||||
1 << kNumItemsBlockingShutdownBitOffset;
|
||||
|
||||
// The LSB indicates whether shutdown has started. The other bits count the
|
||||
// number of items blocking shutdown.
|
||||
// No barriers are required to read/write |bits_| as this class is only used
|
||||
// as an atomic state checker; it doesn't provide sequential consistency
|
||||
// guarantees w.r.t. external state. Sequencing of the TaskTracker::State
|
||||
// operations themselves is guaranteed by the AtomicIncrement RMW (read-
|
||||
// modify-write) semantics however. For example, if two threads are racing to
|
||||
// call IncrementNumItemsBlockingShutdown() and StartShutdown() respectively,
|
||||
// either the first thread will win and the StartShutdown() call will see the
|
||||
// blocking task or the second thread will win and
|
||||
// IncrementNumItemsBlockingShutdown() will know that shutdown has started.
|
||||
subtle::Atomic32 bits_ = 0;
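//
// Worked example (editor's note): with shutdown started and 3 items blocking
// shutdown, |bits_| == (3 << kNumItemsBlockingShutdownBitOffset) |
// kShutdownHasStartedMask == 7. Each DecrementNumItemsBlockingShutdown()
// subtracts kNumItemsBlockingShutdownIncrement (2); the call that brings the
// item count to 0 returns true because the shutdown bit is still set.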
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(State);
|
||||
};
|
||||
|
||||
// TODO(jessemckenna): Write a helper function to avoid code duplication below.
|
||||
TaskTracker::TaskTracker(StringPiece histogram_label)
|
||||
: histogram_label_(histogram_label),
|
||||
has_log_best_effort_tasks_switch_(HasLogBestEffortTasksSwitch()),
|
||||
state_(new State),
|
||||
can_run_policy_(CanRunPolicy::kAll),
|
||||
flush_cv_(flush_lock_.CreateConditionVariable()),
|
||||
shutdown_lock_(&flush_lock_),
|
||||
task_latency_histograms_{GetLatencyHistogram("TaskLatencyMicroseconds",
|
||||
histogram_label,
|
||||
"BackgroundTaskPriority"),
|
||||
GetLatencyHistogram("TaskLatencyMicroseconds",
|
||||
histogram_label,
|
||||
"UserVisibleTaskPriority"),
|
||||
GetLatencyHistogram("TaskLatencyMicroseconds",
|
||||
histogram_label,
|
||||
"UserBlockingTaskPriority")},
|
||||
heartbeat_latency_histograms_{
|
||||
GetLatencyHistogram("HeartbeatLatencyMicroseconds",
|
||||
histogram_label,
|
||||
"BackgroundTaskPriority"),
|
||||
GetLatencyHistogram("HeartbeatLatencyMicroseconds",
|
||||
histogram_label,
|
||||
"UserVisibleTaskPriority"),
|
||||
GetLatencyHistogram("HeartbeatLatencyMicroseconds",
|
||||
histogram_label,
|
||||
"UserBlockingTaskPriority")},
|
||||
num_tasks_run_while_queuing_histograms_{
|
||||
GetCountHistogram("NumTasksRunWhileQueuing",
|
||||
histogram_label,
|
||||
"BackgroundTaskPriority"),
|
||||
GetCountHistogram("NumTasksRunWhileQueuing",
|
||||
histogram_label,
|
||||
"UserVisibleTaskPriority"),
|
||||
GetCountHistogram("NumTasksRunWhileQueuing",
|
||||
histogram_label,
|
||||
"UserBlockingTaskPriority")},
|
||||
tracked_ref_factory_(this) {}
|
||||
|
||||
TaskTracker::~TaskTracker() = default;
|
||||
|
||||
void TaskTracker::StartShutdown() {
|
||||
CheckedAutoLock auto_lock(shutdown_lock_);
|
||||
|
||||
// This method can only be called once.
|
||||
DCHECK(!shutdown_event_);
|
||||
DCHECK(!state_->HasShutdownStarted());
|
||||
|
||||
shutdown_event_ = std::make_unique<WaitableEvent>();
|
||||
|
||||
const bool tasks_are_blocking_shutdown = state_->StartShutdown();
|
||||
|
||||
// From now on, if a thread causes the number of tasks blocking shutdown to
|
||||
// become zero, it will call OnBlockingShutdownTasksComplete().
|
||||
|
||||
if (!tasks_are_blocking_shutdown) {
|
||||
// If another thread posts a BLOCK_SHUTDOWN task at this moment, it will
|
||||
// block until this method releases |shutdown_lock_|. Then, it will fail
|
||||
// DCHECK(!shutdown_event_->IsSignaled()). This is the desired behavior
|
||||
// because posting a BLOCK_SHUTDOWN task after StartShutdown() when no
|
||||
// tasks are blocking shutdown isn't allowed.
|
||||
shutdown_event_->Signal();
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
void TaskTracker::CompleteShutdown() {
|
||||
// It is safe to access |shutdown_event_| without holding |lock_| because the
|
||||
// pointer never changes after being set by StartShutdown(), which must
|
||||
// happen before this.
|
||||
DCHECK(TS_UNCHECKED_READ(shutdown_event_));
|
||||
{
|
||||
base::ScopedAllowBaseSyncPrimitives allow_wait;
|
||||
TS_UNCHECKED_READ(shutdown_event_)->Wait();
|
||||
}
|
||||
|
||||
// Unblock FlushForTesting() and perform the FlushAsyncForTesting callback
|
||||
// when shutdown completes.
|
||||
{
|
||||
CheckedAutoLock auto_lock(flush_lock_);
|
||||
flush_cv_->Signal();
|
||||
}
|
||||
CallFlushCallbackForTesting();
|
||||
}
|
||||
|
||||
void TaskTracker::FlushForTesting() {
|
||||
CheckedAutoLock auto_lock(flush_lock_);
|
||||
while (num_incomplete_task_sources_.load(std::memory_order_acquire) != 0 &&
|
||||
!IsShutdownComplete()) {
|
||||
flush_cv_->Wait();
|
||||
}
|
||||
}
|
||||
|
||||
void TaskTracker::FlushAsyncForTesting(OnceClosure flush_callback) {
|
||||
DCHECK(flush_callback);
|
||||
{
|
||||
CheckedAutoLock auto_lock(flush_lock_);
|
||||
DCHECK(!flush_callback_for_testing_)
|
||||
<< "Only one FlushAsyncForTesting() may be pending at any time.";
|
||||
flush_callback_for_testing_ = std::move(flush_callback);
|
||||
}
|
||||
|
||||
if (num_incomplete_task_sources_.load(std::memory_order_acquire) == 0 ||
|
||||
IsShutdownComplete()) {
|
||||
CallFlushCallbackForTesting();
|
||||
}
|
||||
}
|
||||
|
||||
void TaskTracker::SetCanRunPolicy(CanRunPolicy can_run_policy) {
|
||||
can_run_policy_.store(can_run_policy);
|
||||
}
|
||||
|
||||
bool TaskTracker::WillPostTask(Task* task,
|
||||
TaskShutdownBehavior shutdown_behavior) {
|
||||
DCHECK(task);
|
||||
DCHECK(task->task);
|
||||
|
||||
if (state_->HasShutdownStarted()) {
|
||||
// Once shutdown has started, a task may only be posted if it is a
// non-delayed BLOCK_SHUTDOWN task.
|
||||
if (shutdown_behavior != TaskShutdownBehavior::BLOCK_SHUTDOWN ||
|
||||
!task->delayed_run_time.is_null()) {
|
||||
return false;
|
||||
}
|
||||
|
||||
// A BLOCK_SHUTDOWN task posted after shutdown has completed is an
|
||||
// ordering bug. This aims to catch those early.
|
||||
CheckedAutoLock auto_lock(shutdown_lock_);
|
||||
DCHECK(shutdown_event_);
|
||||
DCHECK(!shutdown_event_->IsSignaled());
|
||||
}
|
||||
|
||||
// TODO(scheduler-dev): Record the task traits here.
|
||||
task_annotator_.WillQueueTask("ThreadPool_PostTask", task, "");
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool TaskTracker::WillPostTaskNow(const Task& task, TaskPriority priority) {
|
||||
if (!task.delayed_run_time.is_null() && state_->HasShutdownStarted())
|
||||
return false;
|
||||
if (has_log_best_effort_tasks_switch_ &&
|
||||
priority == TaskPriority::BEST_EFFORT) {
|
||||
// A TaskPriority::BEST_EFFORT task is being posted.
|
||||
LOG(INFO) << task.posted_from.ToString();
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
RegisteredTaskSource TaskTracker::RegisterTaskSource(
|
||||
scoped_refptr<TaskSource> task_source) {
|
||||
DCHECK(task_source);
|
||||
|
||||
TaskShutdownBehavior shutdown_behavior = task_source->shutdown_behavior();
|
||||
if (!BeforeQueueTaskSource(shutdown_behavior))
|
||||
return nullptr;
|
||||
|
||||
num_incomplete_task_sources_.fetch_add(1, std::memory_order_relaxed);
|
||||
return RegisteredTaskSource(std::move(task_source), this);
|
||||
}
|
||||
|
||||
bool TaskTracker::CanRunPriority(TaskPriority priority) const {
|
||||
auto can_run_policy = can_run_policy_.load();
|
||||
|
||||
if (can_run_policy == CanRunPolicy::kAll)
|
||||
return true;
|
||||
|
||||
if (can_run_policy == CanRunPolicy::kForegroundOnly &&
|
||||
priority >= TaskPriority::USER_VISIBLE) {
|
||||
return true;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
RegisteredTaskSource TaskTracker::RunAndPopNextTask(
|
||||
RegisteredTaskSource task_source) {
|
||||
DCHECK(task_source);
|
||||
|
||||
const bool should_run_tasks = BeforeRunTask(task_source->shutdown_behavior());
|
||||
|
||||
// Run the next task in |task_source|.
|
||||
Optional<Task> task;
|
||||
TaskTraits traits;
|
||||
{
|
||||
auto transaction = task_source->BeginTransaction();
|
||||
task = should_run_tasks ? task_source.TakeTask(&transaction)
|
||||
: task_source.Clear(&transaction);
|
||||
traits = transaction.traits();
|
||||
}
|
||||
|
||||
if (task) {
|
||||
// Run the |task| (whether it's a worker task or the Clear() closure).
|
||||
RunTask(std::move(task.value()), task_source.get(), traits);
|
||||
}
|
||||
if (should_run_tasks)
|
||||
AfterRunTask(task_source->shutdown_behavior());
|
||||
const bool task_source_must_be_queued = task_source.DidProcessTask();
|
||||
// |task_source| should be reenqueued iff requested by DidProcessTask().
|
||||
if (task_source_must_be_queued)
|
||||
return task_source;
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
bool TaskTracker::HasShutdownStarted() const {
|
||||
return state_->HasShutdownStarted();
|
||||
}
|
||||
|
||||
bool TaskTracker::IsShutdownComplete() const {
|
||||
CheckedAutoLock auto_lock(shutdown_lock_);
|
||||
return shutdown_event_ && shutdown_event_->IsSignaled();
|
||||
}
|
||||
|
||||
void TaskTracker::RecordLatencyHistogram(TaskPriority priority,
|
||||
TimeTicks posted_time) const {
|
||||
if (histogram_label_.empty())
|
||||
return;
|
||||
|
||||
const TimeDelta task_latency = TimeTicks::Now() - posted_time;
|
||||
GetHistogramForTaskPriority(priority, task_latency_histograms_)
|
||||
->AddTimeMicrosecondsGranularity(task_latency);
|
||||
}
|
||||
|
||||
void TaskTracker::RecordHeartbeatLatencyAndTasksRunWhileQueuingHistograms(
|
||||
TaskPriority priority,
|
||||
TimeTicks posted_time,
|
||||
int num_tasks_run_when_posted) const {
|
||||
if (histogram_label_.empty())
|
||||
return;
|
||||
|
||||
const TimeDelta task_latency = TimeTicks::Now() - posted_time;
|
||||
GetHistogramForTaskPriority(priority, heartbeat_latency_histograms_)
|
||||
->AddTimeMicrosecondsGranularity(task_latency);
|
||||
|
||||
GetHistogramForTaskPriority(priority, num_tasks_run_while_queuing_histograms_)
|
||||
->Add(GetNumTasksRun() - num_tasks_run_when_posted);
|
||||
}
|
||||
|
||||
int TaskTracker::GetNumTasksRun() const {
|
||||
return num_tasks_run_.load(std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
void TaskTracker::IncrementNumTasksRun() {
|
||||
num_tasks_run_.fetch_add(1, std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
void TaskTracker::RunTask(Task task,
|
||||
TaskSource* task_source,
|
||||
const TaskTraits& traits) {
|
||||
DCHECK(task_source);
|
||||
RecordLatencyHistogram(traits.priority(), task.queue_time);
|
||||
|
||||
const auto environment = task_source->GetExecutionEnvironment();
|
||||
|
||||
const bool previous_singleton_allowed =
|
||||
ThreadRestrictions::SetSingletonAllowed(
|
||||
traits.shutdown_behavior() !=
|
||||
TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN);
|
||||
const bool previous_io_allowed =
|
||||
ThreadRestrictions::SetIOAllowed(traits.may_block());
|
||||
const bool previous_wait_allowed =
|
||||
ThreadRestrictions::SetWaitAllowed(traits.with_base_sync_primitives());
|
||||
|
||||
{
|
||||
DCHECK(environment.token.IsValid());
|
||||
ScopedSetSequenceTokenForCurrentThread
|
||||
scoped_set_sequence_token_for_current_thread(environment.token);
|
||||
ScopedSetTaskPriorityForCurrentThread
|
||||
scoped_set_task_priority_for_current_thread(traits.priority());
|
||||
|
||||
// Local storage map used if none is provided by |environment|.
|
||||
Optional<SequenceLocalStorageMap> local_storage_map;
|
||||
if (!environment.sequence_local_storage)
|
||||
local_storage_map.emplace();
|
||||
|
||||
ScopedSetSequenceLocalStorageMapForCurrentThread
|
||||
scoped_set_sequence_local_storage_map_for_current_thread(
|
||||
environment.sequence_local_storage
|
||||
? environment.sequence_local_storage
|
||||
: &local_storage_map.value());
|
||||
|
||||
// Set up TaskRunnerHandle as expected for the scope of the task.
|
||||
Optional<SequencedTaskRunnerHandle> sequenced_task_runner_handle;
|
||||
Optional<ThreadTaskRunnerHandle> single_thread_task_runner_handle;
|
||||
Optional<EphemeralTaskExecutor> ephemeral_task_executor;
|
||||
switch (task_source->execution_mode()) {
|
||||
case TaskSourceExecutionMode::kJob:
|
||||
case TaskSourceExecutionMode::kParallel:
|
||||
break;
|
||||
case TaskSourceExecutionMode::kSequenced:
|
||||
DCHECK(task_source->task_runner());
|
||||
sequenced_task_runner_handle.emplace(
|
||||
static_cast<SequencedTaskRunner*>(task_source->task_runner()));
|
||||
ephemeral_task_executor.emplace(
|
||||
static_cast<SequencedTaskRunner*>(task_source->task_runner()),
|
||||
nullptr, &traits);
|
||||
break;
|
||||
case TaskSourceExecutionMode::kSingleThread:
|
||||
DCHECK(task_source->task_runner());
|
||||
single_thread_task_runner_handle.emplace(
|
||||
static_cast<SingleThreadTaskRunner*>(task_source->task_runner()));
|
||||
ephemeral_task_executor.emplace(
|
||||
static_cast<SequencedTaskRunner*>(task_source->task_runner()),
|
||||
static_cast<SingleThreadTaskRunner*>(task_source->task_runner()),
|
||||
&traits);
|
||||
break;
|
||||
}
|
||||
|
||||
TRACE_TASK_EXECUTION("ThreadPool_RunTask", task);
|
||||
|
||||
// TODO(gab): In a better world this would be tacked on as an extra arg
|
||||
// to the trace event generated above. This is not possible however until
|
||||
// http://crbug.com/652692 is resolved.
|
||||
TRACE_EVENT1("thread_pool", "ThreadPool_TaskInfo", "task_info",
|
||||
std::make_unique<TaskTracingInfo>(
|
||||
traits,
|
||||
kExecutionModeString[static_cast<size_t>(
|
||||
task_source->execution_mode())],
|
||||
environment.token));
|
||||
|
||||
RunTaskWithShutdownBehavior(traits.shutdown_behavior(), &task);
|
||||
|
||||
// Make sure the arguments bound to the callback are deleted within the
|
||||
// scope in which the callback runs.
|
||||
task.task = OnceClosure();
|
||||
}
|
||||
|
||||
ThreadRestrictions::SetWaitAllowed(previous_wait_allowed);
|
||||
ThreadRestrictions::SetIOAllowed(previous_io_allowed);
|
||||
ThreadRestrictions::SetSingletonAllowed(previous_singleton_allowed);
|
||||
}
|
||||
|
||||
bool TaskTracker::HasIncompleteTaskSourcesForTesting() const {
|
||||
return num_incomplete_task_sources_.load(std::memory_order_acquire) != 0;
|
||||
}
|
||||
|
||||
bool TaskTracker::BeforeQueueTaskSource(
|
||||
TaskShutdownBehavior shutdown_behavior) {
|
||||
if (shutdown_behavior == TaskShutdownBehavior::BLOCK_SHUTDOWN) {
|
||||
// BLOCK_SHUTDOWN task sources block shutdown between the moment they are
|
||||
// queued and the moment their last task completes its execution.
|
||||
const bool shutdown_started = state_->IncrementNumItemsBlockingShutdown();
|
||||
|
||||
if (shutdown_started) {
|
||||
// A BLOCK_SHUTDOWN task posted after shutdown has completed is an
|
||||
// ordering bug. This aims to catch those early.
|
||||
CheckedAutoLock auto_lock(shutdown_lock_);
|
||||
DCHECK(shutdown_event_);
|
||||
DCHECK(!shutdown_event_->IsSignaled());
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
// A non BLOCK_SHUTDOWN task is allowed to be posted iff shutdown hasn't
|
||||
// started.
|
||||
return !state_->HasShutdownStarted();
|
||||
}
|
||||
|
||||
bool TaskTracker::BeforeRunTask(TaskShutdownBehavior shutdown_behavior) {
|
||||
switch (shutdown_behavior) {
|
||||
case TaskShutdownBehavior::BLOCK_SHUTDOWN: {
|
||||
// The number of tasks blocking shutdown has been incremented when the
|
||||
// task was posted.
|
||||
DCHECK(state_->AreItemsBlockingShutdown());
|
||||
|
||||
// Trying to run a BLOCK_SHUTDOWN task after shutdown has completed is
|
||||
// unexpected as it either shouldn't have been posted if shutdown
|
||||
// completed or should be blocking shutdown if it was posted before it
|
||||
// did.
|
||||
DCHECK(!state_->HasShutdownStarted() || !IsShutdownComplete());
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
case TaskShutdownBehavior::SKIP_ON_SHUTDOWN: {
|
||||
// SKIP_ON_SHUTDOWN tasks block shutdown while they are running.
|
||||
const bool shutdown_started = state_->IncrementNumItemsBlockingShutdown();
|
||||
|
||||
if (shutdown_started) {
|
||||
// The SKIP_ON_SHUTDOWN task isn't allowed to run during shutdown.
|
||||
// Decrement the number of tasks blocking shutdown that was wrongly
|
||||
// incremented.
|
||||
DecrementNumItemsBlockingShutdown();
|
||||
return false;
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN: {
|
||||
return !state_->HasShutdownStarted();
|
||||
}
|
||||
}
|
||||
|
||||
NOTREACHED();
|
||||
return false;
|
||||
}
|
||||
|
||||
void TaskTracker::AfterRunTask(TaskShutdownBehavior shutdown_behavior) {
|
||||
IncrementNumTasksRun();
|
||||
if (shutdown_behavior == TaskShutdownBehavior::SKIP_ON_SHUTDOWN) {
|
||||
DecrementNumItemsBlockingShutdown();
|
||||
}
|
||||
}
|
||||
|
||||
scoped_refptr<TaskSource> TaskTracker::UnregisterTaskSource(
|
||||
scoped_refptr<TaskSource> task_source) {
|
||||
DCHECK(task_source);
|
||||
if (task_source->shutdown_behavior() ==
|
||||
TaskShutdownBehavior::BLOCK_SHUTDOWN) {
|
||||
DecrementNumItemsBlockingShutdown();
|
||||
}
|
||||
DecrementNumIncompleteTaskSources();
|
||||
return task_source;
|
||||
}
|
||||
|
||||
void TaskTracker::DecrementNumItemsBlockingShutdown() {
|
||||
const bool shutdown_started_and_no_items_block_shutdown =
|
||||
state_->DecrementNumItemsBlockingShutdown();
|
||||
if (!shutdown_started_and_no_items_block_shutdown)
|
||||
return;
|
||||
|
||||
CheckedAutoLock auto_lock(shutdown_lock_);
|
||||
DCHECK(shutdown_event_);
|
||||
shutdown_event_->Signal();
|
||||
}
|
||||
|
||||
void TaskTracker::DecrementNumIncompleteTaskSources() {
|
||||
const auto prev_num_incomplete_task_sources =
|
||||
num_incomplete_task_sources_.fetch_sub(1);
|
||||
DCHECK_GE(prev_num_incomplete_task_sources, 1);
|
||||
if (prev_num_incomplete_task_sources == 1) {
|
||||
{
|
||||
CheckedAutoLock auto_lock(flush_lock_);
|
||||
flush_cv_->Signal();
|
||||
}
|
||||
CallFlushCallbackForTesting();
|
||||
}
|
||||
}
|
||||
|
||||
void TaskTracker::CallFlushCallbackForTesting() {
|
||||
OnceClosure flush_callback;
|
||||
{
|
||||
CheckedAutoLock auto_lock(flush_lock_);
|
||||
flush_callback = std::move(flush_callback_for_testing_);
|
||||
}
|
||||
if (flush_callback)
|
||||
std::move(flush_callback).Run();
|
||||
}
|
||||
|
||||
NOINLINE void TaskTracker::RunContinueOnShutdown(Task* task) {
|
||||
const int line_number = __LINE__;
|
||||
task_annotator_.RunTask("ThreadPool_RunTask_ContinueOnShutdown", task);
|
||||
base::debug::Alias(&line_number);
|
||||
}
|
||||
|
||||
NOINLINE void TaskTracker::RunSkipOnShutdown(Task* task) {
|
||||
const int line_number = __LINE__;
|
||||
task_annotator_.RunTask("ThreadPool_RunTask_SkipOnShutdown", task);
|
||||
base::debug::Alias(&line_number);
|
||||
}
|
||||
|
||||
NOINLINE void TaskTracker::RunBlockShutdown(Task* task) {
|
||||
const int line_number = __LINE__;
|
||||
task_annotator_.RunTask("ThreadPool_RunTask_BlockShutdown", task);
|
||||
base::debug::Alias(&line_number);
|
||||
}
|
||||
|
||||
void TaskTracker::RunTaskWithShutdownBehavior(
|
||||
TaskShutdownBehavior shutdown_behavior,
|
||||
Task* task) {
|
||||
switch (shutdown_behavior) {
|
||||
case TaskShutdownBehavior::CONTINUE_ON_SHUTDOWN:
|
||||
RunContinueOnShutdown(task);
|
||||
return;
|
||||
case TaskShutdownBehavior::SKIP_ON_SHUTDOWN:
|
||||
RunSkipOnShutdown(task);
|
||||
return;
|
||||
case TaskShutdownBehavior::BLOCK_SHUTDOWN:
|
||||
RunBlockShutdown(task);
|
||||
return;
|
||||
}
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,296 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_TASK_TRACKER_H_
|
||||
#define BASE_TASK_THREAD_POOL_TASK_TRACKER_H_
|
||||
|
||||
#include <atomic>
|
||||
#include <functional>
|
||||
#include <limits>
|
||||
#include <memory>
|
||||
#include <queue>
|
||||
|
||||
#include "base/atomicops.h"
|
||||
#include "base/base_export.h"
|
||||
#include "base/callback_forward.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/metrics/histogram_base.h"
|
||||
#include "base/sequence_checker.h"
|
||||
#include "base/strings/string_piece.h"
|
||||
#include "base/synchronization/waitable_event.h"
|
||||
#include "base/task/common/checked_lock.h"
|
||||
#include "base/task/common/task_annotator.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task/thread_pool/task.h"
|
||||
#include "base/task/thread_pool/task_source.h"
|
||||
#include "base/task/thread_pool/tracked_ref.h"
|
||||
#include "base/thread_annotations.h"
|
||||
|
||||
namespace base {
|
||||
|
||||
class ConditionVariable;
|
||||
|
||||
namespace internal {
|
||||
|
||||
// Determines which tasks are allowed to run.
|
||||
enum class CanRunPolicy {
|
||||
// All tasks are allowed to run.
|
||||
kAll,
|
||||
// Only USER_VISIBLE and USER_BLOCKING tasks are allowed to run.
|
||||
kForegroundOnly,
|
||||
// No tasks can run.
|
||||
kNone,
|
||||
};
|
||||
|
||||
// TaskTracker enforces policies that determines whether:
|
||||
// - A task can be pushed to a task source (WillPostTask).
|
||||
// - A task source can be queued (WillQueueTaskSource).
|
||||
// - Tasks for a given priority can run (CanRunPriority).
|
||||
// - The next task in a queued task source can run (RunAndPopNextTask).
|
||||
// TaskTracker also sets up the environment to run a task (RunAndPopNextTask)
|
||||
// and records metrics and trace events. This class is thread-safe.
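// Illustrative sketch (not part of the original header; |sequence| is an
// assumed scoped_refptr<Sequence> that already holds a posted task, e.g. from
// CreateSequenceWithTask() in test_utils.h):
//
//   TaskTracker tracker("Example");
//   RegisteredTaskSource source = tracker.RegisterTaskSource(sequence);
//   if (source && source.WillRunTask() != TaskSource::RunStatus::kDisallowed)
//     tracker.RunAndPopNextTask(std::move(source));
//   tracker.StartShutdown();
//   tracker.CompleteShutdown();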
|
||||
class BASE_EXPORT TaskTracker {
|
||||
public:
|
||||
// |histogram_label| is used to label histograms. No histograms are recorded
|
||||
// if it is empty.
|
||||
TaskTracker(StringPiece histogram_label);
|
||||
|
||||
virtual ~TaskTracker();
|
||||
|
||||
// Initiates shutdown. Once this is called, only BLOCK_SHUTDOWN tasks will
|
||||
// start running (doesn't affect tasks that are already running). This can
|
||||
// only be called once.
|
||||
void StartShutdown();
|
||||
|
||||
// Synchronously completes shutdown. StartShutdown() must be called first.
|
||||
// Returns when:
|
||||
// - All SKIP_ON_SHUTDOWN tasks that were already running have completed their
|
||||
// execution.
|
||||
// - All posted BLOCK_SHUTDOWN tasks have completed their execution.
|
||||
// CONTINUE_ON_SHUTDOWN tasks still may be running after Shutdown returns.
|
||||
// This can only be called once.
|
||||
void CompleteShutdown();
|
||||
|
||||
// Waits until there are no incomplete task sources. May be called in tests
|
||||
// to validate that a condition is met after all task sources have run.
|
||||
//
|
||||
// Does not wait for delayed tasks. Waits for task sources posted from
|
||||
// other threads during the call. Returns immediately when shutdown completes.
|
||||
void FlushForTesting();
|
||||
|
||||
// Returns and calls |flush_callback| when there are no incomplete undelayed
|
||||
// tasks. |flush_callback| may be called back on any thread and should not
|
||||
// perform a lot of work. May be used when additional work on the current
|
||||
// thread needs to be performed during a flush. Only one
|
||||
// FlushAsyncForTesting() may be pending at any given time.
|
||||
void FlushAsyncForTesting(OnceClosure flush_callback);
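// Hedged usage sketch (not part of the original header): tests typically pair
// this with a RunLoop, e.g.
//
//   RunLoop run_loop;
//   task_tracker->FlushAsyncForTesting(run_loop.QuitClosure());
//   run_loop.Run();  // Returns once no incomplete undelayed tasks remain.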
|
||||
|
||||
// Sets the new CanRunPolicy policy, possibly affecting result of
|
||||
// CanRunPriority(). The caller must wake up workers as appropriate so that
|
||||
// tasks that are allowed to run by the new policy can be scheduled.
|
||||
void SetCanRunPolicy(CanRunPolicy can_run_policy);
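// Hedged sketch (not part of the original header): e.g. holding back
// BEST_EFFORT work during a critical phase and restoring it afterwards; the
// caller still has to wake up workers, typically via the owning ThreadGroup.
//
//   task_tracker->SetCanRunPolicy(CanRunPolicy::kForegroundOnly);
//   ...  // CanRunPriority() now rejects BEST_EFFORT.
//   task_tracker->SetCanRunPolicy(CanRunPolicy::kAll);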
|
||||
|
||||
// Informs this TaskTracker that |task| with |shutdown_behavior| is about to
|
||||
// be pushed to a task source (if non-delayed) or be added to the
|
||||
// DelayedTaskManager (if delayed). Returns true if this operation is allowed
|
||||
// (the operation should be performed if-and-only-if it is). This method may
|
||||
// also modify metadata on |task| if desired.
|
||||
bool WillPostTask(Task* task, TaskShutdownBehavior shutdown_behavior);
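// Hedged sketch (not part of the original header): callers gate the actual
// post on the return value, mirroring
// MockPooledTaskRunnerDelegate::PostTaskWithSequence() in test_utils.cc:
//
//   if (!task_tracker->WillPostTask(&task, sequence->shutdown_behavior()))
//     return false;  // |task| must not be queued.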
|
||||
|
||||
// Informs this TaskTracker that |task| is about to be pushed to a task
|
||||
// source with |priority|. Returns true if this operation is allowed (the
|
||||
// operation should be performed if-and-only-if it is).
|
||||
bool WillPostTaskNow(const Task& task,
|
||||
TaskPriority priority) WARN_UNUSED_RESULT;
|
||||
|
||||
// Informs this TaskTracker that |task_source| is about to be queued. Returns
|
||||
// a RegisteredTaskSource that should be queued if-and-only-if it evaluates to
|
||||
// true.
|
||||
RegisteredTaskSource RegisterTaskSource(
|
||||
scoped_refptr<TaskSource> task_source);
|
||||
|
||||
// Returns true if a task with |priority| can run under the current policy.
|
||||
bool CanRunPriority(TaskPriority priority) const;
|
||||
|
||||
// Runs the next task in |task_source| unless the current shutdown state
|
||||
// prevents that. Then, pops the task from |task_source| (even if it didn't
|
||||
// run). Returns |task_source| if non-empty after popping a task from it
|
||||
// (which indicates that it should be reenqueued). WillPostTask() must have
|
||||
// allowed the task in front of |task_source| to be posted before this is
|
||||
// called.
|
||||
RegisteredTaskSource RunAndPopNextTask(RegisteredTaskSource task_source);
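// Hedged sketch (not part of the original header): a worker can keep invoking
// this until the source no longer needs to be re-enqueued, e.g.
//
//   while (task_source)
//     task_source = task_tracker->RunAndPopNextTask(std::move(task_source));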
|
||||
|
||||
// Returns true once shutdown has started (StartShutdown() was called).
|
||||
// Note: sequential consistency with the thread calling StartShutdown() isn't
|
||||
// guaranteed by this call.
|
||||
bool HasShutdownStarted() const;
|
||||
|
||||
// Returns true if shutdown has completed (StartShutdown() was called and
|
||||
// no tasks are blocking shutdown).
|
||||
bool IsShutdownComplete() const;
|
||||
|
||||
// Records two histograms:
|
||||
// 1. ThreadPool.[label].HeartbeatLatencyMicroseconds.[suffix]:
|
||||
// Now() - posted_time
|
||||
// 2. ThreadPool.[label].NumTasksRunWhileQueuing.[suffix]:
|
||||
// GetNumTasksRun() - num_tasks_run_when_posted.
|
||||
// [label] is the histogram label provided to the constructor.
|
||||
// [suffix] is derived from |task_priority|.
|
||||
void RecordHeartbeatLatencyAndTasksRunWhileQueuingHistograms(
|
||||
TaskPriority task_priority,
|
||||
TimeTicks posted_time,
|
||||
int num_tasks_run_when_posted) const;
|
||||
|
||||
// Returns the number of tasks run so far.
|
||||
int GetNumTasksRun() const;
|
||||
|
||||
TrackedRef<TaskTracker> GetTrackedRef() {
|
||||
return tracked_ref_factory_.GetTrackedRef();
|
||||
}
|
||||
|
||||
// Returns true if there are task sources that haven't completed their
|
||||
// execution (still queued or in progress). If it returns false: the side-
|
||||
// effects of all completed tasks are guaranteed to be visible to the caller.
|
||||
bool HasIncompleteTaskSourcesForTesting() const;
|
||||
|
||||
protected:
|
||||
// Runs and deletes |task|. |task| is deleted in the environment where it
|
||||
// runs. |task_source| is the task source from which |task| was extracted.
|
||||
// |traits| are the traits of |task_source|. An override is expected to call
|
||||
// its parent's implementation but is free to perform extra work before and
|
||||
// after doing so.
|
||||
virtual void RunTask(Task task,
|
||||
TaskSource* task_source,
|
||||
const TaskTraits& traits);
|
||||
|
||||
private:
|
||||
friend class RegisteredTaskSource;
|
||||
class State;
|
||||
|
||||
void PerformShutdown();
|
||||
|
||||
// Called before WillPostTask() informs the tracing system that a task has
|
||||
// been posted. Updates |num_items_blocking_shutdown_| if necessary and
|
||||
// returns true if the current shutdown state allows the task to be posted.
|
||||
bool BeforeQueueTaskSource(TaskShutdownBehavior shutdown_behavior);
|
||||
|
||||
// Called before a task with |effective_shutdown_behavior| is run by
|
||||
// RunTask(). Updates |num_items_blocking_shutdown_| if necessary and returns
|
||||
// true if the current shutdown state allows the task to be run.
|
||||
bool BeforeRunTask(TaskShutdownBehavior shutdown_behavior);
|
||||
|
||||
// Called after a task with |effective_shutdown_behavior| has been run by
|
||||
// RunTask(). Updates |num_items_blocking_shutdown_| if necessary.
|
||||
void AfterRunTask(TaskShutdownBehavior shutdown_behavior);
|
||||
|
||||
// Informs this TaskTracker that |task_source| won't be reenqueued and returns
|
||||
// the underlying TaskSource. This is called before destroying a valid
|
||||
// RegisteredTaskSource. Updates |num_items_blocking_shutdown_| if necessary.
|
||||
scoped_refptr<TaskSource> UnregisterTaskSource(
|
||||
scoped_refptr<TaskSource> task_source);
|
||||
|
||||
// Called when an item blocking shutdown finishes after shutdown has started.
|
||||
void DecrementNumItemsBlockingShutdown();
|
||||
|
||||
// Decrements the number of incomplete task sources and signals |flush_cv_|
|
||||
// if it reaches zero.
|
||||
void DecrementNumIncompleteTaskSources();
|
||||
|
||||
// Calls |flush_callback_for_testing_| if one is available in a lock-safe
|
||||
// manner.
|
||||
void CallFlushCallbackForTesting();
|
||||
|
||||
// Records |Now() - posted_time| to the
|
||||
// ThreadPool.TaskLatencyMicroseconds.[label].[priority] histogram.
|
||||
void RecordLatencyHistogram(TaskPriority priority,
|
||||
TimeTicks posted_time) const;
|
||||
|
||||
void IncrementNumTasksRun();
|
||||
|
||||
// Dummy frames to allow identification of shutdown behavior in a stack trace.
|
||||
void RunContinueOnShutdown(Task* task);
|
||||
void RunSkipOnShutdown(Task* task);
|
||||
void RunBlockShutdown(Task* task);
|
||||
void RunTaskWithShutdownBehavior(TaskShutdownBehavior shutdown_behavior,
|
||||
Task* task);
|
||||
|
||||
TaskAnnotator task_annotator_;
|
||||
|
||||
// Suffix for histograms recorded by this TaskTracker.
|
||||
const std::string histogram_label_;
|
||||
|
||||
// Indicates whether logging information about TaskPriority::BEST_EFFORT tasks
|
||||
// was enabled with a command line switch.
|
||||
const bool has_log_best_effort_tasks_switch_;
|
||||
|
||||
// Number of tasks blocking shutdown and boolean indicating whether shutdown
|
||||
// has started. |shutdown_lock_| should be held to access |shutdown_event_|
|
||||
// when this indicates that shutdown has started because State doesn't provide
|
||||
// memory barriers. It intentionally trades having to use a Lock on shutdown
|
||||
// with not needing memory barriers at runtime.
|
||||
const std::unique_ptr<State> state_;
|
||||
|
||||
// Number of task sources that haven't completed their execution. Is
|
||||
// decremented with a memory barrier after the last task of a task source
|
||||
// runs. Is accessed with an acquire memory barrier in FlushForTesting(). The
|
||||
// memory barriers ensure that the memory written by flushed task sources is
|
||||
// visible when FlushForTesting() returns.
|
||||
std::atomic_int num_incomplete_task_sources_{0};
|
||||
|
||||
// Global policy that determines the result of CanRunPriority().
|
||||
std::atomic<CanRunPolicy> can_run_policy_;
|
||||
|
||||
// Lock associated with |flush_cv_|. Partially synchronizes access to
|
||||
// |num_incomplete_task_sources_|. Full synchronization isn't needed
|
||||
// because it's atomic, but synchronization is needed to coordinate waking and
|
||||
// sleeping at the right time. Fully synchronizes access to
|
||||
// |flush_callback_for_testing_|.
|
||||
mutable CheckedLock flush_lock_;
|
||||
|
||||
// Signaled when |num_incomplete_task_sources_| is or reaches zero or when
|
||||
// shutdown completes.
|
||||
const std::unique_ptr<ConditionVariable> flush_cv_;
|
||||
|
||||
// Invoked if non-null when |num_incomplete_task_sources_| is zero or when
|
||||
// shutdown completes.
|
||||
OnceClosure flush_callback_for_testing_ GUARDED_BY(flush_lock_);
|
||||
|
||||
// Synchronizes access to shutdown related members below.
|
||||
mutable CheckedLock shutdown_lock_;
|
||||
|
||||
// Event instantiated when shutdown starts and signaled when shutdown
|
||||
// completes.
|
||||
std::unique_ptr<WaitableEvent> shutdown_event_ GUARDED_BY(shutdown_lock_);
|
||||
|
||||
// Counter for number of tasks run so far, used to record tasks run while
|
||||
// a task was queued, to a histogram.
|
||||
std::atomic_int num_tasks_run_{0};
|
||||
|
||||
// ThreadPool.TaskLatencyMicroseconds.*,
|
||||
// ThreadPool.HeartbeatLatencyMicroseconds.*, and
|
||||
// ThreadPool.NumTasksRunWhileQueuing.* histograms. The index is a
|
||||
// TaskPriority. Intentionally leaked.
|
||||
// TODO(scheduler-dev): Consider using STATIC_HISTOGRAM_POINTER_GROUP for
|
||||
// these.
|
||||
using TaskPriorityType = std::underlying_type<TaskPriority>::type;
|
||||
static constexpr TaskPriorityType kNumTaskPriorities =
|
||||
static_cast<TaskPriorityType>(TaskPriority::HIGHEST) + 1;
|
||||
HistogramBase* const task_latency_histograms_[kNumTaskPriorities];
|
||||
HistogramBase* const heartbeat_latency_histograms_[kNumTaskPriorities];
|
||||
HistogramBase* const
|
||||
num_tasks_run_while_queuing_histograms_[kNumTaskPriorities];
|
||||
|
||||
// Ensures all state (e.g. dangling cleaned up workers) is coalesced before
|
||||
// destroying the TaskTracker (e.g. in test environments).
|
||||
// Ref. https://crbug.com/827615.
|
||||
TrackedRefFactory<TaskTracker> tracked_ref_factory_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(TaskTracker);
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_TASK_TRACKER_H_
|
||||
|
|
@ -0,0 +1,26 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/task_tracker_posix.h"
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "base/files/file_descriptor_watcher_posix.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
TaskTrackerPosix::TaskTrackerPosix(StringPiece name) : TaskTracker(name) {}
|
||||
TaskTrackerPosix::~TaskTrackerPosix() = default;
|
||||
|
||||
void TaskTrackerPosix::RunTask(Task task,
|
||||
TaskSource* task_source,
|
||||
const TaskTraits& traits) {
|
||||
DCHECK(io_thread_task_runner_);
|
||||
FileDescriptorWatcher file_descriptor_watcher(io_thread_task_runner_);
|
||||
TaskTracker::RunTask(std::move(task), task_source, traits);
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,57 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_TASK_TRACKER_POSIX_H_
|
||||
#define BASE_TASK_THREAD_POOL_TASK_TRACKER_POSIX_H_
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/message_loop/message_pump_type.h"
|
||||
#include "base/task/thread_pool/task_tracker.h"
|
||||
#include "base/threading/platform_thread.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
struct Task;
|
||||
|
||||
// A TaskTracker that instantiates a FileDescriptorWatcher in the scope in which
|
||||
// a task runs. Used on all POSIX platforms except NaCl SFI.
|
||||
// set_io_thread_task_runner() must be called before the
|
||||
// TaskTracker can run tasks.
|
||||
class BASE_EXPORT TaskTrackerPosix : public TaskTracker {
|
||||
public:
|
||||
TaskTrackerPosix(StringPiece name);
|
||||
~TaskTrackerPosix() override;
|
||||
|
||||
// Sets the task runner with which to setup FileDescriptorWatcher in
|
||||
// the scope in which tasks run. |io_thread_task_runner| must refer to
|
||||
// a Thread with MessagePumpType::IO.
|
||||
// Must be called before starting to run tasks.
|
||||
// External synchronization is required between a call to this and a call to
|
||||
// RunTask().
|
||||
void set_io_thread_task_runner(
|
||||
scoped_refptr<SingleThreadTaskRunner> io_thread_task_runner) {
|
||||
io_thread_task_runner_ = std::move(io_thread_task_runner);
|
||||
}
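// Hedged setup sketch (not part of the original header; the thread name is
// illustrative):
//
//   Thread io_thread("ExampleIOThread");
//   io_thread.StartWithOptions(Thread::Options(MessagePumpType::IO, 0));
//   task_tracker.set_io_thread_task_runner(io_thread.task_runner());
//   // RunTask() can now instantiate a FileDescriptorWatcher for each task.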
|
||||
|
||||
protected:
|
||||
// TaskTracker:
|
||||
void RunTask(Task task,
|
||||
TaskSource* task_source,
|
||||
const TaskTraits& traits) override;
|
||||
|
||||
private:
|
||||
scoped_refptr<SingleThreadTaskRunner> io_thread_task_runner_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(TaskTrackerPosix);
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_TASK_TRACKER_POSIX_H_
|
||||
|
|
@ -0,0 +1,111 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/test_task_factory.h"
|
||||
|
||||
#include "base/bind.h"
|
||||
#include "base/bind_helpers.h"
|
||||
#include "base/callback.h"
|
||||
#include "base/location.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/synchronization/waitable_event.h"
|
||||
#include "base/threading/sequenced_task_runner_handle.h"
|
||||
#include "base/threading/thread_task_runner_handle.h"
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
namespace test {
|
||||
|
||||
TestTaskFactory::TestTaskFactory(scoped_refptr<TaskRunner> task_runner,
|
||||
TaskSourceExecutionMode execution_mode)
|
||||
: cv_(&lock_),
|
||||
task_runner_(std::move(task_runner)),
|
||||
execution_mode_(execution_mode) {
|
||||
// Detach |thread_checker_| from the current thread. It will be attached to
|
||||
// the first thread that calls ThreadCheckerImpl::CalledOnValidThread().
|
||||
thread_checker_.DetachFromThread();
|
||||
}
|
||||
|
||||
TestTaskFactory::~TestTaskFactory() {
|
||||
WaitForAllTasksToRun();
|
||||
}
|
||||
|
||||
bool TestTaskFactory::PostTask(PostNestedTask post_nested_task,
|
||||
OnceClosure after_task_closure) {
|
||||
AutoLock auto_lock(lock_);
|
||||
return task_runner_->PostTask(
|
||||
FROM_HERE, BindOnce(&TestTaskFactory::RunTaskCallback, Unretained(this),
|
||||
num_posted_tasks_++, post_nested_task,
|
||||
std::move(after_task_closure)));
|
||||
}
|
||||
|
||||
void TestTaskFactory::WaitForAllTasksToRun() const {
|
||||
AutoLock auto_lock(lock_);
|
||||
while (ran_tasks_.size() < num_posted_tasks_)
|
||||
cv_.Wait();
|
||||
}
|
||||
|
||||
void TestTaskFactory::RunTaskCallback(size_t task_index,
|
||||
PostNestedTask post_nested_task,
|
||||
OnceClosure after_task_closure) {
|
||||
if (post_nested_task == PostNestedTask::YES)
|
||||
PostTask(PostNestedTask::NO, OnceClosure());
|
||||
|
||||
if (execution_mode_ == TaskSourceExecutionMode::kSingleThread ||
|
||||
execution_mode_ == TaskSourceExecutionMode::kSequenced) {
|
||||
EXPECT_TRUE(static_cast<SequencedTaskRunner*>(task_runner_.get())
|
||||
->RunsTasksInCurrentSequence());
|
||||
}
|
||||
|
||||
// Verify TaskRunnerHandles are set as expected in the task's scope.
|
||||
switch (execution_mode_) {
|
||||
case TaskSourceExecutionMode::kJob:
|
||||
case TaskSourceExecutionMode::kParallel:
|
||||
EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
|
||||
EXPECT_FALSE(SequencedTaskRunnerHandle::IsSet());
|
||||
break;
|
||||
case TaskSourceExecutionMode::kSequenced:
|
||||
EXPECT_FALSE(ThreadTaskRunnerHandle::IsSet());
|
||||
EXPECT_TRUE(SequencedTaskRunnerHandle::IsSet());
|
||||
EXPECT_EQ(task_runner_, SequencedTaskRunnerHandle::Get());
|
||||
break;
|
||||
case TaskSourceExecutionMode::kSingleThread:
|
||||
// SequencedTaskRunnerHandle inherits from ThreadTaskRunnerHandle so
|
||||
// both are expected to be "set" in the kSingleThread case.
|
||||
EXPECT_TRUE(ThreadTaskRunnerHandle::IsSet());
|
||||
EXPECT_TRUE(SequencedTaskRunnerHandle::IsSet());
|
||||
EXPECT_EQ(task_runner_, ThreadTaskRunnerHandle::Get());
|
||||
EXPECT_EQ(task_runner_, SequencedTaskRunnerHandle::Get());
|
||||
break;
|
||||
}
|
||||
|
||||
{
|
||||
AutoLock auto_lock(lock_);
|
||||
|
||||
DCHECK_LE(task_index, num_posted_tasks_);
|
||||
|
||||
if ((execution_mode_ == TaskSourceExecutionMode::kSingleThread ||
|
||||
execution_mode_ == TaskSourceExecutionMode::kSequenced) &&
|
||||
task_index != ran_tasks_.size()) {
|
||||
ADD_FAILURE() << "A task didn't run in the expected order.";
|
||||
}
|
||||
|
||||
if (execution_mode_ == TaskSourceExecutionMode::kSingleThread)
|
||||
EXPECT_TRUE(thread_checker_.CalledOnValidThread());
|
||||
|
||||
if (ran_tasks_.find(task_index) != ran_tasks_.end())
|
||||
ADD_FAILURE() << "A task ran more than once.";
|
||||
ran_tasks_.insert(task_index);
|
||||
|
||||
cv_.Signal();
|
||||
}
|
||||
|
||||
if (!after_task_closure.is_null())
|
||||
std::move(after_task_closure).Run();
|
||||
}
|
||||
|
||||
} // namespace test
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,100 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_TEST_TASK_FACTORY_H_
|
||||
#define BASE_TASK_THREAD_POOL_TEST_TASK_FACTORY_H_
|
||||
|
||||
#include <stddef.h>
|
||||
|
||||
#include <unordered_set>
|
||||
|
||||
#include "base/callback_forward.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/memory/ref_counted.h"
|
||||
#include "base/synchronization/condition_variable.h"
|
||||
#include "base/synchronization/lock.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task/thread_pool/test_utils.h"
|
||||
#include "base/task_runner.h"
|
||||
#include "base/threading/thread_checker_impl.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
namespace test {
|
||||
|
||||
// A TestTaskFactory posts tasks to a TaskRunner and verifies that they run as
|
||||
// expected. Generates a test failure when:
|
||||
// - The RunsTasksInCurrentSequence() method of the SequencedTaskRunner
|
||||
// (kSequenced or kSingleThread modes) returns false on a thread on which a
|
||||
// Task is run.
|
||||
// - The TaskRunnerHandles set in the context of the task don't match what's
|
||||
// expected for the tested TaskSourceExecutionMode.
|
||||
// - The TaskSourceExecutionMode of the TaskRunner is kSequenced or
|
||||
// kSingleThread and Tasks don't run in posting order.
|
||||
// - The TaskSourceExecutionMode of the TaskRunner is kSingleThread and Tasks
|
||||
// don't run on the same thread.
|
||||
// - A Task runs more than once.
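// Hedged usage sketch (not part of the original header; |task_runner| is an
// assumed sequenced TaskRunner created elsewhere in the test):
//
//   TestTaskFactory factory(task_runner, TaskSourceExecutionMode::kSequenced);
//   EXPECT_TRUE(factory.PostTask(TestTaskFactory::PostNestedTask::NO,
//                                OnceClosure()));
//   factory.WaitForAllTasksToRun();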
|
||||
class TestTaskFactory {
|
||||
public:
|
||||
enum class PostNestedTask {
|
||||
YES,
|
||||
NO,
|
||||
};
|
||||
|
||||
// Constructs a TestTaskFactory that posts tasks to |task_runner|.
|
||||
// |execution_mode| is the TaskSourceExecutionMode of |task_runner|.
|
||||
TestTaskFactory(scoped_refptr<TaskRunner> task_runner,
|
||||
TaskSourceExecutionMode execution_mode);
|
||||
|
||||
~TestTaskFactory();
|
||||
|
||||
// Posts a task. The posted task will:
|
||||
// - Post a new task if |post_nested_task| is YES. The nested task won't run
|
||||
// |after_task_closure|.
|
||||
// - Verify conditions in which the task runs (see potential failures above).
|
||||
// - Run |after_task_closure| if it is not null.
|
||||
bool PostTask(PostNestedTask post_nested_task,
|
||||
OnceClosure after_task_closure);
|
||||
|
||||
// Waits for all tasks posted by PostTask() to start running. It is not
|
||||
// guaranteed that the tasks have completed their execution when this returns.
|
||||
void WaitForAllTasksToRun() const;
|
||||
|
||||
const TaskRunner* task_runner() const { return task_runner_.get(); }
|
||||
|
||||
private:
|
||||
void RunTaskCallback(size_t task_index,
|
||||
PostNestedTask post_nested_task,
|
||||
OnceClosure after_task_closure);
|
||||
|
||||
// Synchronizes access to all members.
|
||||
mutable Lock lock_;
|
||||
|
||||
// Condition variable signaled when a task runs.
|
||||
mutable ConditionVariable cv_;
|
||||
|
||||
// Task runner through which this factory posts tasks.
|
||||
const scoped_refptr<TaskRunner> task_runner_;
|
||||
|
||||
// Execution mode of |task_runner_|.
|
||||
const TaskSourceExecutionMode execution_mode_;
|
||||
|
||||
// Number of tasks posted by PostTask().
|
||||
size_t num_posted_tasks_ = 0;
|
||||
|
||||
// Indexes of tasks that ran.
|
||||
std::unordered_set<size_t> ran_tasks_;
|
||||
|
||||
// Used to verify that all tasks run on the same thread when |execution_mode_|
|
||||
// is kSingleThread.
|
||||
ThreadCheckerImpl thread_checker_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(TestTaskFactory);
|
||||
};
|
||||
|
||||
} // namespace test
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_TEST_TASK_FACTORY_H_
|
||||
|
|
@ -0,0 +1,299 @@
|
|||
// Copyright 2017 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/test_utils.h"
|
||||
|
||||
#include <utility>
|
||||
|
||||
#include "base/bind.h"
|
||||
#include "base/synchronization/condition_variable.h"
|
||||
#include "base/task/thread_pool/pooled_parallel_task_runner.h"
|
||||
#include "base/task/thread_pool/pooled_sequenced_task_runner.h"
|
||||
#include "base/test/bind_test_util.h"
|
||||
#include "base/threading/scoped_blocking_call_internal.h"
|
||||
#include "base/threading/thread_restrictions.h"
|
||||
#include "testing/gtest/include/gtest/gtest.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
namespace test {
|
||||
|
||||
namespace {
|
||||
|
||||
// A task runner that posts each task as a JobTaskSource that runs a single
|
||||
// task. This is used to run ThreadGroupTests which require a TaskRunner with
|
||||
// kJob execution mode. Delayed tasks are not supported.
|
||||
class MockJobTaskRunner : public TaskRunner {
|
||||
public:
|
||||
MockJobTaskRunner(const TaskTraits& traits,
|
||||
PooledTaskRunnerDelegate* pooled_task_runner_delegate)
|
||||
: traits_(traits),
|
||||
pooled_task_runner_delegate_(pooled_task_runner_delegate) {}
|
||||
|
||||
// TaskRunner:
|
||||
bool PostDelayedTask(const Location& from_here,
|
||||
OnceClosure closure,
|
||||
TimeDelta delay) override;
|
||||
|
||||
private:
|
||||
~MockJobTaskRunner() override;
|
||||
|
||||
const TaskTraits traits_;
|
||||
PooledTaskRunnerDelegate* const pooled_task_runner_delegate_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(MockJobTaskRunner);
|
||||
};
|
||||
|
||||
bool MockJobTaskRunner::PostDelayedTask(const Location& from_here,
|
||||
OnceClosure closure,
|
||||
TimeDelta delay) {
|
||||
DCHECK_EQ(delay, TimeDelta()); // Jobs don't support delayed tasks.
|
||||
|
||||
if (!PooledTaskRunnerDelegate::Exists())
|
||||
return false;
|
||||
|
||||
auto job_task = base::MakeRefCounted<MockJobTask>(std::move(closure));
|
||||
scoped_refptr<JobTaskSource> task_source = job_task->GetJobTaskSource(
|
||||
from_here, traits_, pooled_task_runner_delegate_);
|
||||
return pooled_task_runner_delegate_->EnqueueJobTaskSource(
|
||||
std::move(task_source));
|
||||
}
|
||||
|
||||
MockJobTaskRunner::~MockJobTaskRunner() = default;
|
||||
|
||||
scoped_refptr<TaskRunner> CreateJobTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate) {
|
||||
return MakeRefCounted<MockJobTaskRunner>(traits,
|
||||
mock_pooled_task_runner_delegate);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
MockWorkerThreadObserver::MockWorkerThreadObserver()
|
||||
: on_main_exit_cv_(lock_.CreateConditionVariable()) {}
|
||||
|
||||
MockWorkerThreadObserver::~MockWorkerThreadObserver() {
|
||||
WaitCallsOnMainExit();
|
||||
}
|
||||
|
||||
void MockWorkerThreadObserver::AllowCallsOnMainExit(int num_calls) {
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
EXPECT_EQ(0, allowed_calls_on_main_exit_);
|
||||
allowed_calls_on_main_exit_ = num_calls;
|
||||
}
|
||||
|
||||
void MockWorkerThreadObserver::WaitCallsOnMainExit() {
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
while (allowed_calls_on_main_exit_ != 0)
|
||||
on_main_exit_cv_->Wait();
|
||||
}
|
||||
|
||||
void MockWorkerThreadObserver::OnWorkerThreadMainExit() {
|
||||
CheckedAutoLock auto_lock(lock_);
|
||||
EXPECT_GE(allowed_calls_on_main_exit_, 0);
|
||||
--allowed_calls_on_main_exit_;
|
||||
if (allowed_calls_on_main_exit_ == 0)
|
||||
on_main_exit_cv_->Signal();
|
||||
}
|
||||
|
||||
scoped_refptr<Sequence> CreateSequenceWithTask(
|
||||
Task task,
|
||||
const TaskTraits& traits,
|
||||
scoped_refptr<TaskRunner> task_runner,
|
||||
TaskSourceExecutionMode execution_mode) {
|
||||
scoped_refptr<Sequence> sequence =
|
||||
MakeRefCounted<Sequence>(traits, task_runner.get(), execution_mode);
|
||||
sequence->BeginTransaction().PushTask(std::move(task));
|
||||
return sequence;
|
||||
}
|
||||
|
||||
scoped_refptr<TaskRunner> CreatePooledTaskRunnerWithExecutionMode(
|
||||
TaskSourceExecutionMode execution_mode,
|
||||
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate,
|
||||
const TaskTraits& traits) {
|
||||
switch (execution_mode) {
|
||||
case TaskSourceExecutionMode::kParallel:
|
||||
return CreatePooledTaskRunner(traits, mock_pooled_task_runner_delegate);
|
||||
case TaskSourceExecutionMode::kSequenced:
|
||||
return CreatePooledSequencedTaskRunner(traits,
|
||||
mock_pooled_task_runner_delegate);
|
||||
case TaskSourceExecutionMode::kJob:
|
||||
return CreateJobTaskRunner(traits, mock_pooled_task_runner_delegate);
|
||||
default:
|
||||
// Fall through.
|
||||
break;
|
||||
}
|
||||
ADD_FAILURE() << "Unexpected ExecutionMode";
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
scoped_refptr<TaskRunner> CreatePooledTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate) {
|
||||
return MakeRefCounted<PooledParallelTaskRunner>(
|
||||
traits, mock_pooled_task_runner_delegate);
|
||||
}
|
||||
|
||||
scoped_refptr<SequencedTaskRunner> CreatePooledSequencedTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate) {
|
||||
return MakeRefCounted<PooledSequencedTaskRunner>(
|
||||
traits, mock_pooled_task_runner_delegate);
|
||||
}
|
||||
|
||||
MockPooledTaskRunnerDelegate::MockPooledTaskRunnerDelegate(
|
||||
TrackedRef<TaskTracker> task_tracker,
|
||||
DelayedTaskManager* delayed_task_manager)
|
||||
: task_tracker_(task_tracker),
|
||||
delayed_task_manager_(delayed_task_manager) {}
|
||||
|
||||
MockPooledTaskRunnerDelegate::~MockPooledTaskRunnerDelegate() = default;
|
||||
|
||||
bool MockPooledTaskRunnerDelegate::PostTaskWithSequence(
|
||||
Task task,
|
||||
scoped_refptr<Sequence> sequence) {
|
||||
// |thread_group_| must be initialized with SetThreadGroup() before
|
||||
// proceeding.
|
||||
DCHECK(thread_group_);
|
||||
DCHECK(task.task);
|
||||
DCHECK(sequence);
|
||||
|
||||
if (!task_tracker_->WillPostTask(&task, sequence->shutdown_behavior()))
|
||||
return false;
|
||||
|
||||
if (task.delayed_run_time.is_null()) {
|
||||
PostTaskWithSequenceNow(std::move(task), std::move(sequence));
|
||||
} else {
|
||||
// It's safe to take a ref on this pointer since the caller must have a ref
|
||||
// to the TaskRunner in order to post.
|
||||
scoped_refptr<TaskRunner> task_runner = sequence->task_runner();
|
||||
delayed_task_manager_->AddDelayedTask(
|
||||
std::move(task),
|
||||
BindOnce(
|
||||
[](scoped_refptr<Sequence> sequence,
|
||||
MockPooledTaskRunnerDelegate* self, Task task) {
|
||||
self->PostTaskWithSequenceNow(std::move(task),
|
||||
std::move(sequence));
|
||||
},
|
||||
std::move(sequence), Unretained(this)),
|
||||
std::move(task_runner));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
void MockPooledTaskRunnerDelegate::PostTaskWithSequenceNow(
|
||||
Task task,
|
||||
scoped_refptr<Sequence> sequence) {
|
||||
auto transaction = sequence->BeginTransaction();
|
||||
const bool sequence_should_be_queued = transaction.WillPushTask();
|
||||
RegisteredTaskSource task_source;
|
||||
if (sequence_should_be_queued) {
|
||||
task_source = task_tracker_->RegisterTaskSource(std::move(sequence));
|
||||
// We shouldn't push |task| if we're not allowed to queue |task_source|.
|
||||
if (!task_source)
|
||||
return;
|
||||
}
|
||||
transaction.PushTask(std::move(task));
|
||||
if (task_source) {
|
||||
thread_group_->PushTaskSourceAndWakeUpWorkers(
|
||||
{std::move(task_source), std::move(transaction)});
|
||||
}
|
||||
}
|
||||
|
||||
bool MockPooledTaskRunnerDelegate::ShouldYield(
|
||||
const TaskSource* task_source) const {
|
||||
return thread_group_->ShouldYield(task_source->priority_racy());
|
||||
}
|
||||
|
||||
bool MockPooledTaskRunnerDelegate::EnqueueJobTaskSource(
|
||||
scoped_refptr<JobTaskSource> task_source) {
|
||||
// |thread_group_| must be initialized with SetThreadGroup() before
|
||||
// proceeding.
|
||||
DCHECK(thread_group_);
|
||||
DCHECK(task_source);
|
||||
|
||||
auto registered_task_source =
|
||||
task_tracker_->RegisterTaskSource(std::move(task_source));
|
||||
if (!registered_task_source)
|
||||
return false;
|
||||
auto transaction = registered_task_source->BeginTransaction();
|
||||
thread_group_->PushTaskSourceAndWakeUpWorkers(
|
||||
{std::move(registered_task_source), std::move(transaction)});
|
||||
return true;
|
||||
}
|
||||
|
||||
void MockPooledTaskRunnerDelegate::RemoveJobTaskSource(
|
||||
scoped_refptr<JobTaskSource> task_source) {
|
||||
thread_group_->RemoveTaskSource(*task_source);
|
||||
}
|
||||
|
||||
void MockPooledTaskRunnerDelegate::UpdatePriority(
|
||||
scoped_refptr<TaskSource> task_source,
|
||||
TaskPriority priority) {
|
||||
auto transaction = task_source->BeginTransaction();
|
||||
transaction.UpdatePriority(priority);
|
||||
thread_group_->UpdateSortKey(std::move(transaction));
|
||||
}
|
||||
|
||||
void MockPooledTaskRunnerDelegate::SetThreadGroup(ThreadGroup* thread_group) {
|
||||
thread_group_ = thread_group;
|
||||
}
|
||||
|
||||
MockJobTask::~MockJobTask() = default;
|
||||
|
||||
MockJobTask::MockJobTask(
|
||||
base::RepeatingCallback<void(JobDelegate*)> worker_task,
|
||||
size_t num_tasks_to_run)
|
||||
: worker_task_(std::move(worker_task)),
|
||||
remaining_num_tasks_to_run_(num_tasks_to_run) {}
|
||||
|
||||
MockJobTask::MockJobTask(base::OnceClosure worker_task)
|
||||
: worker_task_(base::BindRepeating(
|
||||
[](base::OnceClosure&& worker_task, JobDelegate*) mutable {
|
||||
std::move(worker_task).Run();
|
||||
},
|
||||
base::Passed(std::move(worker_task)))),
|
||||
remaining_num_tasks_to_run_(1) {}
|
||||
|
||||
size_t MockJobTask::GetMaxConcurrency() const {
|
||||
return remaining_num_tasks_to_run_.load();
|
||||
}
|
||||
|
||||
void MockJobTask::Run(JobDelegate* delegate) {
|
||||
worker_task_.Run(delegate);
|
||||
size_t before = remaining_num_tasks_to_run_.fetch_sub(1);
|
||||
DCHECK_GT(before, 0U);
|
||||
}
|
||||
|
||||
scoped_refptr<JobTaskSource> MockJobTask::GetJobTaskSource(
|
||||
const Location& from_here,
|
||||
const TaskTraits& traits,
|
||||
PooledTaskRunnerDelegate* delegate) {
|
||||
return MakeRefCounted<JobTaskSource>(
|
||||
from_here, traits, base::BindRepeating(&test::MockJobTask::Run, this),
|
||||
base::BindRepeating(&test::MockJobTask::GetMaxConcurrency, this),
|
||||
delegate);
|
||||
}
|
||||
|
||||
RegisteredTaskSource QueueAndRunTaskSource(
|
||||
TaskTracker* task_tracker,
|
||||
scoped_refptr<TaskSource> task_source) {
|
||||
auto registered_task_source =
|
||||
task_tracker->RegisterTaskSource(std::move(task_source));
|
||||
EXPECT_TRUE(registered_task_source);
|
||||
EXPECT_NE(registered_task_source.WillRunTask(),
|
||||
TaskSource::RunStatus::kDisallowed);
|
||||
return task_tracker->RunAndPopNextTask(std::move(registered_task_source));
|
||||
}
|
||||
|
||||
void ShutdownTaskTracker(TaskTracker* task_tracker) {
|
||||
task_tracker->StartShutdown();
|
||||
task_tracker->CompleteShutdown();
|
||||
}
|
||||
|
||||
} // namespace test
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
162
TMessagesProj/jni/voip/webrtc/base/task/thread_pool/test_utils.h
Normal file
|
|
@ -0,0 +1,162 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_TEST_UTILS_H_
|
||||
#define BASE_TASK_THREAD_POOL_TEST_UTILS_H_
|
||||
|
||||
#include <atomic>
|
||||
|
||||
#include "base/callback.h"
|
||||
#include "base/task/common/checked_lock.h"
|
||||
#include "base/task/post_job.h"
|
||||
#include "base/task/task_features.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task/thread_pool/delayed_task_manager.h"
|
||||
#include "base/task/thread_pool/pooled_task_runner_delegate.h"
|
||||
#include "base/task/thread_pool/sequence.h"
|
||||
#include "base/task/thread_pool/task_tracker.h"
|
||||
#include "base/task/thread_pool/thread_group.h"
|
||||
#include "base/task/thread_pool/worker_thread_observer.h"
|
||||
#include "base/task_runner.h"
|
||||
#include "base/thread_annotations.h"
|
||||
#include "build/build_config.h"
|
||||
#include "testing/gmock/include/gmock/gmock.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
struct Task;
|
||||
|
||||
namespace test {
|
||||
|
||||
class MockWorkerThreadObserver : public WorkerThreadObserver {
|
||||
public:
|
||||
MockWorkerThreadObserver();
|
||||
~MockWorkerThreadObserver();
|
||||
|
||||
void AllowCallsOnMainExit(int num_calls);
|
||||
void WaitCallsOnMainExit();
|
||||
|
||||
// WorkerThreadObserver:
|
||||
MOCK_METHOD0(OnWorkerThreadMainEntry, void());
|
||||
// This doesn't use MOCK_METHOD0 because some tests need to wait for all calls
|
||||
// to happen, which isn't possible with gmock.
|
||||
void OnWorkerThreadMainExit() override;
|
||||
|
||||
private:
|
||||
CheckedLock lock_;
|
||||
std::unique_ptr<ConditionVariable> on_main_exit_cv_ GUARDED_BY(lock_);
|
||||
int allowed_calls_on_main_exit_ GUARDED_BY(lock_) = 0;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(MockWorkerThreadObserver);
|
||||
};
|
||||
|
||||
class MockPooledTaskRunnerDelegate : public PooledTaskRunnerDelegate {
|
||||
public:
|
||||
MockPooledTaskRunnerDelegate(TrackedRef<TaskTracker> task_tracker,
|
||||
DelayedTaskManager* delayed_task_manager);
|
||||
~MockPooledTaskRunnerDelegate() override;
|
||||
|
||||
// PooledTaskRunnerDelegate:
|
||||
bool PostTaskWithSequence(Task task,
|
||||
scoped_refptr<Sequence> sequence) override;
|
||||
bool EnqueueJobTaskSource(scoped_refptr<JobTaskSource> task_source) override;
|
||||
void RemoveJobTaskSource(scoped_refptr<JobTaskSource> task_source) override;
|
||||
bool ShouldYield(const TaskSource* task_source) const override;
|
||||
void UpdatePriority(scoped_refptr<TaskSource> task_source,
|
||||
TaskPriority priority) override;
|
||||
|
||||
void SetThreadGroup(ThreadGroup* thread_group);
|
||||
|
||||
void PostTaskWithSequenceNow(Task task, scoped_refptr<Sequence> sequence);
|
||||
|
||||
private:
|
||||
const TrackedRef<TaskTracker> task_tracker_;
|
||||
DelayedTaskManager* const delayed_task_manager_;
|
||||
ThreadGroup* thread_group_ = nullptr;
|
||||
};
|
||||
|
||||
// A simple MockJobTask that will run |worker_task| a fixed number of times,
|
||||
// possibly in parallel.
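// Hedged usage sketch (not part of the original header; |delegate| is an
// assumed MockPooledTaskRunnerDelegate and the worker callback is
// illustrative):
//
//   auto job_task = MakeRefCounted<MockJobTask>(
//       BindRepeating([](JobDelegate*) { /* one unit of work */ }),
//       /*num_tasks_to_run=*/4);
//   scoped_refptr<JobTaskSource> source =
//       job_task->GetJobTaskSource(FROM_HERE, {TaskPriority::USER_VISIBLE},
//                                  &delegate);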
|
||||
class MockJobTask : public base::RefCountedThreadSafe<MockJobTask> {
|
||||
public:
|
||||
// Gives |worker_task| to requesting workers |num_tasks_to_run| times.
|
||||
MockJobTask(base::RepeatingCallback<void(JobDelegate*)> worker_task,
|
||||
size_t num_tasks_to_run);
|
||||
|
||||
// Gives |worker_task| to a single requesting worker.
|
||||
MockJobTask(base::OnceClosure worker_task);
|
||||
|
||||
// Updates the remaining number of times |worker_task| runs to
|
||||
// |num_tasks_to_run|.
|
||||
void SetNumTasksToRun(size_t num_tasks_to_run) {
|
||||
remaining_num_tasks_to_run_ = num_tasks_to_run;
|
||||
}
|
||||
|
||||
size_t GetMaxConcurrency() const;
|
||||
void Run(JobDelegate* delegate);
|
||||
|
||||
scoped_refptr<JobTaskSource> GetJobTaskSource(
|
||||
const Location& from_here,
|
||||
const TaskTraits& traits,
|
||||
PooledTaskRunnerDelegate* delegate);
|
||||
|
||||
private:
|
||||
friend class base::RefCountedThreadSafe<MockJobTask>;
|
||||
|
||||
~MockJobTask();
|
||||
|
||||
base::RepeatingCallback<void(JobDelegate*)> worker_task_;
|
||||
std::atomic_size_t remaining_num_tasks_to_run_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(MockJobTask);
|
||||
};
|
||||
|
||||
// An enumeration of possible thread pool types. Used to parametrize relevant
|
||||
// thread_pool tests.
|
||||
enum class PoolType {
|
||||
GENERIC,
|
||||
#if HAS_NATIVE_THREAD_POOL()
|
||||
NATIVE,
|
||||
#endif
|
||||
};
|
||||
|
||||
// Creates a Sequence with given |traits| and pushes |task| to it. If a
|
||||
// TaskRunner is associated with |task|, it should be be passed as |task_runner|
|
||||
// along with its |execution_mode|. Returns the created Sequence.
|
||||
scoped_refptr<Sequence> CreateSequenceWithTask(
|
||||
Task task,
|
||||
const TaskTraits& traits,
|
||||
scoped_refptr<TaskRunner> task_runner = nullptr,
|
||||
TaskSourceExecutionMode execution_mode =
|
||||
TaskSourceExecutionMode::kParallel);
|
||||
|
||||
// Creates a TaskRunner that posts tasks to the thread group owned by
|
||||
// |pooled_task_runner_delegate| with the |execution_mode|.
|
||||
// Caveat: this does not support TaskSourceExecutionMode::kSingleThread.
|
||||
scoped_refptr<TaskRunner> CreatePooledTaskRunnerWithExecutionMode(
|
||||
TaskSourceExecutionMode execution_mode,
|
||||
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate,
|
||||
const TaskTraits& traits = {});
|
||||
|
||||
scoped_refptr<TaskRunner> CreatePooledTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate);
|
||||
|
||||
scoped_refptr<SequencedTaskRunner> CreatePooledSequencedTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
MockPooledTaskRunnerDelegate* mock_pooled_task_runner_delegate);
|
||||
|
||||
RegisteredTaskSource QueueAndRunTaskSource(
|
||||
TaskTracker* task_tracker,
|
||||
scoped_refptr<TaskSource> task_source);
|
||||
|
||||
// Calls StartShutdown() and CompleteShutdown() on |task_tracker|.
|
||||
void ShutdownTaskTracker(TaskTracker* task_tracker);
|
||||
|
||||
} // namespace test
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_TEST_UTILS_H_
|
||||
|
|
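// Usage note (not part of the header above): a hypothetical sketch of how the
// test helpers declared in test_utils.h might be composed. It assumes it is
// written inside namespace base::internal::test, and that a started
// TaskTracker named |task_tracker| and a MockPooledTaskRunnerDelegate named
// |delegate| already exist in the test fixture; only declarations shown above
// are used, and exact fixture setup may differ.
auto job_task = MakeRefCounted<MockJobTask>(
    BindRepeating([](JobDelegate*) { /* one unit of work */ }),
    /*num_tasks_to_run=*/4);

// Wrap the mock job in a JobTaskSource and run it through the TaskTracker.
scoped_refptr<JobTaskSource> task_source = job_task->GetJobTaskSource(
    FROM_HERE, {TaskPriority::BEST_EFFORT}, &delegate);
RegisteredTaskSource registered =
    QueueAndRunTaskSource(&task_tracker, std::move(task_source));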
@@ -0,0 +1,288 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/thread_pool/thread_group.h"

#include <utility>

#include "base/bind.h"
#include "base/bind_helpers.h"
#include "base/lazy_instance.h"
#include "base/task/thread_pool/task_tracker.h"
#include "base/threading/thread_local.h"

#if defined(OS_WIN)
#include "base/win/com_init_check_hook.h"
#include "base/win/scoped_com_initializer.h"
#include "base/win/scoped_winrt_initializer.h"
#include "base/win/windows_version.h"
#endif

namespace base {
namespace internal {

namespace {

// ThreadGroup that owns the current thread, if any.
LazyInstance<ThreadLocalPointer<const ThreadGroup>>::Leaky
    tls_current_thread_group = LAZY_INSTANCE_INITIALIZER;

const ThreadGroup* GetCurrentThreadGroup() {
  return tls_current_thread_group.Get().Get();
}

}  // namespace

void ThreadGroup::BaseScopedCommandsExecutor::ScheduleReleaseTaskSource(
    RegisteredTaskSource task_source) {
  task_sources_to_release_.push_back(std::move(task_source));
}

ThreadGroup::BaseScopedCommandsExecutor::BaseScopedCommandsExecutor() = default;

ThreadGroup::BaseScopedCommandsExecutor::~BaseScopedCommandsExecutor() {
  CheckedLock::AssertNoLockHeldOnCurrentThread();
}

ThreadGroup::ScopedReenqueueExecutor::ScopedReenqueueExecutor() = default;

ThreadGroup::ScopedReenqueueExecutor::~ScopedReenqueueExecutor() {
  if (destination_thread_group_) {
    destination_thread_group_->PushTaskSourceAndWakeUpWorkers(
        std::move(transaction_with_task_source_.value()));
  }
}

void ThreadGroup::ScopedReenqueueExecutor::
    SchedulePushTaskSourceAndWakeUpWorkers(
        TransactionWithRegisteredTaskSource transaction_with_task_source,
        ThreadGroup* destination_thread_group) {
  DCHECK(destination_thread_group);
  DCHECK(!destination_thread_group_);
  DCHECK(!transaction_with_task_source_);
  transaction_with_task_source_.emplace(
      std::move(transaction_with_task_source));
  destination_thread_group_ = destination_thread_group;
}

ThreadGroup::ThreadGroup(TrackedRef<TaskTracker> task_tracker,
                         TrackedRef<Delegate> delegate,
                         ThreadGroup* predecessor_thread_group)
    : task_tracker_(std::move(task_tracker)),
      delegate_(std::move(delegate)),
      lock_(predecessor_thread_group ? &predecessor_thread_group->lock_
                                     : nullptr) {
  DCHECK(task_tracker_);
}

ThreadGroup::~ThreadGroup() = default;

void ThreadGroup::BindToCurrentThread() {
  DCHECK(!GetCurrentThreadGroup());
  tls_current_thread_group.Get().Set(this);
}

void ThreadGroup::UnbindFromCurrentThread() {
  DCHECK(GetCurrentThreadGroup());
  tls_current_thread_group.Get().Set(nullptr);
}

bool ThreadGroup::IsBoundToCurrentThread() const {
  return GetCurrentThreadGroup() == this;
}

size_t
ThreadGroup::GetNumAdditionalWorkersForBestEffortTaskSourcesLockRequired()
    const {
  // For simplicity, only 1 worker is assigned to each task source regardless
  // of its max concurrency, with the exception of the top task source.
  const size_t num_queued =
      priority_queue_.GetNumTaskSourcesWithPriority(TaskPriority::BEST_EFFORT);
  if (num_queued == 0 ||
      !task_tracker_->CanRunPriority(TaskPriority::BEST_EFFORT)) {
    return 0U;
  }
  if (priority_queue_.PeekSortKey().priority() == TaskPriority::BEST_EFFORT) {
    // Assign the correct number of workers for the top TaskSource (-1 for the
    // worker that is already accounted for in |num_queued|).
    return std::max<size_t>(
        1, num_queued +
               priority_queue_.PeekTaskSource()->GetRemainingConcurrency() - 1);
  }
  return num_queued;
}

size_t
ThreadGroup::GetNumAdditionalWorkersForForegroundTaskSourcesLockRequired()
    const {
  // For simplicity, only 1 worker is assigned to each task source regardless
  // of its max concurrency, with the exception of the top task source.
  const size_t num_queued = priority_queue_.GetNumTaskSourcesWithPriority(
                                TaskPriority::USER_VISIBLE) +
                            priority_queue_.GetNumTaskSourcesWithPriority(
                                TaskPriority::USER_BLOCKING);
  if (num_queued == 0 ||
      !task_tracker_->CanRunPriority(TaskPriority::HIGHEST)) {
    return 0U;
  }
  auto priority = priority_queue_.PeekSortKey().priority();
  if (priority == TaskPriority::USER_VISIBLE ||
      priority == TaskPriority::USER_BLOCKING) {
    // Assign the correct number of workers for the top TaskSource (-1 for the
    // worker that is already accounted for in |num_queued|).
    return std::max<size_t>(
        1, num_queued +
               priority_queue_.PeekTaskSource()->GetRemainingConcurrency() - 1);
  }
  return num_queued;
}

RegisteredTaskSource ThreadGroup::RemoveTaskSource(
    const TaskSource& task_source) {
  CheckedAutoLock auto_lock(lock_);
  return priority_queue_.RemoveTaskSource(task_source);
}

void ThreadGroup::ReEnqueueTaskSourceLockRequired(
    BaseScopedCommandsExecutor* workers_executor,
    ScopedReenqueueExecutor* reenqueue_executor,
    TransactionWithRegisteredTaskSource transaction_with_task_source) {
  // Decide in which thread group the TaskSource should be reenqueued.
  ThreadGroup* destination_thread_group = delegate_->GetThreadGroupForTraits(
      transaction_with_task_source.transaction.traits());

  if (destination_thread_group == this) {
    // Another worker that was running a task from this task source may have
    // reenqueued it already, in which case its heap_handle will be valid. It
    // shouldn't be queued twice so the task source registration is released.
    if (transaction_with_task_source.task_source->heap_handle().IsValid()) {
      workers_executor->ScheduleReleaseTaskSource(
          std::move(transaction_with_task_source.task_source));
    } else {
      // If the TaskSource should be reenqueued in the current thread group,
      // reenqueue it inside the scope of the lock.
      priority_queue_.Push(std::move(transaction_with_task_source));
    }
    // This is called unconditionally to ensure there are always workers to run
    // task sources in the queue. Some ThreadGroup implementations only invoke
    // TakeRegisteredTaskSource() once per wake up and hence this is required
    // to avoid races that could leave a task source stranded in the queue with
    // no active workers.
    EnsureEnoughWorkersLockRequired(workers_executor);
  } else {
    // Otherwise, schedule a reenqueue after releasing the lock.
    reenqueue_executor->SchedulePushTaskSourceAndWakeUpWorkers(
        std::move(transaction_with_task_source), destination_thread_group);
  }
}

RegisteredTaskSource ThreadGroup::TakeRegisteredTaskSource(
    BaseScopedCommandsExecutor* executor) {
  DCHECK(!priority_queue_.IsEmpty());

  auto run_status = priority_queue_.PeekTaskSource().WillRunTask();

  if (run_status == TaskSource::RunStatus::kDisallowed) {
    executor->ScheduleReleaseTaskSource(priority_queue_.PopTaskSource());
    return nullptr;
  }

  if (run_status == TaskSource::RunStatus::kAllowedSaturated)
    return priority_queue_.PopTaskSource();

  // If the TaskSource isn't saturated, check whether TaskTracker allows it to
  // remain in the PriorityQueue.
  // The canonical way of doing this is to pop the task source to return, call
  // WillQueueTaskSource() to get an additional RegisteredTaskSource, and
  // reenqueue that task source if valid. Instead, it is cheaper and equivalent
  // to peek the task source, call RegisterTaskSource() to get an additional
  // RegisteredTaskSource to replace if valid, and only pop |priority_queue_|
  // otherwise.
  RegisteredTaskSource task_source =
      task_tracker_->RegisterTaskSource(priority_queue_.PeekTaskSource().get());
  if (!task_source)
    return priority_queue_.PopTaskSource();
  return std::exchange(priority_queue_.PeekTaskSource(),
                       std::move(task_source));
}

void ThreadGroup::UpdateSortKeyImpl(BaseScopedCommandsExecutor* executor,
                                    TaskSource::Transaction transaction) {
  CheckedAutoLock auto_lock(lock_);
  priority_queue_.UpdateSortKey(std::move(transaction));
  EnsureEnoughWorkersLockRequired(executor);
}

void ThreadGroup::PushTaskSourceAndWakeUpWorkersImpl(
    BaseScopedCommandsExecutor* executor,
    TransactionWithRegisteredTaskSource transaction_with_task_source) {
  CheckedAutoLock auto_lock(lock_);
  DCHECK(!replacement_thread_group_);
  DCHECK_EQ(delegate_->GetThreadGroupForTraits(
                transaction_with_task_source.transaction.traits()),
            this);
  if (transaction_with_task_source.task_source->heap_handle().IsValid()) {
    // If the task source changed group, it is possible that multiple
    // concurrent workers try to enqueue it. Only the first enqueue should
    // succeed.
    executor->ScheduleReleaseTaskSource(
        std::move(transaction_with_task_source.task_source));
    return;
  }
  priority_queue_.Push(std::move(transaction_with_task_source));
  EnsureEnoughWorkersLockRequired(executor);
}

void ThreadGroup::InvalidateAndHandoffAllTaskSourcesToOtherThreadGroup(
    ThreadGroup* destination_thread_group) {
  CheckedAutoLock current_thread_group_lock(lock_);
  CheckedAutoLock destination_thread_group_lock(
      destination_thread_group->lock_);
  destination_thread_group->priority_queue_ = std::move(priority_queue_);
  replacement_thread_group_ = destination_thread_group;
}

bool ThreadGroup::ShouldYield(TaskPriority priority) const {
  // It is safe to read |min_allowed_priority_| without a lock since this
  // variable is atomic, keeping in mind that threads may not immediately see
  // the new value when it is updated.
  return !task_tracker_->CanRunPriority(priority) ||
         priority < TS_UNCHECKED_READ(min_allowed_priority_)
                        .load(std::memory_order_relaxed);
}

#if defined(OS_WIN)
// static
std::unique_ptr<win::ScopedWindowsThreadEnvironment>
ThreadGroup::GetScopedWindowsThreadEnvironment(WorkerEnvironment environment) {
  std::unique_ptr<win::ScopedWindowsThreadEnvironment> scoped_environment;
  switch (environment) {
    case WorkerEnvironment::COM_MTA: {
      if (win::GetVersion() >= win::Version::WIN8) {
        scoped_environment = std::make_unique<win::ScopedWinrtInitializer>();
      } else {
        scoped_environment = std::make_unique<win::ScopedCOMInitializer>(
            win::ScopedCOMInitializer::kMTA);
      }
      break;
    }
    case WorkerEnvironment::COM_STA: {
      // When defined(COM_INIT_CHECK_HOOK_ENABLED), ignore
      // WorkerEnvironment::COM_STA to find incorrect uses of
      // COM that should be running in a COM STA Task Runner.
#if !defined(COM_INIT_CHECK_HOOK_ENABLED)
      scoped_environment = std::make_unique<win::ScopedCOMInitializer>();
#endif
      break;
    }
    default:
      break;
  }

  DCHECK(!scoped_environment || scoped_environment->Succeeded());
  return scoped_environment;
}
#endif

}  // namespace internal
}  // namespace base
@@ -0,0 +1,250 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_THREAD_GROUP_H_
#define BASE_TASK_THREAD_POOL_THREAD_GROUP_H_

#include "base/base_export.h"
#include "base/memory/ref_counted.h"
#include "base/task/common/checked_lock.h"
#include "base/task/thread_pool/priority_queue.h"
#include "base/task/thread_pool/task.h"
#include "base/task/thread_pool/task_source.h"
#include "base/task/thread_pool/tracked_ref.h"
#include "build/build_config.h"

#if defined(OS_WIN)
#include "base/win/scoped_windows_thread_environment.h"
#endif

namespace base {
namespace internal {

class TaskTracker;

// Interface and base implementation for a thread group. A thread group is a
// subset of the threads in the thread pool (see GetThreadGroupForTraits() for
// thread group selection logic when posting tasks and creating task runners).
class BASE_EXPORT ThreadGroup {
 public:
  // Delegate interface for ThreadGroup.
  class BASE_EXPORT Delegate {
   public:
    virtual ~Delegate() = default;

    // Invoked when a TaskSource with |traits| is non-empty after the
    // ThreadGroup has run a task from it. The implementation must return the
    // thread group in which the TaskSource should be reenqueued.
    virtual ThreadGroup* GetThreadGroupForTraits(const TaskTraits& traits) = 0;
  };

  enum class WorkerEnvironment {
    // No special worker environment required.
    NONE,
#if defined(OS_WIN)
    // Initialize a COM MTA on the worker.
    COM_MTA,
    // Initialize a COM STA on the worker.
    COM_STA,
#endif  // defined(OS_WIN)
  };

  virtual ~ThreadGroup();

  // Registers the thread group in TLS.
  void BindToCurrentThread();

  // Resets the thread group in TLS.
  void UnbindFromCurrentThread();

  // Returns true if the thread group is registered in TLS.
  bool IsBoundToCurrentThread() const;

  // Removes |task_source| from |priority_queue_|. Returns a
  // RegisteredTaskSource that evaluates to true if successful, or false if
  // |task_source| is not currently in |priority_queue_|, such as when a worker
  // is running a task from it.
  RegisteredTaskSource RemoveTaskSource(const TaskSource& task_source);

  // Updates the position of the TaskSource in |transaction| in this
  // ThreadGroup's PriorityQueue based on the TaskSource's current traits.
  //
  // Implementations should instantiate a concrete ScopedCommandsExecutor and
  // invoke UpdateSortKeyImpl().
  virtual void UpdateSortKey(TaskSource::Transaction transaction) = 0;

  // Pushes the TaskSource in |transaction_with_task_source| into this
  // ThreadGroup's PriorityQueue and wakes up workers as appropriate.
  //
  // Implementations should instantiate a concrete ScopedCommandsExecutor and
  // invoke PushTaskSourceAndWakeUpWorkersImpl().
  virtual void PushTaskSourceAndWakeUpWorkers(
      TransactionWithRegisteredTaskSource transaction_with_task_source) = 0;

  // Removes all task sources from this ThreadGroup's PriorityQueue and
  // enqueues them in another |destination_thread_group|. After this method is
  // called, any task sources posted to this ThreadGroup will be forwarded to
  // |destination_thread_group|.
  //
  // TODO(crbug.com/756547): Remove this method once the UseNativeThreadPool
  // experiment is complete.
  void InvalidateAndHandoffAllTaskSourcesToOtherThreadGroup(
      ThreadGroup* destination_thread_group);

  // Returns true if a task with |priority| running in this thread group should
  // return ASAP, either because this priority is not allowed to run or because
  // work of higher priority is pending. Thread-safe but may return an outdated
  // result (if a task unnecessarily yields due to this, it will simply be
  // re-scheduled).
  bool ShouldYield(TaskPriority priority) const;

  // Prevents new tasks from starting to run and waits for currently running
  // tasks to complete their execution. It is guaranteed that no thread will do
  // work on behalf of this ThreadGroup after this returns. It is invalid to
  // post a task once this is called. TaskTracker::Flush() can be called before
  // this to complete existing tasks, which might otherwise post a task during
  // JoinForTesting(). This can only be called once.
  virtual void JoinForTesting() = 0;

  // Returns the maximum number of non-blocked tasks that can run concurrently
  // in this ThreadGroup.
  //
  // TODO(fdoray): Remove this method. https://crbug.com/687264
  virtual size_t GetMaxConcurrentNonBlockedTasksDeprecated() const = 0;

  // Reports relevant metrics per implementation.
  virtual void ReportHeartbeatMetrics() const = 0;

  // Wakes up workers as appropriate for the new CanRunPolicy policy. Must be
  // called after an update to CanRunPolicy in TaskTracker.
  virtual void DidUpdateCanRunPolicy() = 0;

 protected:
  // Derived classes must implement a ScopedCommandsExecutor that derives from
  // this to perform operations at the end of a scope, when all locks have been
  // released.
  class BaseScopedCommandsExecutor {
   public:
    void ScheduleReleaseTaskSource(RegisteredTaskSource task_source);

   protected:
    BaseScopedCommandsExecutor();
    ~BaseScopedCommandsExecutor();

   private:
    std::vector<RegisteredTaskSource> task_sources_to_release_;

    DISALLOW_COPY_AND_ASSIGN(BaseScopedCommandsExecutor);
  };

  // Allows a task source to be pushed to a ThreadGroup's PriorityQueue at the
  // end of a scope, when all locks have been released.
  class ScopedReenqueueExecutor {
   public:
    ScopedReenqueueExecutor();
    ~ScopedReenqueueExecutor();

    // A TransactionWithRegisteredTaskSource and the ThreadGroup in which it
    // should be enqueued.
    void SchedulePushTaskSourceAndWakeUpWorkers(
        TransactionWithRegisteredTaskSource transaction_with_task_source,
        ThreadGroup* destination_thread_group);

   private:
    // A TransactionWithRegisteredTaskSource and the thread group in which it
    // should be enqueued.
    Optional<TransactionWithRegisteredTaskSource>
        transaction_with_task_source_;
    ThreadGroup* destination_thread_group_ = nullptr;

    DISALLOW_COPY_AND_ASSIGN(ScopedReenqueueExecutor);
  };

  // |predecessor_thread_group| is a ThreadGroup whose lock can be acquired
  // before the constructed ThreadGroup's lock. This is necessary to move all
  // task sources from |predecessor_thread_group| to the constructed
  // ThreadGroup and support the UseNativeThreadPool experiment.
  //
  // TODO(crbug.com/756547): Remove |predecessor_thread_group| once the
  // experiment is complete.
  ThreadGroup(TrackedRef<TaskTracker> task_tracker,
              TrackedRef<Delegate> delegate,
              ThreadGroup* predecessor_thread_group = nullptr);

#if defined(OS_WIN)
  static std::unique_ptr<win::ScopedWindowsThreadEnvironment>
  GetScopedWindowsThreadEnvironment(WorkerEnvironment environment);
#endif

  const TrackedRef<TaskTracker> task_tracker_;
  const TrackedRef<Delegate> delegate_;

  // Returns the number of workers required to run all queued BEST_EFFORT task
  // sources allowed to run by the current CanRunPolicy.
  size_t GetNumAdditionalWorkersForBestEffortTaskSourcesLockRequired() const
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns the number of workers required to run all queued
  // USER_VISIBLE/USER_BLOCKING task sources allowed to run by the current
  // CanRunPolicy.
  size_t GetNumAdditionalWorkersForForegroundTaskSourcesLockRequired() const
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Ensures that there are enough workers to run queued task sources.
  // |executor| is forwarded from the one received in
  // PushTaskSourceAndWakeUpWorkersImpl().
  virtual void EnsureEnoughWorkersLockRequired(
      BaseScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_) = 0;

  // Reenqueues a |transaction_with_task_source| from which a Task just ran in
  // the current ThreadGroup into the appropriate ThreadGroup.
  void ReEnqueueTaskSourceLockRequired(
      BaseScopedCommandsExecutor* workers_executor,
      ScopedReenqueueExecutor* reenqueue_executor,
      TransactionWithRegisteredTaskSource transaction_with_task_source)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns the next task source from |priority_queue_| if permitted to run
  // and pops |priority_queue_| if the task source returned no longer needs to
  // be queued (reached its maximum concurrency). Otherwise returns nullptr and
  // pops |priority_queue_| so this can be called again.
  RegisteredTaskSource TakeRegisteredTaskSource(
      BaseScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Must be invoked by implementations of the corresponding non-Impl()
  // methods.
  void UpdateSortKeyImpl(BaseScopedCommandsExecutor* executor,
                         TaskSource::Transaction transaction);
  void PushTaskSourceAndWakeUpWorkersImpl(
      BaseScopedCommandsExecutor* executor,
      TransactionWithRegisteredTaskSource transaction_with_task_source);

  // Synchronizes accesses to all members of this class which are neither
  // const, atomic, nor immutable after start. Since this lock is a bottleneck
  // to post and schedule work, only simple data structure manipulations are
  // allowed within its scope (no thread creation or wake up).
  mutable CheckedLock lock_;

  // PriorityQueue from which all threads of this ThreadGroup get work.
  PriorityQueue priority_queue_ GUARDED_BY(lock_);

  // Minimum priority allowed to run below which tasks should yield. This is
  // expected to be always kept up-to-date by derived classes when |lock_| is
  // released. It is annotated as GUARDED_BY(lock_) because it is always
  // updated under the lock (to avoid races with other state during the update)
  // but it is nonetheless always safe to read it without the lock (since it's
  // atomic).
  std::atomic<TaskPriority> min_allowed_priority_ GUARDED_BY(lock_){
      TaskPriority::BEST_EFFORT};

  // If |replacement_thread_group_| is non-null, this ThreadGroup is invalid
  // and all task sources should be scheduled on |replacement_thread_group_|.
  // Used to support the UseNativeThreadPool experiment.
  ThreadGroup* replacement_thread_group_ = nullptr;

 private:
  DISALLOW_COPY_AND_ASSIGN(ThreadGroup);
};

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_THREAD_GROUP_H_

File diff suppressed because it is too large
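// Usage note (not from the repository): the Delegate interface above decides
// where a task source is reenqueued after each run. A minimal hypothetical
// implementation is sketched below; the two-group layout and the member names
// are assumptions for illustration only.
class TwoGroupDelegate : public base::internal::ThreadGroup::Delegate {
 public:
  TwoGroupDelegate(base::internal::ThreadGroup* foreground_group,
                   base::internal::ThreadGroup* background_group)
      : foreground_group_(foreground_group),
        background_group_(background_group) {}

  // Route BEST_EFFORT work to the background group, everything else to the
  // foreground group.
  base::internal::ThreadGroup* GetThreadGroupForTraits(
      const base::TaskTraits& traits) override {
    return traits.priority() == base::TaskPriority::BEST_EFFORT
               ? background_group_
               : foreground_group_;
  }

 private:
  base::internal::ThreadGroup* const foreground_group_;
  base::internal::ThreadGroup* const background_group_;
};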
@@ -0,0 +1,370 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_THREAD_GROUP_IMPL_H_
#define BASE_TASK_THREAD_POOL_THREAD_GROUP_IMPL_H_

#include <stddef.h>

#include <memory>
#include <string>
#include <vector>

#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/containers/stack.h"
#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/optional.h"
#include "base/sequenced_task_runner.h"
#include "base/strings/string_piece.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/waitable_event.h"
#include "base/task/thread_pool/task.h"
#include "base/task/thread_pool/task_source.h"
#include "base/task/thread_pool/thread_group.h"
#include "base/task/thread_pool/tracked_ref.h"
#include "base/task/thread_pool/worker_thread.h"
#include "base/task/thread_pool/worker_thread_stack.h"
#include "base/time/time.h"

namespace base {

class HistogramBase;
class WorkerThreadObserver;

namespace internal {

class TaskTracker;

// A group of workers that run Tasks.
//
// The thread group doesn't create threads until Start() is called. Tasks can
// be posted at any time but will not run until after Start() is called.
//
// This class is thread-safe.
class BASE_EXPORT ThreadGroupImpl : public ThreadGroup {
 public:
  // Constructs a group without workers.
  //
  // |histogram_label| is used to label the thread group's histograms as
  // "ThreadPool." + histogram_name + "." + |histogram_label| + extra suffixes.
  // It must not be empty. |thread_group_label| is used to label the thread
  // group's threads, it must not be empty. |priority_hint| is the preferred
  // thread priority; the actual thread priority depends on shutdown state and
  // platform capabilities. |task_tracker| keeps track of tasks.
  ThreadGroupImpl(StringPiece histogram_label,
                  StringPiece thread_group_label,
                  ThreadPriority priority_hint,
                  TrackedRef<TaskTracker> task_tracker,
                  TrackedRef<Delegate> delegate);

  // Creates threads, allowing existing and future tasks to run. The thread
  // group runs at most |max_tasks| / |max_best_effort_tasks| unblocked tasks
  // with any / BEST_EFFORT priority concurrently. It reclaims unused threads
  // after |suggested_reclaim_time|. It uses |service_thread_task_runner| to
  // monitor for blocked tasks. If specified, it notifies
  // |worker_thread_observer| when a worker enters and exits its main function
  // (the observer must not be destroyed before JoinForTesting() has returned).
  // |worker_environment| specifies the environment in which tasks are
  // executed. |may_block_threshold| is the timeout after which a task in a
  // MAY_BLOCK ScopedBlockingCall is considered blocked (the thread group will
  // choose an appropriate value if none is specified). Can only be called
  // once. CHECKs on failure.
  void Start(int max_tasks,
             int max_best_effort_tasks,
             TimeDelta suggested_reclaim_time,
             scoped_refptr<SequencedTaskRunner> service_thread_task_runner,
             WorkerThreadObserver* worker_thread_observer,
             WorkerEnvironment worker_environment,
             Optional<TimeDelta> may_block_threshold = Optional<TimeDelta>());

  // Destroying a ThreadGroupImpl returned by Create() is not allowed in
  // production; it is always leaked. In tests, it can only be destroyed after
  // JoinForTesting() has returned.
  ~ThreadGroupImpl() override;

  // ThreadGroup:
  void JoinForTesting() override;
  size_t GetMaxConcurrentNonBlockedTasksDeprecated() const override;
  void ReportHeartbeatMetrics() const override;
  void DidUpdateCanRunPolicy() override;

  const HistogramBase* num_tasks_before_detach_histogram() const {
    return num_tasks_before_detach_histogram_;
  }

  // Waits until at least |n| workers are idle. Note that while workers are
  // disallowed from cleaning up during this call, tests using a custom
  // |suggested_reclaim_time_| need to be careful to invoke this swiftly after
  // unblocking the waited upon workers: if a worker is already detached by
  // the time this is invoked, it will never make it onto the idle stack and
  // this call will hang.
  void WaitForWorkersIdleForTesting(size_t n);

  // Waits until at least |n| workers are idle.
  void WaitForWorkersIdleLockRequiredForTesting(size_t n)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Waits until all workers are idle.
  void WaitForAllWorkersIdleForTesting();

  // Waits until |n| workers have cleaned up (went through
  // WorkerThreadDelegateImpl::OnMainExit()) since the last call to
  // WaitForWorkersCleanedUpForTesting() (or Start() if that wasn't called
  // yet).
  void WaitForWorkersCleanedUpForTesting(size_t n);

  // Returns the number of workers in this thread group.
  size_t NumberOfWorkersForTesting() const;

  // Returns |max_tasks_|.
  size_t GetMaxTasksForTesting() const;

  // Returns the number of workers that are idle (i.e. not running tasks).
  size_t NumberOfIdleWorkersForTesting() const;

 private:
  class ScopedCommandsExecutor;
  class WorkerThreadDelegateImpl;

  // Friend tests so that they can access |blocked_workers_poll_period| and
  // may_block_threshold().
  friend class ThreadGroupImplBlockingTest;
  friend class ThreadGroupImplMayBlockTest;
  FRIEND_TEST_ALL_PREFIXES(ThreadGroupImplBlockingTest,
                           ThreadBlockUnblockPremature);

  // ThreadGroup:
  void UpdateSortKey(TaskSource::Transaction transaction) override;
  void PushTaskSourceAndWakeUpWorkers(
      TransactionWithRegisteredTaskSource transaction_with_task_source)
      override;
  void EnsureEnoughWorkersLockRequired(BaseScopedCommandsExecutor* executor)
      override EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Creates a worker and schedules its start, if needed, to maintain one idle
  // worker, |max_tasks_| permitting.
  void MaintainAtLeastOneIdleWorkerLockRequired(
      ScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns true if worker cleanup is permitted.
  bool CanWorkerCleanupForTestingLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Creates a worker, adds it to the thread group, schedules its start and
  // returns it. Cannot be called before Start().
  scoped_refptr<WorkerThread> CreateAndRegisterWorkerLockRequired(
      ScopedCommandsExecutor* executor) EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns the number of workers that are awake (i.e. not on the idle stack).
  size_t GetNumAwakeWorkersLockRequired() const EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns the desired number of awake workers, given current workload and
  // concurrency limits.
  size_t GetDesiredNumAwakeWorkersLockRequired() const
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Examines the list of WorkerThreads and increments |max_tasks_| for each
  // worker that has been within the scope of a MAY_BLOCK ScopedBlockingCall
  // for more than BlockedThreshold(). Reschedules a call if necessary.
  void AdjustMaxTasks();

  // Returns the threshold after which the max tasks is increased to compensate
  // for a worker that is within a MAY_BLOCK ScopedBlockingCall.
  TimeDelta may_block_threshold_for_testing() const {
    return after_start().may_block_threshold;
  }

  // Interval at which the service thread checks for workers in this thread
  // group that have been in a MAY_BLOCK ScopedBlockingCall for more than
  // may_block_threshold().
  TimeDelta blocked_workers_poll_period_for_testing() const {
    return after_start().blocked_workers_poll_period;
  }

  // Starts calling AdjustMaxTasks() periodically on
  // |service_thread_task_runner_|.
  void ScheduleAdjustMaxTasks();

  // Schedules AdjustMaxTasks() through |executor| if required.
  void MaybeScheduleAdjustMaxTasksLockRequired(ScopedCommandsExecutor* executor)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns true if AdjustMaxTasks() should periodically be called on
  // |service_thread_task_runner_|.
  bool ShouldPeriodicallyAdjustMaxTasksLockRequired()
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Updates the minimum priority allowed to run below which tasks should
  // yield. This should be called whenever |num_running_tasks_| or |max_tasks|
  // changes, or when a new task is added to |priority_queue_|.
  void UpdateMinAllowedPriorityLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Increments/decrements the number of tasks of |priority| that are currently
  // running in this thread group. Must be invoked before/after running a task.
  void DecrementTasksRunningLockRequired(TaskPriority priority)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void IncrementTasksRunningLockRequired(TaskPriority priority)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Increments/decrements the number of tasks that can run in this thread
  // group. May only be called in a scope where a task is running with
  // |priority|.
  void DecrementMaxTasksLockRequired(TaskPriority priority)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);
  void IncrementMaxTasksLockRequired(TaskPriority priority)
      EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Values set at Start() and never modified afterwards.
  struct InitializedInStart {
    InitializedInStart();
    ~InitializedInStart();

#if DCHECK_IS_ON()
    // Set after all members of this struct are set.
    bool initialized = false;
#endif

    // Initial value of |max_tasks_|.
    size_t initial_max_tasks = 0;

    // Suggested reclaim time for workers.
    TimeDelta suggested_reclaim_time;

    // Environment to be initialized per worker.
    WorkerEnvironment worker_environment = WorkerEnvironment::NONE;

    scoped_refptr<SequencedTaskRunner> service_thread_task_runner;

    // Optional observer notified when a worker enters and exits its main.
    WorkerThreadObserver* worker_thread_observer = nullptr;

    bool may_block_without_delay;
    bool fixed_max_best_effort_tasks;

    // Threshold after which the max tasks is increased to compensate for a
    // worker that is within a MAY_BLOCK ScopedBlockingCall.
    TimeDelta may_block_threshold;

    // The period between calls to AdjustMaxTasks() when the thread group is at
    // capacity.
    TimeDelta blocked_workers_poll_period;
  } initialized_in_start_;

  InitializedInStart& in_start() {
#if DCHECK_IS_ON()
    DCHECK(!initialized_in_start_.initialized);
#endif
    return initialized_in_start_;
  }
  const InitializedInStart& after_start() const {
#if DCHECK_IS_ON()
    DCHECK(initialized_in_start_.initialized);
#endif
    return initialized_in_start_;
  }

  const std::string thread_group_label_;
  const ThreadPriority priority_hint_;

  // All workers owned by this thread group.
  std::vector<scoped_refptr<WorkerThread>> workers_ GUARDED_BY(lock_);

  // Maximum number of tasks of any priority / BEST_EFFORT priority that can
  // run concurrently in this thread group.
  size_t max_tasks_ GUARDED_BY(lock_) = 0;
  size_t max_best_effort_tasks_ GUARDED_BY(lock_) = 0;

  // Number of tasks of any priority / BEST_EFFORT priority that are currently
  // running in this thread group.
  size_t num_running_tasks_ GUARDED_BY(lock_) = 0;
  size_t num_running_best_effort_tasks_ GUARDED_BY(lock_) = 0;

  // Number of workers running a task of any priority / BEST_EFFORT priority
  // that are within the scope of a MAY_BLOCK ScopedBlockingCall but haven't
  // caused a max tasks increase yet.
  int num_unresolved_may_block_ GUARDED_BY(lock_) = 0;
  int num_unresolved_best_effort_may_block_ GUARDED_BY(lock_) = 0;

  // Stack of idle workers. Initially, all workers are on this stack. A worker
  // is removed from the stack before its WakeUp() function is called and when
  // it receives work from GetWork() (a worker calls GetWork() when its sleep
  // timeout expires, even if its WakeUp() method hasn't been called). A worker
  // is pushed on this stack when it receives nullptr from GetWork().
  WorkerThreadStack idle_workers_stack_ GUARDED_BY(lock_);

  // Signaled when a worker is added to the idle workers stack.
  std::unique_ptr<ConditionVariable> idle_workers_stack_cv_for_testing_
      GUARDED_BY(lock_);

  // Stack that contains the timestamps of when workers get cleaned up.
  // Timestamps get popped off the stack as new workers are added.
  base::stack<TimeTicks, std::vector<TimeTicks>> cleanup_timestamps_
      GUARDED_BY(lock_);

  // Whether an AdjustMaxTasks() task was posted to the service thread.
  bool adjust_max_tasks_posted_ GUARDED_BY(lock_) = false;

  // Indicates to the delegates that workers are not permitted to cleanup.
  bool worker_cleanup_disallowed_for_testing_ GUARDED_BY(lock_) = false;

  // Counts the number of workers cleaned up (went through
  // WorkerThreadDelegateImpl::OnMainExit()) since the last call to
  // WaitForWorkersCleanedUpForTesting() (or Start() if that wasn't called
  // yet). |some_workers_cleaned_up_for_testing_| is true if this was ever
  // incremented. Tests with a custom |suggested_reclaim_time_| can wait on a
  // specific number of workers being cleaned up via
  // WaitForWorkersCleanedUpForTesting().
  size_t num_workers_cleaned_up_for_testing_ GUARDED_BY(lock_) = 0;
#if DCHECK_IS_ON()
  bool some_workers_cleaned_up_for_testing_ GUARDED_BY(lock_) = false;
#endif

  // Signaled, if non-null, when |num_workers_cleaned_up_for_testing_| is
  // incremented.
  std::unique_ptr<ConditionVariable> num_workers_cleaned_up_for_testing_cv_
      GUARDED_BY(lock_);

  // Set at the start of JoinForTesting().
  bool join_for_testing_started_ GUARDED_BY(lock_) = false;

  // Cached HistogramBase pointers, can be accessed without holding |lock_|.
  // If |lock_| is held, add new samples using
  // ThreadGroupImpl::ScopedCommandsExecutor (increase
  // |scheduled_histogram_samples_| size as needed) to defer until after
  // |lock_| release, due to metrics system callbacks which may schedule tasks.

  // ThreadPool.DetachDuration.[thread group name] histogram. Intentionally
  // leaked.
  HistogramBase* const detach_duration_histogram_;

  // ThreadPool.NumTasksBeforeDetach.[thread group name] histogram.
  // Intentionally leaked.
  HistogramBase* const num_tasks_before_detach_histogram_;

  // ThreadPool.NumWorkers.[thread group name] histogram.
  // Intentionally leaked.
  HistogramBase* const num_workers_histogram_;

  // ThreadPool.NumActiveWorkers.[thread group name] histogram.
  // Intentionally leaked.
  HistogramBase* const num_active_workers_histogram_;

  // Ensures recently cleaned up workers (ref.
  // WorkerThreadDelegateImpl::CleanupLockRequired()) had time to exit as
  // they have a raw reference to |this| (and to TaskTracker) which can
  // otherwise result in racy use-after-frees per no longer being part of
  // |workers_| and hence not being explicitly joined in JoinForTesting():
  // https://crbug.com/810464. Uses AtomicRefCount to make its only public
  // method thread-safe.
  TrackedRefFactory<ThreadGroupImpl> tracked_ref_factory_;

  DISALLOW_COPY_AND_ASSIGN(ThreadGroupImpl);
};

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_THREAD_GROUP_IMPL_H_
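// Usage note (not from the repository): a hypothetical sketch of constructing
// and starting a ThreadGroupImpl. The |task_tracker_ref|, |delegate_ref| and
// |service_thread_task_runner| variables are assumed to exist elsewhere; the
// labels and numeric limits are illustrative, not defaults taken from the
// code above.
auto thread_group = std::make_unique<base::internal::ThreadGroupImpl>(
    "Foreground", "Foreground", base::ThreadPriority::NORMAL,
    task_tracker_ref, delegate_ref);
thread_group->Start(
    /*max_tasks=*/4,
    /*max_best_effort_tasks=*/2,
    /*suggested_reclaim_time=*/base::TimeDelta::FromSeconds(30),
    service_thread_task_runner,
    /*worker_thread_observer=*/nullptr,
    base::internal::ThreadGroup::WorkerEnvironment::NONE);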
@@ -0,0 +1,185 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/thread_pool/thread_group_native.h"

#include <algorithm>
#include <utility>

#include "base/system/sys_info.h"
#include "base/task/thread_pool/task_tracker.h"

namespace base {
namespace internal {

class ThreadGroupNative::ScopedCommandsExecutor
    : public ThreadGroup::BaseScopedCommandsExecutor {
 public:
  ScopedCommandsExecutor(ThreadGroupNative* outer) : outer_(outer) {}
  ~ScopedCommandsExecutor() {
    CheckedLock::AssertNoLockHeldOnCurrentThread();

    for (size_t i = 0; i < num_threadpool_work_to_submit_; ++i)
      outer_->SubmitWork();
  }

  // Sets the number of threadpool work items to submit upon destruction.
  void set_num_threadpool_work_to_submit(size_t num) {
    DCHECK_EQ(num_threadpool_work_to_submit_, 0U);
    num_threadpool_work_to_submit_ = num;
  }

 private:
  ThreadGroupNative* const outer_;
  size_t num_threadpool_work_to_submit_ = 0;

  DISALLOW_COPY_AND_ASSIGN(ScopedCommandsExecutor);
};

ThreadGroupNative::ThreadGroupNative(TrackedRef<TaskTracker> task_tracker,
                                     TrackedRef<Delegate> delegate,
                                     ThreadGroup* predecessor_thread_group)
    : ThreadGroup(std::move(task_tracker),
                  std::move(delegate),
                  predecessor_thread_group) {}

ThreadGroupNative::~ThreadGroupNative() {
#if DCHECK_IS_ON()
  // Verify join_for_testing has been called to ensure that there is no more
  // outstanding work. Otherwise, work may try to de-reference an invalid
  // pointer to this class.
  DCHECK(join_for_testing_returned_);
#endif
}

void ThreadGroupNative::Start(WorkerEnvironment worker_environment) {
  worker_environment_ = worker_environment;

  StartImpl();

  ScopedCommandsExecutor executor(this);
  CheckedAutoLock auto_lock(lock_);
  DCHECK(!started_);
  started_ = true;
  EnsureEnoughWorkersLockRequired(&executor);
}

void ThreadGroupNative::JoinForTesting() {
  {
    CheckedAutoLock auto_lock(lock_);
    priority_queue_.EnableFlushTaskSourcesOnDestroyForTesting();
  }

  JoinImpl();
#if DCHECK_IS_ON()
  DCHECK(!join_for_testing_returned_);
  join_for_testing_returned_ = true;
#endif
}

void ThreadGroupNative::RunNextTaskSourceImpl() {
  RegisteredTaskSource task_source = GetWork();

  if (task_source) {
    BindToCurrentThread();
    task_source = task_tracker_->RunAndPopNextTask(std::move(task_source));
    UnbindFromCurrentThread();

    if (task_source) {
      ScopedCommandsExecutor workers_executor(this);
      ScopedReenqueueExecutor reenqueue_executor;
      auto transaction_with_task_source =
          TransactionWithRegisteredTaskSource::FromTaskSource(
              std::move(task_source));
      CheckedAutoLock auto_lock(lock_);
      ReEnqueueTaskSourceLockRequired(&workers_executor, &reenqueue_executor,
                                      std::move(transaction_with_task_source));
    }
  }
}

void ThreadGroupNative::UpdateMinAllowedPriorityLockRequired() {
  // Tasks should yield as soon as there is work of higher priority in
  // |priority_queue_|.
  min_allowed_priority_.store(priority_queue_.IsEmpty()
                                  ? TaskPriority::BEST_EFFORT
                                  : priority_queue_.PeekSortKey().priority(),
                              std::memory_order_relaxed);
}

RegisteredTaskSource ThreadGroupNative::GetWork() {
  ScopedCommandsExecutor workers_executor(this);
  CheckedAutoLock auto_lock(lock_);
  DCHECK_GT(num_pending_threadpool_work_, 0U);
  --num_pending_threadpool_work_;

  RegisteredTaskSource task_source;
  TaskPriority priority;
  while (!task_source && !priority_queue_.IsEmpty()) {
    priority = priority_queue_.PeekSortKey().priority();
    // Enforce the CanRunPolicy.
    if (!task_tracker_->CanRunPriority(priority))
      return nullptr;

    task_source = TakeRegisteredTaskSource(&workers_executor);
  }
  UpdateMinAllowedPriorityLockRequired();
  return task_source;
}

void ThreadGroupNative::UpdateSortKey(TaskSource::Transaction transaction) {
  ScopedCommandsExecutor executor(this);
  UpdateSortKeyImpl(&executor, std::move(transaction));
}

void ThreadGroupNative::PushTaskSourceAndWakeUpWorkers(
    TransactionWithRegisteredTaskSource transaction_with_task_source) {
  ScopedCommandsExecutor executor(this);
  PushTaskSourceAndWakeUpWorkersImpl(&executor,
                                     std::move(transaction_with_task_source));
}

void ThreadGroupNative::EnsureEnoughWorkersLockRequired(
    BaseScopedCommandsExecutor* executor) {
  if (!started_)
    return;
  // Ensure that there is at least one pending threadpool work item per
  // TaskSource in the PriorityQueue.
  const size_t desired_num_pending_threadpool_work =
      GetNumAdditionalWorkersForBestEffortTaskSourcesLockRequired() +
      GetNumAdditionalWorkersForForegroundTaskSourcesLockRequired();

  if (desired_num_pending_threadpool_work > num_pending_threadpool_work_) {
    static_cast<ScopedCommandsExecutor*>(executor)
        ->set_num_threadpool_work_to_submit(
            desired_num_pending_threadpool_work - num_pending_threadpool_work_);
    num_pending_threadpool_work_ = desired_num_pending_threadpool_work;
  }
  // This function is called every time a task source is queued or re-enqueued,
  // hence the minimum priority needs to be updated.
  UpdateMinAllowedPriorityLockRequired();
}

size_t ThreadGroupNative::GetMaxConcurrentNonBlockedTasksDeprecated() const {
  // Native thread pools give us no control over the number of workers that are
  // active at one time. Consequently, we cannot report a true value here.
  // Instead, the values were chosen to match
  // ThreadPoolInstance::StartWithDefaultParams.
  const int num_cores = SysInfo::NumberOfProcessors();
  return std::max(3, num_cores - 1);
}

void ThreadGroupNative::ReportHeartbeatMetrics() const {
  // Native thread pools do not provide the capability to determine the
  // number of worker threads created.
}

void ThreadGroupNative::DidUpdateCanRunPolicy() {
  ScopedCommandsExecutor executor(this);
  CheckedAutoLock auto_lock(lock_);
  EnsureEnoughWorkersLockRequired(&executor);
}

}  // namespace internal
}  // namespace base
@@ -0,0 +1,84 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_THREAD_GROUP_NATIVE_H_
#define BASE_TASK_THREAD_POOL_THREAD_GROUP_NATIVE_H_

#include "base/base_export.h"
#include "base/synchronization/atomic_flag.h"
#include "base/task/thread_pool/thread_group.h"

namespace base {
namespace internal {

class BASE_EXPORT ThreadGroupNative : public ThreadGroup {
 public:
  // Destroying a ThreadGroupNative is not allowed in
  // production; it is always leaked. In tests, it can only be destroyed after
  // JoinForTesting() has returned.
  ~ThreadGroupNative() override;

  // Starts the thread group and allows tasks to begin running.
  void Start(WorkerEnvironment worker_environment = WorkerEnvironment::NONE);

  // ThreadGroup:
  void JoinForTesting() override;
  size_t GetMaxConcurrentNonBlockedTasksDeprecated() const override;
  void ReportHeartbeatMetrics() const override;
  void DidUpdateCanRunPolicy() override;

 protected:
  ThreadGroupNative(TrackedRef<TaskTracker> task_tracker,
                    TrackedRef<Delegate> delegate,
                    ThreadGroup* predecessor_thread_group);

  // Runs a task off the next task source on the |priority_queue_|. Called by
  // callbacks posted to platform native thread pools.
  void RunNextTaskSourceImpl();

  virtual void JoinImpl() = 0;
  virtual void StartImpl() = 0;
  virtual void SubmitWork() = 0;

  // Used to control the worker environment. Supports COM MTA on Windows.
  WorkerEnvironment worker_environment_ = WorkerEnvironment::NONE;

 private:
  class ScopedCommandsExecutor;

  // ThreadGroup:
  void UpdateSortKey(TaskSource::Transaction transaction) override;
  void PushTaskSourceAndWakeUpWorkers(
      TransactionWithRegisteredTaskSource transaction_with_task_source)
      override;
  void EnsureEnoughWorkersLockRequired(BaseScopedCommandsExecutor* executor)
      override EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Updates the minimum priority allowed to run below which tasks should
  // yield, based on task sources in |priority_queue_|.
  void UpdateMinAllowedPriorityLockRequired() EXCLUSIVE_LOCKS_REQUIRED(lock_);

  // Returns the top TaskSource off the |priority_queue_|. Returns nullptr
  // if the |priority_queue_| is empty.
  RegisteredTaskSource GetWork();

  // Indicates whether the thread group has been started yet.
  bool started_ GUARDED_BY(lock_) = false;

  // Number of threadpool work items submitted to the thread group which
  // haven't popped a TaskSource from the PriorityQueue yet.
  size_t num_pending_threadpool_work_ GUARDED_BY(lock_) = 0;

#if DCHECK_IS_ON()
  // Set once JoinForTesting() has returned.
  bool join_for_testing_returned_ = false;
#endif

  DISALLOW_COPY_AND_ASSIGN(ThreadGroupNative);
};

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_THREAD_GROUP_NATIVE_H_
@@ -0,0 +1,54 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_THREAD_GROUP_NATIVE_MAC_H_
#define BASE_TASK_THREAD_POOL_THREAD_GROUP_NATIVE_MAC_H_

#include <dispatch/dispatch.h>

#include "base/base_export.h"
#include "base/mac/scoped_dispatch_object.h"
#include "base/task/thread_pool/thread_group_native.h"

namespace base {
namespace internal {

// A ThreadGroup implementation backed by libdispatch.
//
// libdispatch official documentation:
// https://developer.apple.com/documentation/dispatch
//
// Guides:
// https://apple.github.io/swift-corelibs-libdispatch/tutorial/
// https://developer.apple.com/library/archive/documentation/General/Conceptual/ConcurrencyProgrammingGuide/OperationQueues/OperationQueues.html
class BASE_EXPORT ThreadGroupNativeMac : public ThreadGroupNative {
 public:
  ThreadGroupNativeMac(TrackedRef<TaskTracker> task_tracker,
                       TrackedRef<Delegate> delegate,
                       ThreadGroup* predecessor_thread_group = nullptr);

  ~ThreadGroupNativeMac() override;

 private:
  // ThreadGroupNative:
  void JoinImpl() override;
  void StartImpl() override;
  void SubmitWork() override;

  // Dispatch queue on which work is scheduled. Backed by a shared thread pool
  // managed by libdispatch.
  ScopedDispatchObject<dispatch_queue_t> queue_;

  // Dispatch group to enable synchronization.
  ScopedDispatchObject<dispatch_group_t> group_;

  DISALLOW_COPY_AND_ASSIGN(ThreadGroupNativeMac);
};

using ThreadGroupNativeImpl = ThreadGroupNativeMac;

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_THREAD_GROUP_NATIVE_MAC_H_
@ -0,0 +1,41 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/thread_pool/thread_group_native_mac.h"

#include "base/task/thread_pool/task_tracker.h"

namespace base {
namespace internal {

ThreadGroupNativeMac::ThreadGroupNativeMac(
    TrackedRef<TaskTracker> task_tracker,
    TrackedRef<Delegate> delegate,
    ThreadGroup* predecessor_thread_group)
    : ThreadGroupNative(std::move(task_tracker),
                        std::move(delegate),
                        predecessor_thread_group) {}

ThreadGroupNativeMac::~ThreadGroupNativeMac() {}

void ThreadGroupNativeMac::StartImpl() {
  queue_.reset(dispatch_queue_create("org.chromium.base.ThreadPool.ThreadGroup",
                                     DISPATCH_QUEUE_CONCURRENT));
  group_.reset(dispatch_group_create());
}

void ThreadGroupNativeMac::JoinImpl() {
  dispatch_group_wait(group_, DISPATCH_TIME_FOREVER);
}

void ThreadGroupNativeMac::SubmitWork() {
  // TODO(adityakeerthi): Handle priorities by having multiple dispatch queues
  // with different qualities-of-service.
  dispatch_group_async(group_, queue_, ^{
    RunNextTaskSourceImpl();
  });
}

}  // namespace internal
}  // namespace base
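A minimal standalone sketch (not part of this CL) of the libdispatch pattern
ThreadGroupNativeMac relies on: work items are funneled through a dispatch
group onto a concurrent queue, and joining waits on the group. It assumes
Objective-C++ with blocks; under ARC the dispatch_release() calls would be
omitted.

#include <dispatch/dispatch.h>
#include <cstdio>

int main() {
  dispatch_queue_t queue =
      dispatch_queue_create("org.example.Sketch", DISPATCH_QUEUE_CONCURRENT);
  dispatch_group_t group = dispatch_group_create();

  for (int i = 0; i < 4; ++i) {
    // Counterpart of SubmitWork(): one block per work item.
    dispatch_group_async(group, queue, ^{
      std::printf("work item ran\n");  // stands in for RunNextTaskSourceImpl()
    });
  }

  // Counterpart of JoinImpl(): block until every submitted item has run.
  dispatch_group_wait(group, DISPATCH_TIME_FOREVER);

  dispatch_release(group);
  dispatch_release(queue);
  return 0;
}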
@ -0,0 +1,101 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/thread_pool/thread_group_native_win.h"

#include "base/optional.h"
#include "base/task/thread_pool/task_tracker.h"
#include "base/threading/scoped_blocking_call_internal.h"
#include "base/win/scoped_com_initializer.h"

namespace base {
namespace internal {

class ThreadGroupNativeWin::ScopedCallbackMayRunLongObserver
    : public BlockingObserver {
 public:
  ScopedCallbackMayRunLongObserver(PTP_CALLBACK_INSTANCE callback)
      : callback_(callback) {
    SetBlockingObserverForCurrentThread(this);
  }

  ~ScopedCallbackMayRunLongObserver() override {
    ClearBlockingObserverForCurrentThread();
  }

  // BlockingObserver:
  void BlockingStarted(BlockingType blocking_type) override {
    ::CallbackMayRunLong(callback_);
    // CallbackMayRunLong should not be called twice.
    ClearBlockingObserverForCurrentThread();
  }

  void BlockingTypeUpgraded() override {}
  void BlockingEnded() override {}

 private:
  PTP_CALLBACK_INSTANCE callback_;

  DISALLOW_COPY_AND_ASSIGN(ScopedCallbackMayRunLongObserver);
};

ThreadGroupNativeWin::ThreadGroupNativeWin(
    TrackedRef<TaskTracker> task_tracker,
    TrackedRef<Delegate> delegate,
    ThreadGroup* predecessor_thread_group)
    : ThreadGroupNative(std::move(task_tracker),
                        std::move(delegate),
                        predecessor_thread_group) {}

ThreadGroupNativeWin::~ThreadGroupNativeWin() {
  ::DestroyThreadpoolEnvironment(&environment_);
  ::CloseThreadpoolWork(work_);
  ::CloseThreadpool(pool_);
}

void ThreadGroupNativeWin::StartImpl() {
  ::InitializeThreadpoolEnvironment(&environment_);

  pool_ = ::CreateThreadpool(nullptr);
  DCHECK(pool_) << "LastError: " << ::GetLastError();
  ::SetThreadpoolThreadMinimum(pool_, 1);
  ::SetThreadpoolThreadMaximum(pool_, 256);

  work_ = ::CreateThreadpoolWork(&RunNextTaskSource, this, &environment_);
  DCHECK(work_) << "LastError: " << GetLastError();
  ::SetThreadpoolCallbackPool(&environment_, pool_);
}

void ThreadGroupNativeWin::JoinImpl() {
  ::WaitForThreadpoolWorkCallbacks(work_, true);
}

void ThreadGroupNativeWin::SubmitWork() {
  // TODO(fdoray): Handle priorities by having different work objects and using
  // SetThreadpoolCallbackPriority().
  ::SubmitThreadpoolWork(work_);
}

// static
void CALLBACK
ThreadGroupNativeWin::RunNextTaskSource(PTP_CALLBACK_INSTANCE callback_instance,
                                        void* thread_group_windows_impl,
                                        PTP_WORK) {
  auto* thread_group =
      static_cast<ThreadGroupNativeWin*>(thread_group_windows_impl);

  // Windows Thread Pool API best practices state that all resources created
  // in the callback function should be cleaned up before returning from the
  // function. This includes COM initialization.
  auto win_thread_environment = thread_group->GetScopedWindowsThreadEnvironment(
      thread_group->worker_environment_);

  ScopedCallbackMayRunLongObserver callback_may_run_long_observer(
      callback_instance);

  thread_group->RunNextTaskSourceImpl();
}

}  // namespace internal
}  // namespace base
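A minimal standalone sketch (not part of this CL) of the Windows Thread Pool
API calls used by ThreadGroupNativeWin; the thread counts and the DoWork
callback are illustrative, not values from the file above.

#include <windows.h>
#include <cstdio>

namespace {
void CALLBACK DoWork(PTP_CALLBACK_INSTANCE, void* context, PTP_WORK) {
  std::printf("work item %d ran\n", *static_cast<int*>(context));
}
}  // namespace

int main() {
  TP_CALLBACK_ENVIRON environment;
  ::InitializeThreadpoolEnvironment(&environment);

  PTP_POOL pool = ::CreateThreadpool(nullptr);
  ::SetThreadpoolThreadMinimum(pool, 1);
  ::SetThreadpoolThreadMaximum(pool, 4);
  ::SetThreadpoolCallbackPool(&environment, pool);

  int context = 42;
  PTP_WORK work = ::CreateThreadpoolWork(&DoWork, &context, &environment);
  for (int i = 0; i < 3; ++i)
    ::SubmitThreadpoolWork(work);  // each submission runs DoWork once

  // Counterpart of JoinImpl(): wait for all outstanding callbacks.
  ::WaitForThreadpoolWorkCallbacks(work, FALSE);

  ::CloseThreadpoolWork(work);
  ::CloseThreadpool(pool);
  ::DestroyThreadpoolEnvironment(&environment);
  return 0;
}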
@ -0,0 +1,69 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_THREAD_GROUP_NATIVE_WIN_H_
#define BASE_TASK_THREAD_POOL_THREAD_GROUP_NATIVE_WIN_H_

#include <windows.h>

#include "base/base_export.h"
#include "base/task/thread_pool/thread_group_native.h"

namespace base {
namespace internal {

// A ThreadGroup implementation backed by the Windows Thread Pool API.
//
// Windows Thread Pool API official documentation:
// https://msdn.microsoft.com/en-us/library/windows/desktop/ms686766(v=vs.85).aspx
//
// Blog posts on the Windows Thread Pool API:
// https://msdn.microsoft.com/magazine/hh335066.aspx
// https://msdn.microsoft.com/magazine/hh394144.aspx
// https://msdn.microsoft.com/magazine/hh456398.aspx
// https://msdn.microsoft.com/magazine/hh547107.aspx
// https://msdn.microsoft.com/magazine/hh580731.aspx
class BASE_EXPORT ThreadGroupNativeWin : public ThreadGroupNative {
 public:
  ThreadGroupNativeWin(TrackedRef<TaskTracker> task_tracker,
                       TrackedRef<Delegate> delegate,
                       ThreadGroup* predecessor_thread_group = nullptr);

  ~ThreadGroupNativeWin() override;

 private:
  class ScopedCallbackMayRunLongObserver;

  // Callback that gets run by |pool_|.
  static void CALLBACK
  RunNextTaskSource(PTP_CALLBACK_INSTANCE callback_instance,
                    void* thread_group_windows_impl,
                    PTP_WORK);

  // ThreadGroupNative:
  void JoinImpl() override;
  void StartImpl() override;
  void SubmitWork() override;

  // Thread pool object that |work_| gets executed on.
  PTP_POOL pool_ = nullptr;

  // Callback environment. |pool_| is associated with |environment_| so that
  // work objects using this environment run on |pool_|.
  TP_CALLBACK_ENVIRON environment_ = {};

  // Work object that executes RunNextTaskSource. It has a pointer to the
  // current |ThreadGroupNativeWin| and a pointer to |environment_| bound
  // to it.
  PTP_WORK work_ = nullptr;

  DISALLOW_COPY_AND_ASSIGN(ThreadGroupNativeWin);
};

using ThreadGroupNativeImpl = ThreadGroupNativeWin;

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_THREAD_GROUP_NATIVE_WIN_H_
@ -0,0 +1,542 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#include "base/task/thread_pool/thread_pool_impl.h"
|
||||
|
||||
#include <algorithm>
|
||||
#include <string>
|
||||
#include <utility>
|
||||
|
||||
#include "base/base_switches.h"
|
||||
#include "base/bind.h"
|
||||
#include "base/bind_helpers.h"
|
||||
#include "base/command_line.h"
|
||||
#include "base/compiler_specific.h"
|
||||
#include "base/feature_list.h"
|
||||
#include "base/message_loop/message_pump_type.h"
|
||||
#include "base/metrics/field_trial_params.h"
|
||||
#include "base/no_destructor.h"
|
||||
#include "base/stl_util.h"
|
||||
#include "base/strings/string_util.h"
|
||||
#include "base/task/scoped_set_task_priority_for_current_thread.h"
|
||||
#include "base/task/task_features.h"
|
||||
#include "base/task/thread_pool/pooled_parallel_task_runner.h"
|
||||
#include "base/task/thread_pool/pooled_sequenced_task_runner.h"
|
||||
#include "base/task/thread_pool/sequence_sort_key.h"
|
||||
#include "base/task/thread_pool/service_thread.h"
|
||||
#include "base/task/thread_pool/task.h"
|
||||
#include "base/task/thread_pool/task_source.h"
|
||||
#include "base/task/thread_pool/thread_group_impl.h"
|
||||
#include "base/threading/platform_thread.h"
|
||||
#include "base/time/time.h"
|
||||
|
||||
#if defined(OS_WIN)
|
||||
#include "base/task/thread_pool/thread_group_native_win.h"
|
||||
#endif
|
||||
|
||||
#if defined(OS_MACOSX)
|
||||
#include "base/task/thread_pool/thread_group_native_mac.h"
|
||||
#endif
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
namespace {
|
||||
|
||||
constexpr EnvironmentParams kForegroundPoolEnvironmentParams{
|
||||
"Foreground", base::ThreadPriority::NORMAL};
|
||||
|
||||
constexpr EnvironmentParams kBackgroundPoolEnvironmentParams{
|
||||
"Background", base::ThreadPriority::BACKGROUND};
|
||||
|
||||
constexpr int kMaxBestEffortTasks = 2;
|
||||
|
||||
// Indicates whether BEST_EFFORT tasks are disabled by a command line switch.
|
||||
bool HasDisableBestEffortTasksSwitch() {
|
||||
// The CommandLine might not be initialized if ThreadPool is initialized in a
|
||||
// dynamic library which doesn't have access to argc/argv.
|
||||
return CommandLine::InitializedForCurrentProcess() &&
|
||||
CommandLine::ForCurrentProcess()->HasSwitch(
|
||||
switches::kDisableBestEffortTasks);
|
||||
}
|
||||
|
||||
} // namespace
|
||||
|
||||
ThreadPoolImpl::ThreadPoolImpl(StringPiece histogram_label)
|
||||
: ThreadPoolImpl(histogram_label,
|
||||
std::make_unique<TaskTrackerImpl>(histogram_label)) {}
|
||||
|
||||
ThreadPoolImpl::ThreadPoolImpl(StringPiece histogram_label,
|
||||
std::unique_ptr<TaskTrackerImpl> task_tracker)
|
||||
: task_tracker_(std::move(task_tracker)),
|
||||
service_thread_(std::make_unique<ServiceThread>(
|
||||
task_tracker_.get(),
|
||||
BindRepeating(&ThreadPoolImpl::ReportHeartbeatMetrics,
|
||||
Unretained(this)))),
|
||||
single_thread_task_runner_manager_(task_tracker_->GetTrackedRef(),
|
||||
&delayed_task_manager_),
|
||||
has_disable_best_effort_switch_(HasDisableBestEffortTasksSwitch()),
|
||||
tracked_ref_factory_(this) {
|
||||
foreground_thread_group_ = std::make_unique<ThreadGroupImpl>(
|
||||
histogram_label.empty()
|
||||
? std::string()
|
||||
: JoinString(
|
||||
{histogram_label, kForegroundPoolEnvironmentParams.name_suffix},
|
||||
"."),
|
||||
kForegroundPoolEnvironmentParams.name_suffix,
|
||||
kForegroundPoolEnvironmentParams.priority_hint,
|
||||
task_tracker_->GetTrackedRef(), tracked_ref_factory_.GetTrackedRef());
|
||||
|
||||
if (CanUseBackgroundPriorityForWorkerThread()) {
|
||||
background_thread_group_ = std::make_unique<ThreadGroupImpl>(
|
||||
histogram_label.empty()
|
||||
? std::string()
|
||||
: JoinString({histogram_label,
|
||||
kBackgroundPoolEnvironmentParams.name_suffix},
|
||||
"."),
|
||||
kBackgroundPoolEnvironmentParams.name_suffix,
|
||||
kBackgroundPoolEnvironmentParams.priority_hint,
|
||||
task_tracker_->GetTrackedRef(), tracked_ref_factory_.GetTrackedRef());
|
||||
}
|
||||
}
|
||||
|
||||
ThreadPoolImpl::~ThreadPoolImpl() {
|
||||
#if DCHECK_IS_ON()
|
||||
DCHECK(join_for_testing_returned_.IsSet());
|
||||
#endif
|
||||
|
||||
// Reset thread groups to release held TrackedRefs, which block teardown.
|
||||
foreground_thread_group_.reset();
|
||||
background_thread_group_.reset();
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::Start(const ThreadPoolInstance::InitParams& init_params,
|
||||
WorkerThreadObserver* worker_thread_observer) {
|
||||
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
|
||||
DCHECK(!started_);
|
||||
|
||||
internal::InitializeThreadPrioritiesFeature();
|
||||
|
||||
// The max number of concurrent BEST_EFFORT tasks is |kMaxBestEffortTasks|,
|
||||
// unless the max number of foreground threads is lower.
|
||||
const int max_best_effort_tasks =
|
||||
std::min(kMaxBestEffortTasks, init_params.max_num_foreground_threads);
|
||||
|
||||
// This is set in Start() and not in the constructor because variation params
|
||||
// are usually not ready when ThreadPoolImpl is instantiated in a process.
|
||||
if (FeatureList::IsEnabled(kAllTasksUserBlocking))
|
||||
all_tasks_user_blocking_.Set();
|
||||
|
||||
#if HAS_NATIVE_THREAD_POOL()
|
||||
if (FeatureList::IsEnabled(kUseNativeThreadPool)) {
|
||||
std::unique_ptr<ThreadGroup> pool = std::move(foreground_thread_group_);
|
||||
foreground_thread_group_ = std::make_unique<ThreadGroupNativeImpl>(
|
||||
task_tracker_->GetTrackedRef(), tracked_ref_factory_.GetTrackedRef(),
|
||||
pool.get());
|
||||
pool->InvalidateAndHandoffAllTaskSourcesToOtherThreadGroup(
|
||||
foreground_thread_group_.get());
|
||||
}
|
||||
#endif
|
||||
|
||||
// Start the service thread. On platforms that support it (POSIX except NaCL
|
||||
// SFI), the service thread runs a MessageLoopForIO which is used to support
|
||||
// FileDescriptorWatcher in the scope in which tasks run.
|
||||
ServiceThread::Options service_thread_options;
|
||||
service_thread_options.message_pump_type =
|
||||
#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
|
||||
MessagePumpType::IO;
|
||||
#else
|
||||
MessagePumpType::DEFAULT;
|
||||
#endif
|
||||
service_thread_options.timer_slack = TIMER_SLACK_MAXIMUM;
|
||||
CHECK(service_thread_->StartWithOptions(service_thread_options));
|
||||
|
||||
#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
|
||||
// Needs to happen after starting the service thread to get its
|
||||
// task_runner().
|
||||
task_tracker_->set_io_thread_task_runner(service_thread_->task_runner());
|
||||
#endif // defined(OS_POSIX) && !defined(OS_NACL_SFI)
|
||||
|
||||
// Update the CanRunPolicy based on |has_disable_best_effort_switch_|.
|
||||
UpdateCanRunPolicy();
|
||||
|
||||
// Needs to happen after starting the service thread to get its task_runner().
|
||||
auto service_thread_task_runner = service_thread_->task_runner();
|
||||
delayed_task_manager_.Start(service_thread_task_runner);
|
||||
|
||||
single_thread_task_runner_manager_.Start(worker_thread_observer);
|
||||
|
||||
ThreadGroup::WorkerEnvironment worker_environment;
|
||||
switch (init_params.common_thread_pool_environment) {
|
||||
case InitParams::CommonThreadPoolEnvironment::DEFAULT:
|
||||
worker_environment = ThreadGroup::WorkerEnvironment::NONE;
|
||||
break;
|
||||
#if defined(OS_WIN)
|
||||
case InitParams::CommonThreadPoolEnvironment::COM_MTA:
|
||||
worker_environment = ThreadGroup::WorkerEnvironment::COM_MTA;
|
||||
break;
|
||||
case InitParams::CommonThreadPoolEnvironment::
|
||||
DEPRECATED_COM_STA_IN_FOREGROUND_GROUP:
|
||||
worker_environment = ThreadGroup::WorkerEnvironment::COM_STA;
|
||||
break;
|
||||
#endif
|
||||
}
|
||||
|
||||
const base::TimeDelta suggested_reclaim_time =
|
||||
FeatureList::IsEnabled(kUseFiveMinutesThreadReclaimTime)
|
||||
? base::TimeDelta::FromMinutes(5)
|
||||
: init_params.suggested_reclaim_time;
|
||||
|
||||
#if HAS_NATIVE_THREAD_POOL()
|
||||
if (FeatureList::IsEnabled(kUseNativeThreadPool)) {
|
||||
static_cast<ThreadGroupNative*>(foreground_thread_group_.get())
|
||||
->Start(worker_environment);
|
||||
} else
|
||||
#endif
|
||||
{
|
||||
// On platforms that can't use the background thread priority, best-effort
|
||||
// tasks run in foreground pools. A cap is set on the number of best-effort
|
||||
// tasks that can run in foreground pools to ensure that there is always
|
||||
// room for incoming foreground tasks and to minimize the performance impact
|
||||
// of best-effort tasks.
|
||||
static_cast<ThreadGroupImpl*>(foreground_thread_group_.get())
|
||||
->Start(init_params.max_num_foreground_threads, max_best_effort_tasks,
|
||||
suggested_reclaim_time, service_thread_task_runner,
|
||||
worker_thread_observer, worker_environment);
|
||||
}
|
||||
|
||||
if (background_thread_group_) {
|
||||
background_thread_group_->Start(
|
||||
max_best_effort_tasks, max_best_effort_tasks, suggested_reclaim_time,
|
||||
service_thread_task_runner, worker_thread_observer,
|
||||
#if defined(OS_WIN)
|
||||
// COM STA is a backward-compatibility feature for the foreground thread
|
||||
// group only.
|
||||
worker_environment == ThreadGroup::WorkerEnvironment::COM_STA
|
||||
? ThreadGroup::WorkerEnvironment::NONE
|
||||
:
|
||||
#endif
|
||||
worker_environment);
|
||||
}
|
||||
|
||||
started_ = true;
|
||||
}
|
||||
|
||||
bool ThreadPoolImpl::PostDelayedTask(const Location& from_here,
|
||||
const TaskTraits& traits,
|
||||
OnceClosure task,
|
||||
TimeDelta delay) {
|
||||
// Post |task| as part of a one-off single-task Sequence.
|
||||
const TaskTraits new_traits = VerifyAndAjustIncomingTraits(traits);
|
||||
return PostTaskWithSequence(
|
||||
Task(from_here, std::move(task), delay),
|
||||
MakeRefCounted<Sequence>(new_traits, nullptr,
|
||||
TaskSourceExecutionMode::kParallel));
|
||||
}
|
||||
|
||||
scoped_refptr<TaskRunner> ThreadPoolImpl::CreateTaskRunner(
|
||||
const TaskTraits& traits) {
|
||||
const TaskTraits new_traits = VerifyAndAjustIncomingTraits(traits);
|
||||
return MakeRefCounted<PooledParallelTaskRunner>(new_traits, this);
|
||||
}
|
||||
|
||||
scoped_refptr<SequencedTaskRunner> ThreadPoolImpl::CreateSequencedTaskRunner(
|
||||
const TaskTraits& traits) {
|
||||
const TaskTraits new_traits = VerifyAndAjustIncomingTraits(traits);
|
||||
return MakeRefCounted<PooledSequencedTaskRunner>(new_traits, this);
|
||||
}
|
||||
|
||||
scoped_refptr<SingleThreadTaskRunner>
|
||||
ThreadPoolImpl::CreateSingleThreadTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode) {
|
||||
return single_thread_task_runner_manager_.CreateSingleThreadTaskRunner(
|
||||
VerifyAndAjustIncomingTraits(traits), thread_mode);
|
||||
}
|
||||
|
||||
#if defined(OS_WIN)
|
||||
scoped_refptr<SingleThreadTaskRunner> ThreadPoolImpl::CreateCOMSTATaskRunner(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode) {
|
||||
return single_thread_task_runner_manager_.CreateCOMSTATaskRunner(
|
||||
VerifyAndAjustIncomingTraits(traits), thread_mode);
|
||||
}
|
||||
#endif // defined(OS_WIN)
|
||||
|
||||
scoped_refptr<UpdateableSequencedTaskRunner>
|
||||
ThreadPoolImpl::CreateUpdateableSequencedTaskRunner(const TaskTraits& traits) {
|
||||
const TaskTraits new_traits = VerifyAndAjustIncomingTraits(traits);
|
||||
return MakeRefCounted<PooledSequencedTaskRunner>(new_traits, this);
|
||||
}
|
||||
|
||||
Optional<TimeTicks> ThreadPoolImpl::NextScheduledRunTimeForTesting() const {
|
||||
if (task_tracker_->HasIncompleteTaskSourcesForTesting())
|
||||
return TimeTicks::Now();
|
||||
return delayed_task_manager_.NextScheduledRunTime();
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::ProcessRipeDelayedTasksForTesting() {
|
||||
delayed_task_manager_.ProcessRipeTasks();
|
||||
}
|
||||
|
||||
int ThreadPoolImpl::GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
|
||||
const TaskTraits& traits) const {
|
||||
// This method does not support getting the maximum number of BEST_EFFORT
|
||||
// tasks that can run concurrently in a pool.
|
||||
DCHECK_NE(traits.priority(), TaskPriority::BEST_EFFORT);
|
||||
return GetThreadGroupForTraits(traits)
|
||||
->GetMaxConcurrentNonBlockedTasksDeprecated();
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::Shutdown() {
|
||||
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
|
||||
|
||||
// Stop() the ServiceThread before triggering shutdown. This ensures that no
// more delayed tasks or file descriptor watches will trigger during shutdown
// (preventing http://crbug.com/698140). Since none of these asynchronous
// tasks are guaranteed to happen anyway, stopping right away is valid
// behavior and avoids the more complex alternative of shutting down the
// service thread atomically during TaskTracker shutdown.
|
||||
service_thread_->Stop();
|
||||
|
||||
task_tracker_->StartShutdown();
|
||||
|
||||
// Allow all tasks to run. Done after initiating shutdown to ensure that non-
|
||||
// BLOCK_SHUTDOWN tasks don't get a chance to run and that BLOCK_SHUTDOWN
|
||||
// tasks run with a normal thread priority.
|
||||
UpdateCanRunPolicy();
|
||||
|
||||
task_tracker_->CompleteShutdown();
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::FlushForTesting() {
|
||||
task_tracker_->FlushForTesting();
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::FlushAsyncForTesting(OnceClosure flush_callback) {
|
||||
task_tracker_->FlushAsyncForTesting(std::move(flush_callback));
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::JoinForTesting() {
|
||||
#if DCHECK_IS_ON()
|
||||
DCHECK(!join_for_testing_returned_.IsSet());
|
||||
#endif
|
||||
// The service thread must be stopped before the workers are joined, otherwise
|
||||
// tasks scheduled by the DelayedTaskManager might be posted between joining
|
||||
// those workers and stopping the service thread which will cause a CHECK. See
|
||||
// https://crbug.com/771701.
|
||||
service_thread_->Stop();
|
||||
single_thread_task_runner_manager_.JoinForTesting();
|
||||
foreground_thread_group_->JoinForTesting();
|
||||
if (background_thread_group_)
|
||||
background_thread_group_->JoinForTesting();
|
||||
#if DCHECK_IS_ON()
|
||||
join_for_testing_returned_.Set();
|
||||
#endif
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::BeginFence() {
|
||||
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
|
||||
++num_fences_;
|
||||
UpdateCanRunPolicy();
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::EndFence() {
|
||||
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
|
||||
DCHECK_GT(num_fences_, 0);
|
||||
--num_fences_;
|
||||
UpdateCanRunPolicy();
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::BeginBestEffortFence() {
|
||||
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
|
||||
++num_best_effort_fences_;
|
||||
UpdateCanRunPolicy();
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::EndBestEffortFence() {
|
||||
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
|
||||
DCHECK_GT(num_best_effort_fences_, 0);
|
||||
--num_best_effort_fences_;
|
||||
UpdateCanRunPolicy();
|
||||
}
|
||||
|
||||
bool ThreadPoolImpl::PostTaskWithSequenceNow(Task task,
|
||||
scoped_refptr<Sequence> sequence) {
|
||||
auto transaction = sequence->BeginTransaction();
|
||||
const bool sequence_should_be_queued = transaction.WillPushTask();
|
||||
RegisteredTaskSource task_source;
|
||||
if (sequence_should_be_queued) {
|
||||
task_source = task_tracker_->RegisterTaskSource(sequence);
|
||||
// We shouldn't push |task| if we're not allowed to queue |task_source|.
|
||||
if (!task_source)
|
||||
return false;
|
||||
}
|
||||
if (!task_tracker_->WillPostTaskNow(task, transaction.traits().priority()))
|
||||
return false;
|
||||
transaction.PushTask(std::move(task));
|
||||
if (task_source) {
|
||||
const TaskTraits traits = transaction.traits();
|
||||
GetThreadGroupForTraits(traits)->PushTaskSourceAndWakeUpWorkers(
|
||||
{std::move(task_source), std::move(transaction)});
|
||||
}
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ThreadPoolImpl::PostTaskWithSequence(Task task,
|
||||
scoped_refptr<Sequence> sequence) {
|
||||
// Use CHECK instead of DCHECK to crash earlier. See http://crbug.com/711167
|
||||
// for details.
|
||||
CHECK(task.task);
|
||||
DCHECK(sequence);
|
||||
|
||||
if (!task_tracker_->WillPostTask(&task, sequence->shutdown_behavior()))
|
||||
return false;
|
||||
|
||||
if (task.delayed_run_time.is_null()) {
|
||||
return PostTaskWithSequenceNow(std::move(task), std::move(sequence));
|
||||
} else {
|
||||
// It's safe to take a ref on this pointer since the caller must have a ref
|
||||
// to the TaskRunner in order to post.
|
||||
scoped_refptr<TaskRunner> task_runner = sequence->task_runner();
|
||||
delayed_task_manager_.AddDelayedTask(
|
||||
std::move(task),
|
||||
BindOnce(
|
||||
[](scoped_refptr<Sequence> sequence,
|
||||
ThreadPoolImpl* thread_pool_impl, Task task) {
|
||||
thread_pool_impl->PostTaskWithSequenceNow(std::move(task),
|
||||
std::move(sequence));
|
||||
},
|
||||
std::move(sequence), Unretained(this)),
|
||||
std::move(task_runner));
|
||||
}
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
bool ThreadPoolImpl::ShouldYield(const TaskSource* task_source) const {
|
||||
const TaskPriority priority = task_source->priority_racy();
|
||||
auto* const thread_group =
|
||||
GetThreadGroupForTraits({priority, task_source->thread_policy()});
|
||||
// A task whose priority changed and is now running in the wrong thread group
|
||||
// should yield so it's rescheduled in the right one.
|
||||
if (!thread_group->IsBoundToCurrentThread())
|
||||
return true;
|
||||
return GetThreadGroupForTraits({priority, task_source->thread_policy()})
|
||||
->ShouldYield(priority);
|
||||
}
|
||||
|
||||
bool ThreadPoolImpl::EnqueueJobTaskSource(
|
||||
scoped_refptr<JobTaskSource> task_source) {
|
||||
auto registered_task_source =
|
||||
task_tracker_->RegisterTaskSource(std::move(task_source));
|
||||
if (!registered_task_source)
|
||||
return false;
|
||||
auto transaction = registered_task_source->BeginTransaction();
|
||||
const TaskTraits traits = transaction.traits();
|
||||
GetThreadGroupForTraits(traits)->PushTaskSourceAndWakeUpWorkers(
|
||||
{std::move(registered_task_source), std::move(transaction)});
|
||||
return true;
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::RemoveJobTaskSource(
|
||||
scoped_refptr<JobTaskSource> task_source) {
|
||||
auto transaction = task_source->BeginTransaction();
|
||||
ThreadGroup* const current_thread_group =
|
||||
GetThreadGroupForTraits(transaction.traits());
|
||||
current_thread_group->RemoveTaskSource(*task_source);
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::UpdatePriority(scoped_refptr<TaskSource> task_source,
|
||||
TaskPriority priority) {
|
||||
auto transaction = task_source->BeginTransaction();
|
||||
|
||||
if (transaction.traits().priority() == priority)
|
||||
return;
|
||||
|
||||
if (transaction.traits().priority() == TaskPriority::BEST_EFFORT) {
|
||||
DCHECK(transaction.traits().thread_policy_set_explicitly())
|
||||
<< "A ThreadPolicy must be specified in the TaskTraits of an "
|
||||
"UpdateableSequencedTaskRunner whose priority is increased from "
|
||||
"BEST_EFFORT. See ThreadPolicy documentation.";
|
||||
}
|
||||
|
||||
ThreadGroup* const current_thread_group =
|
||||
GetThreadGroupForTraits(transaction.traits());
|
||||
transaction.UpdatePriority(priority);
|
||||
ThreadGroup* const new_thread_group =
|
||||
GetThreadGroupForTraits(transaction.traits());
|
||||
|
||||
if (new_thread_group == current_thread_group) {
|
||||
// |task_source|'s position needs to be updated within its current thread
|
||||
// group.
|
||||
current_thread_group->UpdateSortKey(std::move(transaction));
|
||||
} else {
|
||||
// |task_source| is changing thread groups; remove it from its current
|
||||
// thread group and reenqueue it.
|
||||
auto registered_task_source =
|
||||
current_thread_group->RemoveTaskSource(*task_source);
|
||||
if (registered_task_source) {
|
||||
DCHECK(task_source);
|
||||
new_thread_group->PushTaskSourceAndWakeUpWorkers(
|
||||
{std::move(registered_task_source), std::move(transaction)});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const ThreadGroup* ThreadPoolImpl::GetThreadGroupForTraits(
|
||||
const TaskTraits& traits) const {
|
||||
return const_cast<ThreadPoolImpl*>(this)->GetThreadGroupForTraits(traits);
|
||||
}
|
||||
|
||||
ThreadGroup* ThreadPoolImpl::GetThreadGroupForTraits(const TaskTraits& traits) {
|
||||
if (traits.priority() == TaskPriority::BEST_EFFORT &&
|
||||
traits.thread_policy() == ThreadPolicy::PREFER_BACKGROUND &&
|
||||
background_thread_group_) {
|
||||
return background_thread_group_.get();
|
||||
}
|
||||
|
||||
return foreground_thread_group_.get();
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::UpdateCanRunPolicy() {
|
||||
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
|
||||
|
||||
CanRunPolicy can_run_policy;
|
||||
if ((num_fences_ == 0 && num_best_effort_fences_ == 0 &&
|
||||
!has_disable_best_effort_switch_) ||
|
||||
task_tracker_->HasShutdownStarted()) {
|
||||
can_run_policy = CanRunPolicy::kAll;
|
||||
} else if (num_fences_ != 0) {
|
||||
can_run_policy = CanRunPolicy::kNone;
|
||||
} else {
|
||||
DCHECK(num_best_effort_fences_ > 0 || has_disable_best_effort_switch_);
|
||||
can_run_policy = CanRunPolicy::kForegroundOnly;
|
||||
}
|
||||
|
||||
task_tracker_->SetCanRunPolicy(can_run_policy);
|
||||
foreground_thread_group_->DidUpdateCanRunPolicy();
|
||||
if (background_thread_group_)
|
||||
background_thread_group_->DidUpdateCanRunPolicy();
|
||||
single_thread_task_runner_manager_.DidUpdateCanRunPolicy();
|
||||
}
|
||||
|
||||
TaskTraits ThreadPoolImpl::VerifyAndAjustIncomingTraits(
|
||||
TaskTraits traits) const {
|
||||
DCHECK_EQ(traits.extension_id(),
|
||||
TaskTraitsExtensionStorage::kInvalidExtensionId)
|
||||
<< "Extension traits cannot be used with the ThreadPool API.";
|
||||
if (all_tasks_user_blocking_.IsSet())
|
||||
traits.UpdatePriority(TaskPriority::USER_BLOCKING);
|
||||
return traits;
|
||||
}
|
||||
|
||||
void ThreadPoolImpl::ReportHeartbeatMetrics() const {
|
||||
foreground_thread_group_->ReportHeartbeatMetrics();
|
||||
if (background_thread_group_)
|
||||
background_thread_group_->ReportHeartbeatMetrics();
|
||||
}
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
|
@ -0,0 +1,199 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_THREAD_POOL_IMPL_H_
|
||||
#define BASE_TASK_THREAD_POOL_THREAD_POOL_IMPL_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/callback.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "base/memory/ref_counted.h"
|
||||
#include "base/optional.h"
|
||||
#include "base/sequence_checker.h"
|
||||
#include "base/strings/string_piece.h"
|
||||
#include "base/synchronization/atomic_flag.h"
|
||||
#include "base/task/single_thread_task_runner_thread_mode.h"
|
||||
#include "base/task/task_executor.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task/thread_pool/delayed_task_manager.h"
|
||||
#include "base/task/thread_pool/environment_config.h"
|
||||
#include "base/task/thread_pool/pooled_single_thread_task_runner_manager.h"
|
||||
#include "base/task/thread_pool/pooled_task_runner_delegate.h"
|
||||
#include "base/task/thread_pool/task_source.h"
|
||||
#include "base/task/thread_pool/task_tracker.h"
|
||||
#include "base/task/thread_pool/thread_group.h"
|
||||
#include "base/task/thread_pool/thread_group_impl.h"
|
||||
#include "base/task/thread_pool/thread_pool_instance.h"
|
||||
#include "base/updateable_sequenced_task_runner.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
|
||||
#include "base/task/thread_pool/task_tracker_posix.h"
|
||||
#endif
|
||||
|
||||
#if defined(OS_WIN)
|
||||
#include "base/win/com_init_check_hook.h"
|
||||
#endif
|
||||
|
||||
namespace base {
|
||||
|
||||
class Thread;
|
||||
|
||||
namespace internal {
|
||||
|
||||
// Default ThreadPoolInstance implementation. This class is thread-safe.
|
||||
class BASE_EXPORT ThreadPoolImpl : public ThreadPoolInstance,
|
||||
public TaskExecutor,
|
||||
public ThreadGroup::Delegate,
|
||||
public PooledTaskRunnerDelegate {
|
||||
public:
|
||||
using TaskTrackerImpl =
|
||||
#if defined(OS_POSIX) && !defined(OS_NACL_SFI)
|
||||
TaskTrackerPosix;
|
||||
#else
|
||||
TaskTracker;
|
||||
#endif
|
||||
|
||||
// Creates a ThreadPoolImpl with a production TaskTracker. |histogram_label|
|
||||
// is used to label histograms. No histograms are recorded if it is empty.
|
||||
explicit ThreadPoolImpl(StringPiece histogram_label);
|
||||
|
||||
// For testing only. Creates a ThreadPoolImpl with a custom TaskTracker.
|
||||
ThreadPoolImpl(StringPiece histogram_label,
|
||||
std::unique_ptr<TaskTrackerImpl> task_tracker);
|
||||
|
||||
~ThreadPoolImpl() override;
|
||||
|
||||
// ThreadPoolInstance:
|
||||
void Start(const ThreadPoolInstance::InitParams& init_params,
|
||||
WorkerThreadObserver* worker_thread_observer) override;
|
||||
int GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
|
||||
const TaskTraits& traits) const override;
|
||||
void Shutdown() override;
|
||||
void FlushForTesting() override;
|
||||
void FlushAsyncForTesting(OnceClosure flush_callback) override;
|
||||
void JoinForTesting() override;
|
||||
void BeginFence() override;
|
||||
void EndFence() override;
|
||||
void BeginBestEffortFence() override;
|
||||
void EndBestEffortFence() override;
|
||||
|
||||
// TaskExecutor:
|
||||
bool PostDelayedTask(const Location& from_here,
|
||||
const TaskTraits& traits,
|
||||
OnceClosure task,
|
||||
TimeDelta delay) override;
|
||||
scoped_refptr<TaskRunner> CreateTaskRunner(const TaskTraits& traits) override;
|
||||
scoped_refptr<SequencedTaskRunner> CreateSequencedTaskRunner(
|
||||
const TaskTraits& traits) override;
|
||||
scoped_refptr<SingleThreadTaskRunner> CreateSingleThreadTaskRunner(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode) override;
|
||||
#if defined(OS_WIN)
|
||||
scoped_refptr<SingleThreadTaskRunner> CreateCOMSTATaskRunner(
|
||||
const TaskTraits& traits,
|
||||
SingleThreadTaskRunnerThreadMode thread_mode) override;
|
||||
#endif // defined(OS_WIN)
|
||||
scoped_refptr<UpdateableSequencedTaskRunner>
|
||||
CreateUpdateableSequencedTaskRunner(const TaskTraits& traits);
|
||||
|
||||
// PooledTaskRunnerDelegate:
|
||||
bool EnqueueJobTaskSource(scoped_refptr<JobTaskSource> task_source) override;
|
||||
void RemoveJobTaskSource(scoped_refptr<JobTaskSource> task_source) override;
|
||||
void UpdatePriority(scoped_refptr<TaskSource> task_source,
|
||||
TaskPriority priority) override;
|
||||
|
||||
// Returns the TimeTicks of the next task scheduled on ThreadPool (Now() if
|
||||
// immediate, nullopt if none). This is thread-safe, i.e., it's safe if tasks
|
||||
// are being posted in parallel with this call but such a situation obviously
|
||||
// results in a race as to whether this call will see the new tasks in time.
|
||||
Optional<TimeTicks> NextScheduledRunTimeForTesting() const;
|
||||
|
||||
// Forces ripe delayed tasks to be posted (e.g. when time is mocked and
|
||||
// advances faster than the real-time delay on ServiceThread).
|
||||
void ProcessRipeDelayedTasksForTesting();
|
||||
|
||||
private:
|
||||
// Invoked after |num_fences_| or |num_best_effort_fences_| is updated. Sets
|
||||
// the CanRunPolicy in TaskTracker and wakes up workers as appropriate.
|
||||
void UpdateCanRunPolicy();
|
||||
|
||||
// Verifies that |traits| do not have properties that are banned in ThreadPool
|
||||
// and returns |traits|, with priority set to TaskPriority::USER_BLOCKING if
|
||||
// |all_tasks_user_blocking_| is set.
|
||||
TaskTraits VerifyAndAjustIncomingTraits(TaskTraits traits) const;
|
||||
|
||||
void ReportHeartbeatMetrics() const;
|
||||
|
||||
const ThreadGroup* GetThreadGroupForTraits(const TaskTraits& traits) const;
|
||||
|
||||
// ThreadGroup::Delegate:
|
||||
ThreadGroup* GetThreadGroupForTraits(const TaskTraits& traits) override;
|
||||
|
||||
// Posts |task| to be executed by the appropriate thread group as part of
|
||||
// |sequence|. This must only be called after |task| has gone through
|
||||
// TaskTracker::WillPostTask() and after |task|'s delayed run time.
|
||||
bool PostTaskWithSequenceNow(Task task, scoped_refptr<Sequence> sequence);
|
||||
|
||||
// PooledTaskRunnerDelegate:
|
||||
bool PostTaskWithSequence(Task task,
|
||||
scoped_refptr<Sequence> sequence) override;
|
||||
bool ShouldYield(const TaskSource* task_source) const override;
|
||||
|
||||
const std::unique_ptr<TaskTrackerImpl> task_tracker_;
|
||||
std::unique_ptr<Thread> service_thread_;
|
||||
DelayedTaskManager delayed_task_manager_;
|
||||
PooledSingleThreadTaskRunnerManager single_thread_task_runner_manager_;
|
||||
|
||||
// Indicates that all tasks are handled as if they had been posted with
|
||||
// TaskPriority::USER_BLOCKING. Since this is set in Start(), it doesn't apply
|
||||
// to tasks posted before Start() or to tasks posted to TaskRunners created
|
||||
// before Start().
|
||||
//
|
||||
// TODO(fdoray): Remove after experiment. https://crbug.com/757022
|
||||
AtomicFlag all_tasks_user_blocking_;
|
||||
|
||||
std::unique_ptr<ThreadGroup> foreground_thread_group_;
|
||||
std::unique_ptr<ThreadGroupImpl> background_thread_group_;
|
||||
|
||||
// Whether this ThreadPool was started. Access controlled by
// |sequence_checker_|.
|
||||
bool started_ = false;
|
||||
|
||||
// Whether the --disable-best-effort-tasks switch is preventing execution of
|
||||
// BEST_EFFORT tasks until shutdown.
|
||||
const bool has_disable_best_effort_switch_;
|
||||
|
||||
// Number of fences preventing execution of tasks of any/BEST_EFFORT priority.
|
||||
// Access controlled by |sequence_checker_|.
|
||||
int num_fences_ = 0;
|
||||
int num_best_effort_fences_ = 0;
|
||||
|
||||
#if DCHECK_IS_ON()
|
||||
// Set once JoinForTesting() has returned.
|
||||
AtomicFlag join_for_testing_returned_;
|
||||
#endif
|
||||
|
||||
#if defined(OS_WIN) && defined(COM_INIT_CHECK_HOOK_ENABLED)
|
||||
// Provides COM initialization verification for supported builds.
|
||||
base::win::ComInitCheckHook com_init_check_hook_;
|
||||
#endif
|
||||
|
||||
// Asserts that operations occur in sequence with Start().
|
||||
SEQUENCE_CHECKER(sequence_checker_);
|
||||
|
||||
TrackedRefFactory<ThreadGroup::Delegate> tracked_ref_factory_;
|
||||
|
||||
DISALLOW_COPY_AND_ASSIGN(ThreadPoolImpl);
|
||||
};
|
||||
|
||||
} // namespace internal
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_THREAD_POOL_IMPL_H_
|
||||
|
|
@ -0,0 +1,87 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/thread_pool/thread_pool_instance.h"

#include <algorithm>

#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/system/sys_info.h"
#include "base/task/thread_pool/thread_pool_impl.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"

namespace base {

namespace {

// |g_thread_pool| is intentionally leaked on shutdown.
ThreadPoolInstance* g_thread_pool = nullptr;

}  // namespace

ThreadPoolInstance::InitParams::InitParams(int max_num_foreground_threads_in)
    : max_num_foreground_threads(max_num_foreground_threads_in) {}

ThreadPoolInstance::InitParams::~InitParams() = default;

ThreadPoolInstance::ScopedExecutionFence::ScopedExecutionFence() {
  DCHECK(g_thread_pool);
  g_thread_pool->BeginFence();
}

ThreadPoolInstance::ScopedExecutionFence::~ScopedExecutionFence() {
  DCHECK(g_thread_pool);
  g_thread_pool->EndFence();
}

ThreadPoolInstance::ScopedBestEffortExecutionFence::
    ScopedBestEffortExecutionFence() {
  DCHECK(g_thread_pool);
  g_thread_pool->BeginBestEffortFence();
}

ThreadPoolInstance::ScopedBestEffortExecutionFence::
    ~ScopedBestEffortExecutionFence() {
  DCHECK(g_thread_pool);
  g_thread_pool->EndBestEffortFence();
}

#if !defined(OS_NACL)
// static
void ThreadPoolInstance::CreateAndStartWithDefaultParams(StringPiece name) {
  Create(name);
  g_thread_pool->StartWithDefaultParams();
}

void ThreadPoolInstance::StartWithDefaultParams() {
  // Values were chosen so that:
  // * There are few background threads.
  // * Background threads never outnumber foreground threads.
  // * The system is utilized maximally by foreground threads.
  // * The main thread is assumed to be busy, cap foreground workers at
  //   |num_cores - 1|.
  const int num_cores = SysInfo::NumberOfProcessors();
  const int max_num_foreground_threads = std::max(3, num_cores - 1);
  Start({max_num_foreground_threads});
}
#endif  // !defined(OS_NACL)
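
// Illustrative only: what the sizing formula in StartWithDefaultParams()
// yields for a few core counts, assuming SysInfo::NumberOfProcessors()
// returns the listed value.
//   num_cores = 2  ->  std::max(3, 1) = 3 foreground threads
//   num_cores = 4  ->  std::max(3, 3) = 3 foreground threads
//   num_cores = 8  ->  std::max(3, 7) = 7 foreground threads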

void ThreadPoolInstance::Create(StringPiece name) {
  Set(std::make_unique<internal::ThreadPoolImpl>(name));
}

// static
void ThreadPoolInstance::Set(std::unique_ptr<ThreadPoolInstance> thread_pool) {
  delete g_thread_pool;
  g_thread_pool = thread_pool.release();
}

// static
ThreadPoolInstance* ThreadPoolInstance::Get() {
  return g_thread_pool;
}

}  // namespace base
@ -0,0 +1,258 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_THREAD_POOL_INSTANCE_H_
|
||||
#define BASE_TASK_THREAD_POOL_THREAD_POOL_INSTANCE_H_
|
||||
|
||||
#include <memory>
|
||||
#include <vector>
|
||||
|
||||
#include "base/base_export.h"
|
||||
#include "base/callback.h"
|
||||
#include "base/gtest_prod_util.h"
|
||||
#include "base/memory/ref_counted.h"
|
||||
#include "base/sequenced_task_runner.h"
|
||||
#include "base/single_thread_task_runner.h"
|
||||
#include "base/strings/string_piece.h"
|
||||
#include "base/task/single_thread_task_runner_thread_mode.h"
|
||||
#include "base/task/task_traits.h"
|
||||
#include "base/task_runner.h"
|
||||
#include "base/time/time.h"
|
||||
#include "build/build_config.h"
|
||||
|
||||
namespace gin {
|
||||
class V8Platform;
|
||||
}
|
||||
|
||||
namespace content {
|
||||
// Can't use the FRIEND_TEST_ALL_PREFIXES macro because the test is in a
|
||||
// different namespace.
|
||||
class BrowserMainLoopTest_CreateThreadsInSingleProcess_Test;
|
||||
} // namespace content
|
||||
|
||||
namespace base {
|
||||
|
||||
class WorkerThreadObserver;
|
||||
class ThreadPoolTestHelpers;
|
||||
|
||||
// Interface for a thread pool and static methods to manage the instance used
|
||||
// by the post_task.h API.
|
||||
//
|
||||
// The thread pool doesn't create threads until Start() is called. Tasks can
|
||||
// be posted at any time but will not run until after Start() is called.
|
||||
//
|
||||
// The instance methods of this class are thread-safe.
|
||||
//
|
||||
// Note: All thread pool users should go through base/task/post_task.h instead
|
||||
// of this interface except for the one callsite per process which manages the
|
||||
// process's instance.
|
||||
class BASE_EXPORT ThreadPoolInstance {
|
||||
public:
|
||||
struct BASE_EXPORT InitParams {
|
||||
enum class CommonThreadPoolEnvironment {
|
||||
// Use the default environment (no environment).
|
||||
DEFAULT,
|
||||
#if defined(OS_WIN)
|
||||
// Place the pool's workers in a COM MTA.
|
||||
COM_MTA,
|
||||
// Place the pool's *foreground* workers in a COM STA. This exists to
|
||||
// mimic the behavior of SequencedWorkerPool and BrowserThreadImpl that
|
||||
// ThreadPool has replaced. Tasks that need a COM STA should use
|
||||
// CreateCOMSTATaskRunner() instead of Create(Sequenced)TaskRunner() +
|
||||
// this init param.
|
||||
DEPRECATED_COM_STA_IN_FOREGROUND_GROUP,
|
||||
#endif // defined(OS_WIN)
|
||||
};
|
||||
|
||||
InitParams(int max_num_foreground_threads_in);
|
||||
~InitParams();
|
||||
|
||||
// Maximum number of unblocked tasks that can run concurrently in the
|
||||
// foreground thread group.
|
||||
int max_num_foreground_threads;
|
||||
|
||||
// Whether COM is initialized when running sequenced and parallel tasks.
|
||||
CommonThreadPoolEnvironment common_thread_pool_environment =
|
||||
CommonThreadPoolEnvironment::DEFAULT;
|
||||
|
||||
// An experiment conducted in July 2019 revealed that on Android, changing
|
||||
// the reclaim time from 30 seconds to 5 minutes:
|
||||
// - Reduces jank by 5% at 99th percentile
|
||||
// - Reduces first input delay by 5% at 99th percentile
|
||||
// - Reduces input delay by 3% at 50th percentile
|
||||
// - Reduces navigation to first contentful paint by 2-3% at 25-95th
|
||||
// percentiles
|
||||
// On Windows and Mac, we instead see no impact or small regressions.
|
||||
//
|
||||
// TODO(scheduler-dev): Conduct experiments to find the optimal value for
|
||||
// each process type on each platform. In particular, due to regressions at
|
||||
// high percentiles for *HeartbeatLatencyMicroseconds.Renderer* histograms,
|
||||
// it was suggested that we might want a different reclaim time in
|
||||
// renderers. Note that the regression is not present in
|
||||
// *TaskLatencyMicroseconds.Renderer* histograms.
|
||||
TimeDelta suggested_reclaim_time =
|
||||
#if defined(OS_ANDROID)
|
||||
TimeDelta::FromMinutes(5);
|
||||
#else
|
||||
TimeDelta::FromSeconds(30);
|
||||
#endif
|
||||
};
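// Illustrative InitParams usage (hypothetical values; Start() is declared
// further below):
//
//   ThreadPoolInstance::Create("Browser");
//   ThreadPoolInstance::InitParams init_params(4);
//   init_params.suggested_reclaim_time = TimeDelta::FromSeconds(30);
//   ThreadPoolInstance::Get()->Start(init_params);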
|
||||
|
||||
// A Scoped(BestEffort)ExecutionFence prevents new tasks of any/BEST_EFFORT
|
||||
// priority from being scheduled in ThreadPoolInstance within its scope.
|
||||
// Multiple fences can exist at the same time. Upon destruction of all
|
||||
// Scoped(BestEffort)ExecutionFences, tasks that were preempted are released.
|
||||
// Note: the constructor of Scoped(BestEffort)ExecutionFence will not wait for
|
||||
// currently running tasks (as they were posted before entering this scope and
|
||||
// do not violate the contract; some of them could be CONTINUE_ON_SHUTDOWN and
|
||||
// waiting for them to complete is ill-advised).
|
||||
class BASE_EXPORT ScopedExecutionFence {
|
||||
public:
|
||||
ScopedExecutionFence();
|
||||
~ScopedExecutionFence();
|
||||
|
||||
private:
|
||||
DISALLOW_COPY_AND_ASSIGN(ScopedExecutionFence);
|
||||
};
|
||||
|
||||
class BASE_EXPORT ScopedBestEffortExecutionFence {
|
||||
public:
|
||||
ScopedBestEffortExecutionFence();
|
||||
~ScopedBestEffortExecutionFence();
|
||||
|
||||
private:
|
||||
DISALLOW_COPY_AND_ASSIGN(ScopedBestEffortExecutionFence);
|
||||
};
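// Illustrative fence usage (assumes a started ThreadPoolInstance and a
// hypothetical DoBackgroundWork() closure; not part of this header):
//
//   {
//     ThreadPoolInstance::ScopedBestEffortExecutionFence fence;
//     // While |fence| is alive, newly posted BEST_EFFORT tasks are queued
//     // but not scheduled; higher-priority tasks keep running.
//     PostTask(FROM_HERE, {TaskPriority::BEST_EFFORT},
//              BindOnce(&DoBackgroundWork));
//   }  // Fence destroyed: deferred BEST_EFFORT tasks may now be scheduled.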
|
||||
|
||||
// Destroying a ThreadPoolInstance is not allowed in production; it is always
|
||||
// leaked. In tests, it should only be destroyed after JoinForTesting() has
|
||||
// returned.
|
||||
virtual ~ThreadPoolInstance() = default;
|
||||
|
||||
// Allows the thread pool to create threads and run tasks following the
|
||||
// |init_params| specification.
|
||||
//
|
||||
// If specified, |worker_thread_observer| will be notified when a worker
|
||||
// enters and exits its main function. It must not be destroyed before
|
||||
// JoinForTesting() has returned (must never be destroyed in production).
|
||||
//
|
||||
// CHECKs on failure.
|
||||
virtual void Start(
|
||||
const InitParams& init_params,
|
||||
WorkerThreadObserver* worker_thread_observer = nullptr) = 0;
|
||||
|
||||
// Synchronously shuts down the thread pool. Once this is called, only tasks
|
||||
// posted with the BLOCK_SHUTDOWN behavior will be run. When this returns:
|
||||
// - All SKIP_ON_SHUTDOWN tasks that were already running have completed their
|
||||
// execution.
|
||||
// - All posted BLOCK_SHUTDOWN tasks have completed their execution.
|
||||
// - CONTINUE_ON_SHUTDOWN tasks might still be running.
|
||||
// Note that an implementation can keep threads and other resources alive to
|
||||
// support running CONTINUE_ON_SHUTDOWN after this returns. This can only be
|
||||
// called once.
|
||||
virtual void Shutdown() = 0;
|
||||
|
||||
// Waits until there are no pending undelayed tasks. May be called in tests
|
||||
// to validate that a condition is met after all undelayed tasks have run.
|
||||
//
|
||||
// Does not wait for delayed tasks. Waits for undelayed tasks posted from
|
||||
// other threads during the call. Returns immediately when shutdown completes.
|
||||
virtual void FlushForTesting() = 0;
|
||||
|
||||
// Returns immediately and invokes |flush_callback| once there are no
// incomplete undelayed tasks. |flush_callback| may be called back on any
// thread and should not perform a lot of work. May be used when additional
// work on the current thread needs to be performed during a flush. Only one
// FlushAsyncForTesting() may be pending at any given time.
|
||||
virtual void FlushAsyncForTesting(OnceClosure flush_callback) = 0;
|
||||
|
||||
// Joins all threads. Tasks that are already running are allowed to complete
|
||||
// their execution. This can only be called once. Using this thread pool
|
||||
// instance to create task runners or post tasks is not permitted during or
|
||||
// after this call.
|
||||
virtual void JoinForTesting() = 0;
|
||||
|
||||
// CreateAndStartWithDefaultParams(), Create(), and SetInstance() register a
|
||||
// ThreadPoolInstance to handle tasks posted through the post_task.h API for
|
||||
// this process.
|
||||
//
|
||||
// Processes that need to initialize ThreadPoolInstance with custom params or
|
||||
// that need to allow tasks to be posted before the ThreadPoolInstance creates
|
||||
// its threads should use Create() followed by Start(). Other processes can
|
||||
// use CreateAndStartWithDefaultParams().
|
||||
//
|
||||
// A registered ThreadPoolInstance is only deleted when a new
|
||||
// ThreadPoolInstance is registered. The last registered ThreadPoolInstance is
|
||||
// leaked on shutdown. The methods below must not be called when TaskRunners
|
||||
// created by a previous ThreadPoolInstance are still alive. The methods are
|
||||
// not thread-safe; proper synchronization is required to use the post_task.h
|
||||
// API after registering a new ThreadPoolInstance.
|
||||
|
||||
#if !defined(OS_NACL)
|
||||
// Creates and starts a thread pool using default params. |name| is used to
|
||||
// label histograms, it must not be empty. It should identify the component
|
||||
// that calls this. Start() is called by this method; it is invalid to call it
|
||||
// again afterwards. CHECKs on failure. For tests, prefer
|
||||
// base::test::TaskEnvironment (ensures isolation).
|
||||
static void CreateAndStartWithDefaultParams(StringPiece name);
|
||||
|
||||
// Same as CreateAndStartWithDefaultParams() but allows callers to split the
|
||||
// Create() and StartWithDefaultParams() calls.
|
||||
void StartWithDefaultParams();
|
||||
#endif // !defined(OS_NACL)
|
||||
|
||||
// Creates a ready to start thread pool. |name| is used to label histograms,
|
||||
// it must not be empty. It should identify the component that creates the
|
||||
// ThreadPoolInstance. The thread pool doesn't create threads until Start() is
|
||||
// called. Tasks can be posted at any time but will not run until after
|
||||
// Start() is called. For tests, prefer base::test::TaskEnvironment
|
||||
// (ensures isolation).
|
||||
static void Create(StringPiece name);
|
||||
|
||||
// Registers |thread_pool| to handle tasks posted through the post_task.h
|
||||
// API for this process. For tests, prefer base::test::TaskEnvironment
|
||||
// (ensures isolation).
|
||||
static void Set(std::unique_ptr<ThreadPoolInstance> thread_pool);
|
||||
|
||||
// Retrieve the ThreadPoolInstance set via SetInstance() or Create(). This
|
||||
// should be used very rarely; most users of the thread pool should use the
|
||||
// post_task.h API. In particular, refrain from doing
|
||||
// if (!ThreadPoolInstance::Get()) {
|
||||
// ThreadPoolInstance::Set(...);
|
||||
// base::PostTask(...);
|
||||
// }
|
||||
// instead make sure to SetInstance() early in one deterministic place in the
// process' initialization phase.
// If in doubt, consult with //base/task/thread_pool/OWNERS.
|
||||
static ThreadPoolInstance* Get();
|
||||
|
||||
private:
|
||||
friend class ThreadPoolTestHelpers;
|
||||
friend class gin::V8Platform;
|
||||
friend class content::BrowserMainLoopTest_CreateThreadsInSingleProcess_Test;
|
||||
|
||||
// Returns the maximum number of non-single-threaded non-blocked tasks posted
|
||||
// with |traits| that can run concurrently in this thread pool. |traits|
|
||||
// can't contain TaskPriority::BEST_EFFORT.
|
||||
//
|
||||
// Do not use this method. To process n items, post n tasks that each process
|
||||
// 1 item rather than GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated()
|
||||
// tasks that each process
|
||||
// n/GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated() items.
|
||||
//
|
||||
// TODO(fdoray): Remove this method. https://crbug.com/687264
|
||||
virtual int GetMaxConcurrentNonBlockedTasksWithTraitsDeprecated(
|
||||
const TaskTraits& traits) const = 0;
|
||||
|
||||
// Starts/stops a fence that prevents execution of tasks of any / BEST_EFFORT
|
||||
// priority.
|
||||
virtual void BeginFence() = 0;
|
||||
virtual void EndFence() = 0;
|
||||
virtual void BeginBestEffortFence() = 0;
|
||||
virtual void EndBestEffortFence() = 0;
|
||||
};
|
||||
|
||||
} // namespace base
|
||||
|
||||
#endif // BASE_TASK_THREAD_POOL_THREAD_POOL_INSTANCE_H_
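A minimal sketch of the registration flow described above (assumes the default
params are acceptable and a hypothetical DoWork() closure; processes that need
custom InitParams would use Create() + Start() instead):

  // Once, early in the process' initialization phase:
  base::ThreadPoolInstance::CreateAndStartWithDefaultParams("MyProcess");

  // Afterwards, anywhere in the process, through the post_task.h API:
  base::PostTask(FROM_HERE, base::BindOnce(&DoWork));

  // At most once, during shutdown:
  base::ThreadPoolInstance::Get()->Shutdown();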
|
||||
|
|
@ -0,0 +1,179 @@
|
|||
// Copyright 2018 The Chromium Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style license that can be
|
||||
// found in the LICENSE file.
|
||||
|
||||
#ifndef BASE_TASK_THREAD_POOL_TRACKED_REF_H_
|
||||
#define BASE_TASK_THREAD_POOL_TRACKED_REF_H_
|
||||
|
||||
#include <memory>
|
||||
|
||||
#include "base/atomic_ref_count.h"
|
||||
#include "base/gtest_prod_util.h"
|
||||
#include "base/logging.h"
|
||||
#include "base/macros.h"
|
||||
#include "base/memory/ptr_util.h"
|
||||
#include "base/synchronization/waitable_event.h"
|
||||
|
||||
namespace base {
|
||||
namespace internal {
|
||||
|
||||
// TrackedRefs are effectively a ref-counting scheme for objects that have a
|
||||
// single owner.
|
||||
//
|
||||
// Deletion is still controlled by the single owner but ~T() itself will block
|
||||
// until all the TrackedRefs handed by its TrackedRefFactory have been released
|
||||
// (by ~TrackedRef<T>()).
|
||||
//
|
||||
// Just like WeakPtrFactory: TrackedRefFactory<T> should be the last member of T
|
||||
// to ensure ~TrackedRefFactory<T>() runs first in ~T().
|
||||
//
|
||||
// The owner of a T should hence be certain that the last TrackedRefs to T are
|
||||
// already gone or on their way out before destroying it or ~T() will hang
|
||||
// (indicating a bug in the tear down logic -- proper refcounting on the other
|
||||
// hand would result in a leak).
|
||||
//
|
||||
// TrackedRefFactory only makes sense to use on types that are always leaked in
|
||||
// production but need to be torn down in tests (blocking destruction is
|
||||
// impractical in production).
|
||||
//
|
||||
// Why would we ever need such a thing? In thread_pool there is a clear
|
||||
// ownership hierarchy with mostly single owners and little refcounting. In
|
||||
// production nothing is ever torn down so this isn't a problem. In tests
|
||||
// however we must JoinForTesting(). At that point, all the raw back T* refs
|
||||
// used by the worker threads are problematic because they can result in use-
|
||||
// after-frees if a worker outlives the deletion of its corresponding
|
||||
// ThreadPool/TaskTracker/ThreadGroup/etc.
|
||||
//
|
||||
// JoinForTesting() isn't so hard when all workers are managed. But with cleanup
|
||||
// semantics (reclaiming a worker who's been idle for too long) it becomes
|
||||
// tricky because workers can go unaccounted for before they exit their main
|
||||
// (https://crbug.com/827615).
|
||||
//
|
||||
// For that reason and to clearly document the ownership model, thread_pool
|
||||
// uses TrackedRefs.
|
||||
//
|
||||
// On top of being a clearer ownership model than proper refcounting, a hang in
|
||||
// tear down in a test with out-of-order tear down logic is much preferred to
|
||||
// letting its worker thread and associated constructs outlive the test
|
||||
// (potentially resulting in flakes in unrelated tests running later in the same
|
||||
// process).
|
||||
//
|
||||
// Note: While there's nothing thread_pool specific about TrackedRefs it
|
||||
// requires an ownership model where all the TrackedRefs are released on other
|
||||
// threads in sync with ~T(). This isn't a typical use case beyond shutting down
|
||||
// ThreadPool in tests and as such this is kept internal here for now.
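//
// A minimal sketch of the intended pattern (|Controller| is a hypothetical
// type, not something defined in this file):
//
//   class Controller {
//    public:
//     Controller() : tracked_ref_factory_(this) {}
//
//     // Handed to workers instead of a raw Controller*.
//     TrackedRef<Controller> GetTrackedRef() {
//       return tracked_ref_factory_.GetTrackedRef();
//     }
//
//    private:
//     // Last member: ~TrackedRefFactory() runs first in ~Controller() and
//     // blocks until every vended TrackedRef<Controller> has been released.
//     TrackedRefFactory<Controller> tracked_ref_factory_;
//   };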
|
||||

template <class T>
class TrackedRefFactory;

// TrackedRef<T> can be used like a T*.
template <class T>
class TrackedRef {
 public:
  // Moveable and copyable.
  TrackedRef(TrackedRef<T>&& other)
      : ptr_(other.ptr_), factory_(other.factory_) {
    // Null out |other|'s factory so its destructor doesn't decrement
    // |live_tracked_refs_|.
    other.factory_ = nullptr;
  }
  TrackedRef(const TrackedRef<T>& other)
      : ptr_(other.ptr_), factory_(other.factory_) {
    factory_->live_tracked_refs_.Increment();
  }

  // Intentionally not assignable for now because it makes the logic slightly
  // convoluted and it's not a use case that makes sense for the types using
  // this at the moment.
  TrackedRef& operator=(TrackedRef<T>&& other) = delete;
  TrackedRef& operator=(const TrackedRef<T>& other) = delete;

  ~TrackedRef() {
    if (factory_ && !factory_->live_tracked_refs_.Decrement()) {
      DCHECK(factory_->ready_to_destroy_);
      DCHECK(!factory_->ready_to_destroy_->IsSignaled());
      factory_->ready_to_destroy_->Signal();
    }
  }

  T& operator*() const { return *ptr_; }

  T* operator->() const { return ptr_; }

  explicit operator bool() const { return ptr_ != nullptr; }

  bool operator==(const void* compared_ptr) const {
    return ptr_ == compared_ptr;
  }

  // Returns the raw pointer stored in this TrackedRef. This is occasionally
  // useful for operations in scope but, as with other smart pointers, it
  // shouldn't be used beyond the scope of this TrackedRef.
  T* get() const { return ptr_; }

 private:
  friend class TrackedRefFactory<T>;

  TrackedRef(T* ptr, TrackedRefFactory<T>* factory)
      : ptr_(ptr), factory_(factory) {
    factory_->live_tracked_refs_.Increment();
  }

  T* ptr_;
  TrackedRefFactory<T>* factory_;
};

// TrackedRefFactory<T> should be the last member of T.
template <class T>
class TrackedRefFactory {
 public:
  TrackedRefFactory(T* ptr)
      : ptr_(ptr), self_ref_(WrapUnique(new TrackedRef<T>(ptr_, this))) {
    DCHECK(ptr_);
  }

  ~TrackedRefFactory() {
    // Enter the destruction phase.
    ready_to_destroy_ = std::make_unique<WaitableEvent>();

    // Release self-ref (if this was the last one it will signal the event
    // right away).
    self_ref_.reset();

    ready_to_destroy_->Wait();
  }

  TrackedRef<T> GetTrackedRef() {
    // TrackedRefs cannot be obtained after |live_tracked_refs_| has already
    // reached zero. In other words, the owner of a TrackedRefFactory shouldn't
    // vend new TrackedRefs while it's being destroyed (owners of TrackedRefs
    // may still copy/move their refs around during the destruction phase).
    DCHECK(!live_tracked_refs_.IsZero());
    return TrackedRef<T>(ptr_, this);
  }

 private:
  friend class TrackedRef<T>;
  FRIEND_TEST_ALL_PREFIXES(TrackedRefTest, CopyAndMoveSemantics);

  T* const ptr_;

  // The number of live TrackedRefs vended by this factory.
  AtomicRefCount live_tracked_refs_{0};

  // Non-null during the destruction phase. Signaled once |live_tracked_refs_|
  // reaches 0. Note: while this could be a direct member, only initializing it
  // in the destruction phase avoids keeping a handle open for the entire
  // session.
  std::unique_ptr<WaitableEvent> ready_to_destroy_;

  // TrackedRefFactory holds a TrackedRef as well to prevent
  // |live_tracked_refs_| from ever reaching zero before ~TrackedRefFactory().
  std::unique_ptr<TrackedRef<T>> self_ref_;

  DISALLOW_COPY_AND_ASSIGN(TrackedRefFactory);
};

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_TRACKED_REF_H_
|
|
@ -0,0 +1,369 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/thread_pool/worker_thread.h"

#include <stddef.h>

#include <utility>

#include "base/compiler_specific.h"
#include "base/debug/alias.h"
#include "base/logging.h"
#include "base/task/thread_pool/environment_config.h"
#include "base/task/thread_pool/task_tracker.h"
#include "base/task/thread_pool/worker_thread_observer.h"
#include "base/threading/hang_watcher.h"
#include "base/time/time_override.h"
#include "base/trace_event/trace_event.h"

#if defined(OS_MACOSX)
#include "base/mac/scoped_nsautorelease_pool.h"
#endif

namespace base {
namespace internal {

void WorkerThread::Delegate::WaitForWork(WaitableEvent* wake_up_event) {
  DCHECK(wake_up_event);
  const TimeDelta sleep_time = GetSleepTimeout();
  if (sleep_time.is_max()) {
    // Calling TimedWait with TimeDelta::Max is not recommended per
    // http://crbug.com/465948.
    wake_up_event->Wait();
  } else {
    wake_up_event->TimedWait(sleep_time);
  }
}

WorkerThread::WorkerThread(ThreadPriority priority_hint,
                           std::unique_ptr<Delegate> delegate,
                           TrackedRef<TaskTracker> task_tracker,
                           const CheckedLock* predecessor_lock)
    : thread_lock_(predecessor_lock),
      delegate_(std::move(delegate)),
      task_tracker_(std::move(task_tracker)),
      priority_hint_(priority_hint),
      current_thread_priority_(GetDesiredThreadPriority()) {
  DCHECK(delegate_);
  DCHECK(task_tracker_);
  DCHECK(CanUseBackgroundPriorityForWorkerThread() ||
         priority_hint_ != ThreadPriority::BACKGROUND);
  wake_up_event_.declare_only_used_while_idle();
}

bool WorkerThread::Start(WorkerThreadObserver* worker_thread_observer) {
  CheckedLock::AssertNoLockHeldOnCurrentThread();
  CheckedAutoLock auto_lock(thread_lock_);
  DCHECK(thread_handle_.is_null());

  if (should_exit_.IsSet() || join_called_for_testing_.IsSet())
    return true;

  DCHECK(!worker_thread_observer_);
  worker_thread_observer_ = worker_thread_observer;

  self_ = this;

  constexpr size_t kDefaultStackSize = 0;
  PlatformThread::CreateWithPriority(kDefaultStackSize, this, &thread_handle_,
                                     current_thread_priority_);

  if (thread_handle_.is_null()) {
    self_ = nullptr;
    return false;
  }

  return true;
}

void WorkerThread::WakeUp() {
  // Signalling an event can deschedule the current thread. Since being
  // descheduled while holding a lock is undesirable (https://crbug.com/890978),
  // assert that no lock is held by the current thread.
  CheckedLock::AssertNoLockHeldOnCurrentThread();
  // Calling WakeUp() after Cleanup() or Join() is wrong because the
  // WorkerThread cannot run more tasks.
  DCHECK(!join_called_for_testing_.IsSet());
  DCHECK(!should_exit_.IsSet());
  wake_up_event_.Signal();
}

void WorkerThread::JoinForTesting() {
  DCHECK(!join_called_for_testing_.IsSet());
  join_called_for_testing_.Set();
  wake_up_event_.Signal();

  PlatformThreadHandle thread_handle;

  {
    CheckedAutoLock auto_lock(thread_lock_);

    if (thread_handle_.is_null())
      return;

    thread_handle = thread_handle_;
    // Reset |thread_handle_| so it isn't joined by the destructor.
    thread_handle_ = PlatformThreadHandle();
  }

  PlatformThread::Join(thread_handle);
}

bool WorkerThread::ThreadAliveForTesting() const {
  CheckedAutoLock auto_lock(thread_lock_);
  return !thread_handle_.is_null();
}

WorkerThread::~WorkerThread() {
  CheckedAutoLock auto_lock(thread_lock_);

  // If |thread_handle_| wasn't joined, detach it.
  if (!thread_handle_.is_null()) {
    DCHECK(!join_called_for_testing_.IsSet());
    PlatformThread::Detach(thread_handle_);
  }
}

void WorkerThread::Cleanup() {
  DCHECK(!should_exit_.IsSet());
  should_exit_.Set();
  wake_up_event_.Signal();
}

void WorkerThread::BeginUnusedPeriod() {
  CheckedAutoLock auto_lock(thread_lock_);
  DCHECK(last_used_time_.is_null());
  last_used_time_ = subtle::TimeTicksNowIgnoringOverride();
}

void WorkerThread::EndUnusedPeriod() {
  CheckedAutoLock auto_lock(thread_lock_);
  DCHECK(!last_used_time_.is_null());
  last_used_time_ = TimeTicks();
}

TimeTicks WorkerThread::GetLastUsedTime() const {
  CheckedAutoLock auto_lock(thread_lock_);
  return last_used_time_;
}

bool WorkerThread::ShouldExit() const {
  // The ordering of the checks is important below. This WorkerThread may be
  // released and outlive |task_tracker_| in unit tests. However, when the
  // WorkerThread is released, |should_exit_| will be set, so check that
  // first.
  return should_exit_.IsSet() || join_called_for_testing_.IsSet() ||
         task_tracker_->IsShutdownComplete();
}

ThreadPriority WorkerThread::GetDesiredThreadPriority() const {
  // To avoid shutdown hangs, disallow a priority below NORMAL during shutdown.
  if (task_tracker_->HasShutdownStarted())
    return ThreadPriority::NORMAL;

  return priority_hint_;
}

void WorkerThread::UpdateThreadPriority(
    ThreadPriority desired_thread_priority) {
  if (desired_thread_priority == current_thread_priority_)
    return;

  PlatformThread::SetCurrentThreadPriority(desired_thread_priority);
  current_thread_priority_ = desired_thread_priority;
}

void WorkerThread::ThreadMain() {
  if (priority_hint_ == ThreadPriority::BACKGROUND) {
    switch (delegate_->GetThreadLabel()) {
      case ThreadLabel::POOLED:
        RunBackgroundPooledWorker();
        return;
      case ThreadLabel::SHARED:
        RunBackgroundSharedWorker();
        return;
      case ThreadLabel::DEDICATED:
        RunBackgroundDedicatedWorker();
        return;
#if defined(OS_WIN)
      case ThreadLabel::SHARED_COM:
        RunBackgroundSharedCOMWorker();
        return;
      case ThreadLabel::DEDICATED_COM:
        RunBackgroundDedicatedCOMWorker();
        return;
#endif  // defined(OS_WIN)
    }
  }

  switch (delegate_->GetThreadLabel()) {
    case ThreadLabel::POOLED:
      RunPooledWorker();
      return;
    case ThreadLabel::SHARED:
      RunSharedWorker();
      return;
    case ThreadLabel::DEDICATED:
      RunDedicatedWorker();
      return;
#if defined(OS_WIN)
    case ThreadLabel::SHARED_COM:
      RunSharedCOMWorker();
      return;
    case ThreadLabel::DEDICATED_COM:
      RunDedicatedCOMWorker();
      return;
#endif  // defined(OS_WIN)
  }
}

NOINLINE void WorkerThread::RunPooledWorker() {
  const int line_number = __LINE__;
  RunWorker();
  base::debug::Alias(&line_number);
}

NOINLINE void WorkerThread::RunBackgroundPooledWorker() {
  const int line_number = __LINE__;
  RunWorker();
  base::debug::Alias(&line_number);
}

NOINLINE void WorkerThread::RunSharedWorker() {
  const int line_number = __LINE__;
  RunWorker();
  base::debug::Alias(&line_number);
}

NOINLINE void WorkerThread::RunBackgroundSharedWorker() {
  const int line_number = __LINE__;
  RunWorker();
  base::debug::Alias(&line_number);
}

NOINLINE void WorkerThread::RunDedicatedWorker() {
  const int line_number = __LINE__;
  RunWorker();
  base::debug::Alias(&line_number);
}

NOINLINE void WorkerThread::RunBackgroundDedicatedWorker() {
  const int line_number = __LINE__;
  RunWorker();
  base::debug::Alias(&line_number);
}

#if defined(OS_WIN)
NOINLINE void WorkerThread::RunSharedCOMWorker() {
  const int line_number = __LINE__;
  RunWorker();
  base::debug::Alias(&line_number);
}

NOINLINE void WorkerThread::RunBackgroundSharedCOMWorker() {
  const int line_number = __LINE__;
  RunWorker();
  base::debug::Alias(&line_number);
}

NOINLINE void WorkerThread::RunDedicatedCOMWorker() {
  const int line_number = __LINE__;
  RunWorker();
  base::debug::Alias(&line_number);
}

NOINLINE void WorkerThread::RunBackgroundDedicatedCOMWorker() {
  const int line_number = __LINE__;
  RunWorker();
  base::debug::Alias(&line_number);
}
#endif  // defined(OS_WIN)

void WorkerThread::RunWorker() {
  DCHECK_EQ(self_, this);
  TRACE_EVENT_INSTANT0("thread_pool", "WorkerThreadThread born",
                       TRACE_EVENT_SCOPE_THREAD);
  TRACE_EVENT_BEGIN0("thread_pool", "WorkerThreadThread active");

  if (worker_thread_observer_)
    worker_thread_observer_->OnWorkerThreadMainEntry();

  delegate_->OnMainEntry(this);

  // Background threads can take an arbitrary amount of time to complete; do
  // not watch them for hangs. Ignore priority boosting for now.
  const bool watch_for_hangs =
      base::HangWatcher::GetInstance() != nullptr &&
      GetDesiredThreadPriority() != ThreadPriority::BACKGROUND;

  // If this process has a HangWatcher, register this thread for watching.
  base::ScopedClosureRunner unregister_for_hang_watching;
  if (watch_for_hangs) {
    unregister_for_hang_watching =
        base::HangWatcher::GetInstance()->RegisterThread();
  }

  // A WorkerThread starts out waiting for work.
  {
    TRACE_EVENT_END0("thread_pool", "WorkerThreadThread active");
    delegate_->WaitForWork(&wake_up_event_);
    TRACE_EVENT_BEGIN0("thread_pool", "WorkerThreadThread active");
  }

  while (!ShouldExit()) {
#if defined(OS_MACOSX)
    mac::ScopedNSAutoreleasePool autorelease_pool;
#endif
    base::Optional<HangWatchScope> hang_watch_scope;
    if (watch_for_hangs)
      hang_watch_scope.emplace(base::HangWatchScope::kDefaultHangWatchTime);

    UpdateThreadPriority(GetDesiredThreadPriority());

    // Get the task source containing the next task to execute.
    RegisteredTaskSource task_source = delegate_->GetWork(this);
    if (!task_source) {
      // Exit immediately if GetWork() resulted in detaching this worker.
      if (ShouldExit())
        break;

      TRACE_EVENT_END0("thread_pool", "WorkerThreadThread active");
      hang_watch_scope.reset();
      delegate_->WaitForWork(&wake_up_event_);
      TRACE_EVENT_BEGIN0("thread_pool", "WorkerThreadThread active");
      continue;
    }

    task_source = task_tracker_->RunAndPopNextTask(std::move(task_source));

    delegate_->DidProcessTask(std::move(task_source));

    // Calling WakeUp() guarantees that this WorkerThread will run Tasks from
    // TaskSources returned by the GetWork() method of |delegate_| until it
    // returns nullptr. Resetting |wake_up_event_| here doesn't break this
    // invariant and avoids a useless loop iteration before going to sleep if
    // WakeUp() is called while this WorkerThread is awake.
    wake_up_event_.Reset();
  }

  // Important: It is unsafe to access unowned state (e.g. |task_tracker_|)
  // after invoking OnMainExit().

  delegate_->OnMainExit(this);

  if (worker_thread_observer_)
    worker_thread_observer_->OnWorkerThreadMainExit();

  // Release the self-reference to |this|. This can result in deleting |this|
  // and as such no more member accesses should be made after this point.
  self_ = nullptr;

  TRACE_EVENT_END0("thread_pool", "WorkerThreadThread active");
  TRACE_EVENT_INSTANT0("thread_pool", "WorkerThreadThread dead",
                       TRACE_EVENT_SCOPE_THREAD);
}

}  // namespace internal
}  // namespace base
|
|
@ -0,0 +1,243 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_WORKER_THREAD_H_
#define BASE_TASK_THREAD_POOL_WORKER_THREAD_H_

#include <memory>

#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/atomic_flag.h"
#include "base/synchronization/waitable_event.h"
#include "base/task/common/checked_lock.h"
#include "base/task/thread_pool/task_source.h"
#include "base/task/thread_pool/tracked_ref.h"
#include "base/thread_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/time/time.h"
#include "build/build_config.h"

namespace base {

class WorkerThreadObserver;

namespace internal {

class TaskTracker;

// A worker that manages a single thread to run Tasks from TaskSources returned
// by a delegate.
//
// A WorkerThread starts out sleeping. It is woken up by a call to WakeUp().
// After a wake-up, a WorkerThread runs Tasks from TaskSources returned by
// the GetWork() method of its delegate as long as it doesn't return nullptr. It
// also periodically checks with its TaskTracker whether shutdown has completed
// and exits when it has.
//
// This class is thread-safe.
class BASE_EXPORT WorkerThread : public RefCountedThreadSafe<WorkerThread>,
                                 public PlatformThread::Delegate {
 public:
  // Labels this WorkerThread's association. This doesn't affect any logic
  // but will add a stack frame labeling this thread for ease of stack trace
  // identification.
  enum class ThreadLabel {
    POOLED,
    SHARED,
    DEDICATED,
#if defined(OS_WIN)
    SHARED_COM,
    DEDICATED_COM,
#endif  // defined(OS_WIN)
  };

  // Delegate interface for WorkerThread. All methods are called from the
  // thread managed by the WorkerThread instance.
  class BASE_EXPORT Delegate {
   public:
    virtual ~Delegate() = default;

    // Returns the ThreadLabel the Delegate wants its WorkerThreads' stacks
    // to be labeled with.
    virtual ThreadLabel GetThreadLabel() const = 0;

    // Called by |worker|'s thread when it enters its main function.
    virtual void OnMainEntry(const WorkerThread* worker) = 0;

    // Called by |worker|'s thread to get a TaskSource from which to run a Task.
    virtual RegisteredTaskSource GetWork(WorkerThread* worker) = 0;

    // Called by the WorkerThread after it ran a Task. If the Task's
    // TaskSource should be reenqueued, it is passed as |task_source|.
    // Otherwise, |task_source| is nullptr.
    virtual void DidProcessTask(RegisteredTaskSource task_source) = 0;

    // Called to determine how long to sleep before the next call to GetWork().
    // GetWork() may be called before this timeout expires if the worker's
    // WakeUp() method is called.
    virtual TimeDelta GetSleepTimeout() = 0;

    // Called by the WorkerThread's thread to wait for work. Override this
    // method if the thread in question needs special handling to go to sleep.
    // |wake_up_event| is a manually resettable event and is signaled on
    // WorkerThread::WakeUp().
    virtual void WaitForWork(WaitableEvent* wake_up_event);

    // Called by |worker|'s thread right before the main function exits. The
    // Delegate is free to release any associated resources in this call. It is
    // guaranteed that WorkerThread won't access the Delegate or the
    // TaskTracker after calling OnMainExit() on the Delegate.
    virtual void OnMainExit(WorkerThread* worker) {}
  };
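
  // A minimal Delegate sketch (illustrative only -- |MyDelegate| and its
  // trivial bodies are hypothetical, not part of this API):
  //
  //   class MyDelegate : public WorkerThread::Delegate {
  //    public:
  //     ThreadLabel GetThreadLabel() const override {
  //       return ThreadLabel::DEDICATED;
  //     }
  //     void OnMainEntry(const WorkerThread* worker) override {}
  //     RegisteredTaskSource GetWork(WorkerThread* worker) override {
  //       return {};  // No work; the worker waits for the next WakeUp().
  //     }
  //     void DidProcessTask(RegisteredTaskSource task_source) override {}
  //     TimeDelta GetSleepTimeout() override { return TimeDelta::Max(); }
  //   };
  //
  // Such a delegate would typically be installed as follows, where
  // |task_tracker_ref| is a TrackedRef<TaskTracker> obtained from the pool's
  // TaskTracker (hypothetical setup, shown only to illustrate the call order):
  //
  //   auto worker = MakeRefCounted<WorkerThread>(
  //       ThreadPriority::NORMAL, std::make_unique<MyDelegate>(),
  //       task_tracker_ref);
  //   worker->Start();
  //   worker->WakeUp();
  //   ...
  //   worker->JoinForTesting();  // Or Cleanup(), before the last ref is gone.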

  // Creates a WorkerThread that runs Tasks from TaskSources returned by
  // |delegate|. No actual thread will be created for this WorkerThread
  // before Start() is called. |priority_hint| is the preferred thread priority;
  // the actual thread priority depends on shutdown state and platform
  // capabilities. |task_tracker| is used to handle shutdown behavior of Tasks.
  // |predecessor_lock| is a lock that is allowed to be held when calling
  // methods on this WorkerThread. Either JoinForTesting() or Cleanup() must be
  // called before releasing the last external reference.
  WorkerThread(ThreadPriority priority_hint,
               std::unique_ptr<Delegate> delegate,
               TrackedRef<TaskTracker> task_tracker,
               const CheckedLock* predecessor_lock = nullptr);

  // Creates a thread to back the WorkerThread. The thread will be in a wait
  // state pending a WakeUp() call. No thread will be created if Cleanup() was
  // called. If specified, |worker_thread_observer| will be notified when the
  // worker enters and exits its main function. It must not be destroyed before
  // JoinForTesting() has returned (must never be destroyed in production).
  // Returns true on success.
  bool Start(WorkerThreadObserver* worker_thread_observer = nullptr);

  // Wakes up this WorkerThread if it wasn't already awake. After this is
  // called, this WorkerThread will run Tasks from TaskSources returned by
  // the GetWork() method of its delegate until it returns nullptr. No-op if
  // Start() wasn't called. DCHECKs if called after Start() has failed or after
  // Cleanup() has been called.
  void WakeUp();

  WorkerThread::Delegate* delegate() { return delegate_.get(); }

  // Joins this WorkerThread. If a Task is already running, it will be
  // allowed to complete its execution. This can only be called once.
  //
  // Note: A thread that detaches before JoinForTesting() is called may still be
  // running after JoinForTesting() returns. However, it can't run tasks after
  // JoinForTesting() returns.
  void JoinForTesting();

  // Returns true if the worker is alive.
  bool ThreadAliveForTesting() const;

  // Makes a request to cleanup the worker. This may be called from any thread.
  // The caller is expected to release its reference to this object after
  // calling Cleanup(). Further method calls after Cleanup() returns are
  // undefined.
  //
  // Expected Usage:
  //   scoped_refptr<WorkerThread> worker_ = /* Existing Worker */
  //   worker_->Cleanup();
  //   worker_ = nullptr;
  void Cleanup();

  // Informs this WorkerThread about periods during which it is not being
  // used. Thread-safe.
  void BeginUnusedPeriod();
  void EndUnusedPeriod();
  // Returns the last time this WorkerThread was used. Returns a null time if
  // this WorkerThread is currently in-use. Thread-safe.
  TimeTicks GetLastUsedTime() const;

 private:
  friend class RefCountedThreadSafe<WorkerThread>;
  class Thread;

  ~WorkerThread() override;

  bool ShouldExit() const;

  // Returns the thread priority to use based on the priority hint, current
  // shutdown state, and platform capabilities.
  ThreadPriority GetDesiredThreadPriority() const;

  // Changes the thread priority to |desired_thread_priority|. Must be called on
  // the thread managed by |this|.
  void UpdateThreadPriority(ThreadPriority desired_thread_priority);

  // PlatformThread::Delegate:
  void ThreadMain() override;

  // Dummy frames to act as "RunLabeledWorker()" (see RunWorker() below). Their
  // impl is aliased to prevent compiler/linker from optimizing them out.
  void RunPooledWorker();
  void RunBackgroundPooledWorker();
  void RunSharedWorker();
  void RunBackgroundSharedWorker();
  void RunDedicatedWorker();
  void RunBackgroundDedicatedWorker();
#if defined(OS_WIN)
  void RunSharedCOMWorker();
  void RunBackgroundSharedCOMWorker();
  void RunDedicatedCOMWorker();
  void RunBackgroundDedicatedCOMWorker();
#endif  // defined(OS_WIN)

  // The real main, invoked through:
  //     ThreadMain() -> RunLabeledWorker() -> RunWorker().
  // "RunLabeledWorker()" is a dummy frame based on ThreadLabel+ThreadPriority
  // and used to easily identify threads in stack traces.
  void RunWorker();

  // Self-reference to prevent destruction of |this| while the thread is alive.
  // Set in Start() before creating the thread. Reset in ThreadMain() before the
  // thread exits. No lock required because the first access occurs before the
  // thread is created and the second access occurs on the thread.
  scoped_refptr<WorkerThread> self_;

  mutable CheckedLock thread_lock_;

  // Handle for the thread managed by |this|.
  PlatformThreadHandle thread_handle_ GUARDED_BY(thread_lock_);

  // The last time this worker was used by its owner (e.g. to process work or
  // stand as a required idle thread).
  TimeTicks last_used_time_ GUARDED_BY(thread_lock_);

  // Event to wake up the thread managed by |this|.
  WaitableEvent wake_up_event_{WaitableEvent::ResetPolicy::AUTOMATIC,
                               WaitableEvent::InitialState::NOT_SIGNALED};

  // Whether the thread should exit. Set by Cleanup().
  AtomicFlag should_exit_;

  const std::unique_ptr<Delegate> delegate_;
  const TrackedRef<TaskTracker> task_tracker_;

  // Optional observer notified when a worker enters and exits its main
  // function. Set in Start() and never modified afterwards.
  WorkerThreadObserver* worker_thread_observer_ = nullptr;

  // Desired thread priority.
  const ThreadPriority priority_hint_;

  // Actual thread priority. Can be different than |priority_hint_| depending on
  // system capabilities and shutdown state. No lock required because all post-
  // construction accesses occur on the thread.
  ThreadPriority current_thread_priority_;

  // Set once JoinForTesting() has been called.
  AtomicFlag join_called_for_testing_;

  DISALLOW_COPY_AND_ASSIGN(WorkerThread);
};

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_WORKER_THREAD_H_
|
|
@ -0,0 +1,27 @@
|
|||
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_WORKER_THREAD_OBSERVER_H_
#define BASE_TASK_THREAD_POOL_WORKER_THREAD_OBSERVER_H_

namespace base {

// Interface to observe entry and exit of the main function of a ThreadPool
// worker.
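//
// A minimal sketch of an implementation (illustrative only -- the name
// WorkerCountingObserver and its counter are hypothetical):
//
//   class WorkerCountingObserver : public WorkerThreadObserver {
//    public:
//     void OnWorkerThreadMainEntry() override { ++live_workers_; }
//     void OnWorkerThreadMainExit() override { --live_workers_; }
//
//    private:
//     std::atomic_int live_workers_{0};
//   };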
class WorkerThreadObserver {
 public:
  virtual ~WorkerThreadObserver() = default;

  // Invoked at the beginning of the main function of a ThreadPool worker,
  // before any task runs.
  virtual void OnWorkerThreadMainEntry() = 0;

  // Invoked at the end of the main function of a ThreadPool worker, when it
  // can no longer run tasks.
  virtual void OnWorkerThreadMainExit() = 0;
};

}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_WORKER_THREAD_OBSERVER_H_
|
|
@ -0,0 +1,57 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task/thread_pool/worker_thread_stack.h"

#include <algorithm>

#include "base/logging.h"
#include "base/stl_util.h"
#include "base/task/thread_pool/worker_thread.h"

namespace base {
namespace internal {

WorkerThreadStack::WorkerThreadStack() = default;

WorkerThreadStack::~WorkerThreadStack() = default;

void WorkerThreadStack::Push(WorkerThread* worker) {
  DCHECK(!Contains(worker)) << "WorkerThread already on stack";
  if (!IsEmpty())
    stack_.back()->BeginUnusedPeriod();
  stack_.push_back(worker);
}

WorkerThread* WorkerThreadStack::Pop() {
  if (IsEmpty())
    return nullptr;
  WorkerThread* const worker = stack_.back();
  stack_.pop_back();
  if (!IsEmpty())
    stack_.back()->EndUnusedPeriod();
  return worker;
}

WorkerThread* WorkerThreadStack::Peek() const {
  if (IsEmpty())
    return nullptr;
  return stack_.back();
}

bool WorkerThreadStack::Contains(const WorkerThread* worker) const {
  return base::Contains(stack_, worker);
}

void WorkerThreadStack::Remove(const WorkerThread* worker) {
  DCHECK(!IsEmpty());
  DCHECK_NE(worker, stack_.back());
  auto it = std::find(stack_.begin(), stack_.end(), worker);
  DCHECK(it != stack_.end());
  DCHECK_NE(TimeTicks(), (*it)->GetLastUsedTime());
  stack_.erase(it);
}

}  // namespace internal
}  // namespace base
|
|
@ -0,0 +1,67 @@
|
|||
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TASK_THREAD_POOL_WORKER_THREAD_STACK_H_
#define BASE_TASK_THREAD_POOL_WORKER_THREAD_STACK_H_

#include <stddef.h>

#include <vector>

#include "base/base_export.h"
#include "base/macros.h"

namespace base {
namespace internal {

class WorkerThread;

// A stack of WorkerThreads which has custom logic to treat the worker on top
// of the stack as being "in-use" (so its time in that position doesn't count
// towards being inactive / reclaimable). Supports removal of arbitrary
// WorkerThreads. DCHECKs when a WorkerThread is inserted multiple times.
// WorkerThreads are not owned by the stack. Push() is amortized O(1). Pop(),
// Peek(), Size() and IsEmpty() are O(1). Contains() and Remove() are O(n).
// This class is NOT thread-safe.
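//
// A short usage sketch (illustrative only -- |idle_workers|, |worker_a| and
// |worker_b| are hypothetical):
//
//   WorkerThreadStack idle_workers;
//   idle_workers.Push(worker_a);  // |worker_a| is on top and counts as in-use.
//   idle_workers.Push(worker_b);  // |worker_b| is now on top; |worker_a| is
//                                 // flagged unused and starts accruing idle
//                                 // time.
//   WorkerThread* top = idle_workers.Pop();  // Returns |worker_b|; |worker_a|
//                                            // is back on top and in-use.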
class BASE_EXPORT WorkerThreadStack {
 public:
  WorkerThreadStack();
  ~WorkerThreadStack();

  // Inserts |worker| at the top of the stack. |worker| must not already be on
  // the stack. Flags the WorkerThread previously on top of the stack, if
  // any, as unused.
  void Push(WorkerThread* worker);

  // Removes the top WorkerThread from the stack and returns it. Returns
  // nullptr if the stack is empty. Flags the WorkerThread now on top of the
  // stack, if any, as being in-use.
  WorkerThread* Pop();

  // Returns the top WorkerThread from the stack, nullptr if empty.
  WorkerThread* Peek() const;

  // Returns true if |worker| is already on the stack.
  bool Contains(const WorkerThread* worker) const;

  // Removes |worker| from the stack. Must not be invoked for the first worker
  // on the stack.
  void Remove(const WorkerThread* worker);

  // Returns the number of WorkerThreads on the stack.
  size_t Size() const { return stack_.size(); }

  // Returns true if the stack is empty.
  bool IsEmpty() const { return stack_.empty(); }

 private:
  std::vector<WorkerThread*> stack_;

  DISALLOW_COPY_AND_ASSIGN(WorkerThreadStack);
};

}  // namespace internal
}  // namespace base

#endif  // BASE_TASK_THREAD_POOL_WORKER_THREAD_STACK_H_