Repo created

Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

base/task/common/checked_lock.h
@@ -0,0 +1,131 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_CHECKED_LOCK_H_
#define BASE_TASK_COMMON_CHECKED_LOCK_H_
#include <memory>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/task/common/checked_lock_impl.h"
#include "base/thread_annotations.h"
namespace base {
namespace internal {
// CheckedLock should be used anywhere a Lock would be used in the base/task
// impl. When DCHECK_IS_ON(), lock checking occurs. Otherwise, CheckedLock is
// equivalent to base::Lock.
//
// The shape of CheckedLock is as follows:
// CheckedLock()
// Default constructor, no predecessor lock.
// DCHECKs
// On Acquisition if any CheckedLock is acquired on this thread.
// Okay if a universal predecessor is acquired.
//
// CheckedLock(const CheckedLock* predecessor)
// Constructor that specifies an allowed predecessor for that lock.
// DCHECKs
// On Construction if |predecessor| forms a predecessor lock cycle.
// On Acquisition if the previous lock acquired on the thread is not
// either |predecessor| or a universal predecessor. Okay if there
// was no previous lock acquired.
//
// CheckedLock(UniversalPredecessor universal_predecessor)
// Constructor for a lock that will allow the acquisition of any lock after
// it, without needing to explicitly be named a predecessor. Can only be
// acquired if no locks are currently held by this thread.
// DCHECKs
// On Acquisition if any CheckedLock is acquired on this thread.
//
// void Acquire()
// Acquires the lock.
//
// void Release()
// Releases the lock.
//
// void AssertAcquired()
// DCHECKs if the lock is not acquired.
//
// std::unique_ptr<ConditionVariable> CreateConditionVariable()
// Creates a condition variable using this as a lock.
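//
// Example usage (an illustrative sketch; |lock_a| and |lock_b| are
// hypothetical names):
//
//   CheckedLock lock_a;                   // No predecessor.
//   CheckedLock lock_b(&lock_a);          // |lock_a| is an allowed
//                                         // predecessor of |lock_b|.
//
//   CheckedAutoLock auto_lock_a(lock_a);
//   CheckedAutoLock auto_lock_b(lock_b);  // OK: the predecessor is held.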
#if DCHECK_IS_ON()
class LOCKABLE CheckedLock : public CheckedLockImpl {
public:
CheckedLock() = default;
explicit CheckedLock(const CheckedLock* predecessor)
: CheckedLockImpl(predecessor) {}
explicit CheckedLock(UniversalPredecessor universal_predecessor)
: CheckedLockImpl(universal_predecessor) {}
};
#else // DCHECK_IS_ON()
class LOCKABLE CheckedLock : public Lock {
public:
CheckedLock() = default;
explicit CheckedLock(const CheckedLock*) {}
explicit CheckedLock(UniversalPredecessor) {}
static void AssertNoLockHeldOnCurrentThread() {}
std::unique_ptr<ConditionVariable> CreateConditionVariable() {
return std::unique_ptr<ConditionVariable>(new ConditionVariable(this));
}
};
#endif // DCHECK_IS_ON()
// Provides the same functionality as base::AutoLock for CheckedLock.
using CheckedAutoLock = internal::BasicAutoLock<CheckedLock>;
// Provides the same functionality as base::AutoUnlock for CheckedLock.
using CheckedAutoUnlock = internal::BasicAutoUnlock<CheckedLock>;
// Provides the same functionality as base::AutoLockMaybe for CheckedLock.
using CheckedAutoLockMaybe = internal::BasicAutoLockMaybe<CheckedLock>;
// Informs the clang thread safety analysis that an aliased lock is acquired.
// Because the clang thread safety analysis doesn't understand aliased locks
// [1], this code wouldn't compile without AnnotateAcquiredLockAlias:
//
// class Example {
// public:
// CheckedLock lock_;
// int value = 0 GUARDED_BY(lock_);
// };
//
// Example example;
// CheckedLock* acquired = &example.lock_;
// CheckedAutoLock auto_lock(*acquired);
// AnnotateAcquiredLockAlias annotate(*acquired, example.lock_);
// example.value = 42; // Doesn't compile without |annotate|.
//
// [1] https://clang.llvm.org/docs/ThreadSafetyAnalysis.html#no-alias-analysis
class SCOPED_LOCKABLE AnnotateAcquiredLockAlias {
public:
// |acquired_lock| is an acquired lock. |lock_alias| is an alias of
// |acquired_lock|.
AnnotateAcquiredLockAlias(const CheckedLock& acquired_lock,
const CheckedLock& lock_alias)
EXCLUSIVE_LOCK_FUNCTION(lock_alias)
: acquired_lock_(acquired_lock) {
DCHECK_EQ(&acquired_lock, &lock_alias);
acquired_lock_.AssertAcquired();
}
~AnnotateAcquiredLockAlias() UNLOCK_FUNCTION() {
acquired_lock_.AssertAcquired();
}
private:
const CheckedLock& acquired_lock_;
DISALLOW_COPY_AND_ASSIGN(AnnotateAcquiredLockAlias);
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_COMMON_CHECKED_LOCK_H_

base/task/common/checked_lock_impl.cc
@@ -0,0 +1,170 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/common/checked_lock_impl.h"
#include <algorithm>
#include <unordered_map>
#include <vector>
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/synchronization/condition_variable.h"
#include "base/task/common/checked_lock.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local.h"
namespace base {
namespace internal {
namespace {
class SafeAcquisitionTracker {
public:
SafeAcquisitionTracker() = default;
void RegisterLock(const CheckedLockImpl* const lock,
const CheckedLockImpl* const predecessor) {
DCHECK_NE(lock, predecessor) << "Reentrant locks are unsupported.";
AutoLock auto_lock(allowed_predecessor_map_lock_);
allowed_predecessor_map_[lock] = predecessor;
AssertSafePredecessor(lock);
}
void UnregisterLock(const CheckedLockImpl* const lock) {
AutoLock auto_lock(allowed_predecessor_map_lock_);
allowed_predecessor_map_.erase(lock);
}
void RecordAcquisition(const CheckedLockImpl* const lock) {
AssertSafeAcquire(lock);
GetAcquiredLocksOnCurrentThread()->push_back(lock);
}
void RecordRelease(const CheckedLockImpl* const lock) {
LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
const auto iter_at_lock =
std::find(acquired_locks->begin(), acquired_locks->end(), lock);
DCHECK(iter_at_lock != acquired_locks->end());
acquired_locks->erase(iter_at_lock);
}
void AssertNoLockHeldOnCurrentThread() {
DCHECK(GetAcquiredLocksOnCurrentThread()->empty());
}
private:
using LockVector = std::vector<const CheckedLockImpl*>;
using PredecessorMap =
std::unordered_map<const CheckedLockImpl*, const CheckedLockImpl*>;
// Asserts that |lock| is safe to acquire. Must be called before the
// acquisition is actually recorded.
void AssertSafeAcquire(const CheckedLockImpl* const lock) {
const LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread();
// If the thread currently holds no locks, this is inherently safe.
if (acquired_locks->empty())
return;
// A universal predecessor may not be acquired after any other lock.
DCHECK(!lock->is_universal_predecessor());
// Otherwise, make sure that the previous lock acquired is either an
// allowed predecessor for this lock or a universal predecessor.
const CheckedLockImpl* previous_lock = acquired_locks->back();
if (previous_lock->is_universal_predecessor())
return;
AutoLock auto_lock(allowed_predecessor_map_lock_);
// Using at() is exception-safe here as |lock| was registered already.
const CheckedLockImpl* allowed_predecessor =
allowed_predecessor_map_.at(lock);
DCHECK_EQ(previous_lock, allowed_predecessor);
}
// Asserts that |lock|'s registered predecessor is safe. Because
// CheckedLocks are registered at construction time and any predecessor
// specified on a CheckedLock must already exist, the first registered
// CheckedLock in a potential chain must have a null predecessor and is thus
// cycle-free. Any subsequent CheckedLock with a predecessor must come from
// the set of registered CheckedLocks. Since the registered CheckedLocks
// only contain cycle-free CheckedLocks, this subsequent CheckedLock is
// itself cycle-free and may be safely added to the registered CheckedLock
// set.
void AssertSafePredecessor(const CheckedLockImpl* lock) const {
allowed_predecessor_map_lock_.AssertAcquired();
// Using at() is exception-safe here as |lock| was registered already.
const CheckedLockImpl* predecessor = allowed_predecessor_map_.at(lock);
if (predecessor) {
DCHECK(allowed_predecessor_map_.find(predecessor) !=
allowed_predecessor_map_.end())
<< "CheckedLock was registered before its predecessor. "
<< "Potential cycle detected";
}
}
LockVector* GetAcquiredLocksOnCurrentThread() {
if (!tls_acquired_locks_.Get())
tls_acquired_locks_.Set(std::make_unique<LockVector>());
return tls_acquired_locks_.Get();
}
// Synchronizes access to |allowed_predecessor_map_|.
Lock allowed_predecessor_map_lock_;
// A map of allowed predecessors.
PredecessorMap allowed_predecessor_map_;
// A thread-local slot holding a vector of locks currently acquired on the
// current thread.
ThreadLocalOwnedPointer<LockVector> tls_acquired_locks_;
DISALLOW_COPY_AND_ASSIGN(SafeAcquisitionTracker);
};
LazyInstance<SafeAcquisitionTracker>::Leaky g_safe_acquisition_tracker =
LAZY_INSTANCE_INITIALIZER;
} // namespace
CheckedLockImpl::CheckedLockImpl() : CheckedLockImpl(nullptr) {}
CheckedLockImpl::CheckedLockImpl(const CheckedLockImpl* predecessor)
: is_universal_predecessor_(false) {
g_safe_acquisition_tracker.Get().RegisterLock(this, predecessor);
}
CheckedLockImpl::CheckedLockImpl(UniversalPredecessor)
: is_universal_predecessor_(true) {}
CheckedLockImpl::~CheckedLockImpl() {
g_safe_acquisition_tracker.Get().UnregisterLock(this);
}
void CheckedLockImpl::AssertNoLockHeldOnCurrentThread() {
g_safe_acquisition_tracker.Get().AssertNoLockHeldOnCurrentThread();
}
void CheckedLockImpl::Acquire() {
lock_.Acquire();
g_safe_acquisition_tracker.Get().RecordAcquisition(this);
}
void CheckedLockImpl::Release() {
lock_.Release();
g_safe_acquisition_tracker.Get().RecordRelease(this);
}
void CheckedLockImpl::AssertAcquired() const {
lock_.AssertAcquired();
}
std::unique_ptr<ConditionVariable> CheckedLockImpl::CreateConditionVariable() {
return std::make_unique<ConditionVariable>(&lock_);
}
} // namespace internal
} // namespace base
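A small sketch of what the tracker enforces (hypothetical names; requires DCHECK_IS_ON() so that CheckedLock is backed by CheckedLockImpl):

base::internal::CheckedLock lock_a;
base::internal::CheckedLock lock_b(&lock_a);

void AcquireInOrder() {
  base::internal::CheckedAutoLock hold_a(lock_a);
  base::internal::CheckedAutoLock hold_b(lock_b);  // OK: |lock_a| is held.
}

void AcquireOutOfOrder() {
  base::internal::CheckedAutoLock hold_b(lock_b);  // OK: no locks held yet.
  base::internal::CheckedAutoLock hold_a(lock_a);  // DCHECK_EQ fails in
                                                   // AssertSafeAcquire():
                                                   // |lock_b| is not the
                                                   // registered predecessor
                                                   // of |lock_a|.
}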

base/task/common/checked_lock_impl.h
@@ -0,0 +1,54 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_CHECKED_LOCK_IMPL_H_
#define BASE_TASK_COMMON_CHECKED_LOCK_IMPL_H_
#include <memory>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/synchronization/lock.h"
namespace base {
class ConditionVariable;
namespace internal {
struct UniversalPredecessor {};
// A regular lock with simple deadlock correctness checking.
// This lock tracks all registered locks to make sure that locks are acquired
// in an expected order.
// See checked_lock.h for details.
class BASE_EXPORT CheckedLockImpl {
public:
CheckedLockImpl();
explicit CheckedLockImpl(const CheckedLockImpl* predecessor);
explicit CheckedLockImpl(UniversalPredecessor);
~CheckedLockImpl();
static void AssertNoLockHeldOnCurrentThread();
void Acquire() EXCLUSIVE_LOCK_FUNCTION(lock_);
void Release() UNLOCK_FUNCTION(lock_);
void AssertAcquired() const;
std::unique_ptr<ConditionVariable> CreateConditionVariable();
bool is_universal_predecessor() const { return is_universal_predecessor_; }
private:
Lock lock_;
const bool is_universal_predecessor_;
DISALLOW_COPY_AND_ASSIGN(CheckedLockImpl);
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_COMMON_CHECKED_LOCK_IMPL_H_

base/task/common/intrusive_heap.h
@@ -0,0 +1,81 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_INTRUSIVE_HEAP_H_
#define BASE_TASK_COMMON_INTRUSIVE_HEAP_H_
#include "base/containers/intrusive_heap.h"
namespace base {
namespace internal {
using HeapHandle = base::HeapHandle;
template <typename T>
struct IntrusiveHeapImpl {
struct GreaterUsingLessEqual {
bool operator()(const T& t1, const T& t2) const { return t2 <= t1; }
};
using type = base::IntrusiveHeap<T, GreaterUsingLessEqual>;
};
// base/task wants a min-heap that uses the <= operator, whereas
// base::IntrusiveHeap is a max-heap by default. This is a very thin adapter
// over that class that exposes minimal functionality required by the
// base/task IntrusiveHeap clients.
template <typename T>
class IntrusiveHeap : private IntrusiveHeapImpl<T>::type {
public:
using IntrusiveHeapImplType = typename IntrusiveHeapImpl<T>::type;
// The majority of sets in the scheduler have 0-3 items in them (a few will
// have perhaps up to 100), so this means we usually only have to allocate
// memory once.
static constexpr size_t kMinimumHeapSize = 4;
IntrusiveHeap() { IntrusiveHeapImplType::reserve(kMinimumHeapSize); }
~IntrusiveHeap() = default;
IntrusiveHeap& operator=(IntrusiveHeap&& other) = default;
bool empty() const { return IntrusiveHeapImplType::empty(); }
size_t size() const { return IntrusiveHeapImplType::size(); }
void Clear() {
IntrusiveHeapImplType::clear();
IntrusiveHeapImplType::reserve(kMinimumHeapSize);
}
const T& Min() const { return IntrusiveHeapImplType::top(); }
void Pop() { IntrusiveHeapImplType::pop(); }
void insert(T&& element) {
IntrusiveHeapImplType::insert(std::move(element));
}
void erase(HeapHandle handle) { IntrusiveHeapImplType::erase(handle); }
void ReplaceMin(T&& element) {
IntrusiveHeapImplType::ReplaceTop(std::move(element));
}
void ChangeKey(HeapHandle handle, T&& element) {
IntrusiveHeapImplType::Replace(handle, std::move(element));
}
const T& at(HeapHandle handle) const {
return IntrusiveHeapImplType::at(handle);
}
// Caution, mutating the heap invalidates iterators!
const T* begin() const { return IntrusiveHeapImplType::data(); }
const T* end() const { return IntrusiveHeapImplType::data() + size(); }
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_COMMON_INTRUSIVE_HEAP_H_
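Since base::IntrusiveHeap is a max-heap, GreaterUsingLessEqual simply swaps the comparator's arguments so that the smallest element surfaces at the top. The same trick can be seen in isolation with std::priority_queue (a self-contained sketch; a strict comparator is used here because the standard heap algorithms require one, whereas base/task clients provide operator<=):

#include <cassert>
#include <queue>
#include <vector>

// Swapping the argument order of the comparison turns the default max-heap
// into a min-heap.
struct GreaterUsingLess {
  bool operator()(int t1, int t2) const { return t2 < t1; }
};

int main() {
  std::priority_queue<int, std::vector<int>, GreaterUsingLess> min_heap;
  min_heap.push(3);
  min_heap.push(1);
  min_heap.push(2);
  assert(min_heap.top() == 1);  // top() now yields the minimum.
  return 0;
}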

base/task/common/operations_controller.cc
@@ -0,0 +1,104 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/common/operations_controller.h"
#include "base/logging.h"
namespace base {
namespace internal {
OperationsController::OperationsController() = default;
OperationsController::~OperationsController() {
#if DCHECK_IS_ON()
// An OperationsController may only be deleted when it was either not
// accepting operations or after it was shutdown and there are no in flight
// attempts to perform operations.
auto value = state_and_count_.load();
DCHECK(
ExtractState(value) == State::kRejectingOperations ||
(ExtractState(value) == State::kShuttingDown && ExtractCount(value) == 0))
<< value;
#endif
}
bool OperationsController::StartAcceptingOperations() {
// Release semantics are required to ensure that all memory accesses made on
// this thread happen-before any others done on a thread which is later
// allowed to perform an operation.
auto prev_value = state_and_count_.fetch_or(kAcceptingOperationsBitMask,
std::memory_order_release);
DCHECK_EQ(ExtractState(prev_value), State::kRejectingOperations);
// The count is the number of rejected operations; unwind them now.
auto num_rejected = ExtractCount(prev_value);
DecrementBy(num_rejected);
return num_rejected != 0;
}
OperationsController::OperationToken OperationsController::TryBeginOperation() {
// Acquire semantics are required to ensure that a thread which is allowed to
// perform an operation sees all the memory side-effects that happened-before
// StartAcceptingOperations(). They're also required so that no operations on
// this thread (e.g. the operation itself) can be reordered before this one.
auto prev_value = state_and_count_.fetch_add(1, std::memory_order_acquire);
switch (ExtractState(prev_value)) {
case State::kRejectingOperations:
return OperationToken(nullptr);
case State::kAcceptingOperations:
return OperationToken(this);
case State::kShuttingDown:
DecrementBy(1);
return OperationToken(nullptr);
}
}
void OperationsController::ShutdownAndWaitForZeroOperations() {
// Acquire semantics are required to guarantee that all memory side-effects
// made by other threads that were allowed to perform operations are
// synchronized with this thread before it returns from this method.
auto prev_value = state_and_count_.fetch_or(kShuttingDownBitMask,
std::memory_order_acquire);
switch (ExtractState(prev_value)) {
case State::kRejectingOperations:
// The count is the number of rejected operations; unwind them now.
DecrementBy(ExtractCount(prev_value));
break;
case State::kAcceptingOperations:
if (ExtractCount(prev_value) != 0) {
shutdown_complete_.Wait();
}
break;
case State::kShuttingDown:
DCHECK(false) << "Multiple calls to ShutdownAndWaitForZeroOperations()";
break;
}
}
OperationsController::State OperationsController::ExtractState(uint32_t value) {
if (value & kShuttingDownBitMask) {
return State::kShuttingDown;
} else if (value & kAcceptingOperationsBitMask) {
return State::kAcceptingOperations;
} else {
return State::kRejectingOperations;
}
}
void OperationsController::DecrementBy(uint32_t n) {
// Release semantics are required to ensure that no operation on the current
// thread (e.g. the operation itself) can be reordered after this one.
auto prev_value = state_and_count_.fetch_sub(n, std::memory_order_release);
DCHECK_LE(n, ExtractCount(prev_value)) << "Decrement underflow";
if (ExtractState(prev_value) == State::kShuttingDown &&
ExtractCount(prev_value) == n) {
shutdown_complete_.Signal();
}
}
} // namespace internal
} // namespace base
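A minimal sketch of the intended call pattern (DoOperation() is a hypothetical stand-in for the caller's work):

base::internal::OperationsController controller;
controller.StartAcceptingOperations();

// On any thread:
if (auto token = controller.TryBeginOperation()) {
  // Guaranteed to happen-after StartAcceptingOperations() and to complete
  // before ShutdownAndWaitForZeroOperations() returns.
  DoOperation();  // Hypothetical.
}  // Destroying |token| decrements the in-flight count.

controller.ShutdownAndWaitForZeroOperations();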

base/task/common/operations_controller.h
@@ -0,0 +1,151 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_OPERATIONS_CONTROLLER_H_
#define BASE_TASK_COMMON_OPERATIONS_CONTROLLER_H_
#include <atomic>
#include <cstdint>
#include "base/synchronization/waitable_event.h"
namespace base {
namespace internal {
// A lock-free, thread-safe controller for coordinating critical
// multi-threaded operations.
//
// The controller is used to determine if operations are allowed, and to keep
// track of how many are currently active. Users will call TryBeginOperation()
// before starting such operations. If the call succeeds the user can run the
// operation and the controller will keep track of it until the user signals
// that the operation is completed. No operations are allowed before
// StartAcceptingOperations() is called, or after
// ShutdownAndWaitForZeroOperations() is called.
//
// There is no explicit way of telling the controller when an operation is
// completed, instead for convenience TryBeginOperation() will return a RAII
// like object that will do so on destruction.
//
// For example:
//
// OperationsController controller_;
//
// void SetUp() {
// controller_.StartAcceptingOperations();
// }
//
// void TearDown() {
// controller_.ShutdownAndWaitForZeroOperations();
// }
//
// void MaybeRunOperation() {
// auto operation_token = controller_.TryBeginOperation();
// if (operation_token) {
// Process();
// }
// }
//
// This class is thread-safe, but note that StartAcceptingOperations() can
// never be called after ShutdownAndWaitForZeroOperations().
class BASE_EXPORT OperationsController {
public:
// The owner of an OperationToken which evaluates to true can safely perform
// an operation while being certain it happens-after
// StartAcceptingOperations() and happens-before
// ShutdownAndWaitForZeroOperations(). Releasing this OperationToken
// relinquishes this right.
//
// This class is thread-safe
class OperationToken {
public:
~OperationToken() {
if (outer_)
outer_->DecrementBy(1);
}
OperationToken(const OperationToken&) = delete;
OperationToken(OperationToken&& other) {
this->outer_ = other.outer_;
other.outer_ = nullptr;
}
operator bool() const { return !!outer_; }
private:
friend class OperationsController;
explicit OperationToken(OperationsController* outer) : outer_(outer) {}
OperationsController* outer_;
};
OperationsController();
// Users must call ShutdownAndWaitForZeroOperations() before destroying an
// instance of this class if StartAcceptingOperations() was called.
~OperationsController();
OperationsController(const OperationsController&) = delete;
OperationsController& operator=(const OperationsController&) = delete;
// Starts to accept operations (before this point TryBeginOperation() returns
// an invalid token). Returns true if an attempt to perform an operation was
// made and denied before StartAcceptingOperations() was called. Can be called
// at most once, never after ShutdownAndWaitForZeroOperations().
bool StartAcceptingOperations();
// Returns a RAII like object that implicitly converts to true if operations
// are allowed i.e. if this call happens-after StartAcceptingOperations() and
// happens-before Shutdown(), otherwise the object will convert to false. On
// successful return, this OperationsController will keep track of the
// operation until the returned object goes out of scope.
OperationToken TryBeginOperation();
// Prevents further calls to TryBeginOperation() from succeeding and waits for
// all the ongoing operations to complete.
//
// Attention: Can only be called once.
void ShutdownAndWaitForZeroOperations();
private:
// Atomic representation of the state of this class. We use the upper 2 bits
// to keep track of flag like values and the remainder bits are used as a
// counter. The 2 flags are used to represent 3 different states:
//
// State | AcceptOperations Bit | ShuttingDown Bit
// --------------------------------------------------------------
// kRejectingOperations | 0 | 0
// kAcceptingOperations | 1 | 0
// kShuttingDown | * | 1
//
// The counter keeps track of the rejected operations when we are in
// the kRejectingOperations state, the number of inflight operations
// otherwise. If the count reaches zero and we are in the shutting down state
// |shutdown_complete_| will be signaled.
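//
// For example, kAcceptingOperations with five operations in flight is
// (1 << 30) | 5 == 0x40000005: ExtractState() inspects the flag bits and
// ExtractCount() masks them off to recover the count of 5.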
static constexpr uint32_t kShuttingDownBitMask = uint32_t{1} << 31;
static constexpr uint32_t kAcceptingOperationsBitMask = uint32_t{1} << 30;
static constexpr uint32_t kFlagsBitMask =
(kShuttingDownBitMask | kAcceptingOperationsBitMask);
static constexpr uint32_t kCountBitMask = ~kFlagsBitMask;
enum class State {
kRejectingOperations,
kAcceptingOperations,
kShuttingDown,
};
// Helper methods for the bit fiddling. Pass a |state_and_count_| value to
// extract state or count out of it.
static uint32_t ExtractCount(uint32_t value) { return value & kCountBitMask; }
static State ExtractState(uint32_t value);
// Decrements the counter by |n| and signals |shutdown_complete_| if needed.
void DecrementBy(uint32_t n);
std::atomic<uint32_t> state_and_count_{0};
WaitableEvent shutdown_complete_;
};
} // namespace internal
} // namespace base
#endif // BASE_TASK_COMMON_OPERATIONS_CONTROLLER_H_

base/task/common/scoped_defer_task_posting.cc
@@ -0,0 +1,97 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/common/scoped_defer_task_posting.h"
#include "base/no_destructor.h"
#include "base/threading/thread_local.h"
namespace base {
namespace {
ThreadLocalPointer<ScopedDeferTaskPosting>& GetScopedDeferTaskPostingTLS() {
static NoDestructor<ThreadLocalPointer<ScopedDeferTaskPosting>> tls;
return *tls;
}
} // namespace
// static
void ScopedDeferTaskPosting::PostOrDefer(
scoped_refptr<SequencedTaskRunner> task_runner,
const Location& from_here,
OnceClosure task,
base::TimeDelta delay) {
ScopedDeferTaskPosting* scope = Get();
if (scope) {
scope->DeferTaskPosting(std::move(task_runner), from_here, std::move(task),
delay);
return;
}
task_runner->PostDelayedTask(from_here, std::move(task), delay);
}
// static
ScopedDeferTaskPosting* ScopedDeferTaskPosting::Get() {
return GetScopedDeferTaskPostingTLS().Get();
}
// static
bool ScopedDeferTaskPosting::Set(ScopedDeferTaskPosting* scope) {
// We can post a task from within a ScheduleWork in some tests, so we can
// get nested scopes. In this case ignore all except the top one.
if (Get() && scope)
return false;
GetScopedDeferTaskPostingTLS().Set(scope);
return true;
}
// static
bool ScopedDeferTaskPosting::IsPresent() {
return !!Get();
}
ScopedDeferTaskPosting::ScopedDeferTaskPosting() {
top_level_scope_ = Set(this);
}
ScopedDeferTaskPosting::~ScopedDeferTaskPosting() {
if (!top_level_scope_) {
DCHECK(deferred_tasks_.empty());
return;
}
Set(nullptr);
for (DeferredTask& deferred_task : deferred_tasks_) {
deferred_task.task_runner->PostDelayedTask(deferred_task.from_here,
std::move(deferred_task.task),
deferred_task.delay);
}
}
ScopedDeferTaskPosting::DeferredTask::DeferredTask(
scoped_refptr<SequencedTaskRunner> task_runner,
Location from_here,
OnceClosure task,
base::TimeDelta delay)
: task_runner(std::move(task_runner)),
from_here(from_here),
task(std::move(task)),
delay(delay) {}
ScopedDeferTaskPosting::DeferredTask::DeferredTask(DeferredTask&&) = default;
ScopedDeferTaskPosting::DeferredTask::~DeferredTask() = default;
void ScopedDeferTaskPosting::DeferTaskPosting(
scoped_refptr<SequencedTaskRunner> task_runner,
const Location& from_here,
OnceClosure task,
base::TimeDelta delay) {
deferred_tasks_.push_back(
{std::move(task_runner), from_here, std::move(task), delay});
}
} // namespace base

base/task/common/scoped_defer_task_posting.h
@@ -0,0 +1,75 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_SCOPED_DEFER_TASK_POSTING_H_
#define BASE_TASK_COMMON_SCOPED_DEFER_TASK_POSTING_H_
#include "base/base_export.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/sequenced_task_runner.h"
namespace base {
// Tracing wants to post tasks from within a trace event within PostTask, but
// this can lead to a deadlock. This scope ensures that the tasks in question
// are posted outside the scope of the lock.
// NOTE: This scope affects only the thread it is created on. All other threads
// still can post tasks.
//
// TODO(altimin): It should be possible to get rid of this scope, but this
// requires refactoring TimeDomain to ensure that TimeDomain never changes and
// we can read current time without grabbing a lock.
class BASE_EXPORT ScopedDeferTaskPosting {
public:
static void PostOrDefer(scoped_refptr<SequencedTaskRunner> task_runner,
const Location& from_here,
OnceClosure task,
base::TimeDelta delay);
static bool IsPresent();
ScopedDeferTaskPosting();
~ScopedDeferTaskPosting();
private:
static ScopedDeferTaskPosting* Get();
// Returns whether the |scope| was set as active, which happens only
// when the scope wasn't set before.
static bool Set(ScopedDeferTaskPosting* scope);
void DeferTaskPosting(scoped_refptr<SequencedTaskRunner> task_runner,
const Location& from_here,
OnceClosure task,
base::TimeDelta delay);
struct DeferredTask {
DeferredTask(scoped_refptr<SequencedTaskRunner> task_runner,
Location from_here,
OnceClosure task,
base::TimeDelta delay);
DeferredTask(DeferredTask&& task);
~DeferredTask();
scoped_refptr<SequencedTaskRunner> task_runner;
Location from_here;
OnceClosure task;
base::TimeDelta delay;
DISALLOW_COPY_AND_ASSIGN(DeferredTask);
};
std::vector<DeferredTask> deferred_tasks_;
// Scopes can be nested (e.g. ScheduleWork inside PostTasks can post a task
// to another task runner), so we want to know whether the scope is top-level
// or not.
bool top_level_scope_ = false;
DISALLOW_COPY_AND_ASSIGN(ScopedDeferTaskPosting);
};
} // namespace base
#endif // BASE_TASK_COMMON_SCOPED_DEFER_TASK_POSTING_H_
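A sketch of the intended use (illustrative; |runner| and |work| are assumed to exist in the caller's context):

{
  base::ScopedDeferTaskPosting defer_posting;
  // While this scope is active on the current thread, PostOrDefer() queues
  // the task inside the scope instead of posting it immediately.
  base::ScopedDeferTaskPosting::PostOrDefer(
      runner, FROM_HERE, std::move(work), base::TimeDelta());
}  // Scope destruction posts the deferred task via PostDelayedTask().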

base/task/common/task_annotator.cc
@@ -0,0 +1,191 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/task/common/task_annotator.h"
#include <array>
#include "base/debug/activity_tracker.h"
#include "base/debug/alias.h"
#include "base/no_destructor.h"
#include "base/threading/thread_local.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace {
TaskAnnotator::ObserverForTesting* g_task_annotator_observer = nullptr;
// Used as a sentinel to determine if a TLS-stored PendingTask is a dummy one.
static constexpr int kSentinelSequenceNum =
static_cast<int>(0xF00DBAADF00DBAAD);
// Returns the TLS slot that stores the PendingTask currently in progress on
// each thread. Used to allow creating a breadcrumb of program counters on the
// stack to help identify a task's origin in crashes.
ThreadLocalPointer<PendingTask>* GetTLSForCurrentPendingTask() {
static NoDestructor<ThreadLocalPointer<PendingTask>> instance;
return instance.get();
}
// Determines whether or not the given |task| is a dummy pending task that has
// been injected by ScopedSetIpcHash solely for the purposes of
// tracking IPC context.
bool IsDummyPendingTask(const PendingTask* task) {
if (task->sequence_num == kSentinelSequenceNum &&
!task->posted_from.has_source_info() &&
!task->posted_from.program_counter()) {
return true;
}
return false;
}
} // namespace
const PendingTask* TaskAnnotator::CurrentTaskForThread() {
auto* current_task = GetTLSForCurrentPendingTask()->Get();
// Don't return "dummy" current tasks that are only used for storing IPC
// context.
if (current_task && IsDummyPendingTask(current_task))
return nullptr;
return current_task;
}
TaskAnnotator::TaskAnnotator() = default;
TaskAnnotator::~TaskAnnotator() = default;
void TaskAnnotator::WillQueueTask(const char* trace_event_name,
PendingTask* pending_task,
const char* task_queue_name) {
DCHECK(trace_event_name);
DCHECK(pending_task);
DCHECK(task_queue_name);
TRACE_EVENT_WITH_FLOW1(
TRACE_DISABLED_BY_DEFAULT("toplevel.flow"), trace_event_name,
TRACE_ID_LOCAL(GetTaskTraceID(*pending_task)), TRACE_EVENT_FLAG_FLOW_OUT,
"task_queue_name", task_queue_name);
DCHECK(!pending_task->task_backtrace[0])
<< "Task backtrace was already set, task posted twice??";
if (pending_task->task_backtrace[0])
return;
const auto* parent_task = CurrentTaskForThread();
if (!parent_task)
return;
pending_task->ipc_hash = parent_task->ipc_hash;
pending_task->task_backtrace[0] = parent_task->posted_from.program_counter();
std::copy(parent_task->task_backtrace.begin(),
parent_task->task_backtrace.end() - 1,
pending_task->task_backtrace.begin() + 1);
pending_task->task_backtrace_overflow =
parent_task->task_backtrace_overflow ||
parent_task->task_backtrace.back() != nullptr;
}
void TaskAnnotator::RunTask(const char* trace_event_name,
PendingTask* pending_task) {
DCHECK(trace_event_name);
DCHECK(pending_task);
debug::ScopedTaskRunActivity task_activity(*pending_task);
TRACE_EVENT1(TRACE_DISABLED_BY_DEFAULT("toplevel.ipc"),
"TaskAnnotator::RunTask", "ipc_hash", pending_task->ipc_hash);
TRACE_EVENT_WITH_FLOW0(
TRACE_DISABLED_BY_DEFAULT("toplevel.flow"), trace_event_name,
TRACE_ID_LOCAL(GetTaskTraceID(*pending_task)), TRACE_EVENT_FLAG_FLOW_IN);
// Before running the task, store the IPC context and the task backtrace with
// the chain of PostTasks that resulted in this call and deliberately alias it
// to ensure it is on the stack if the task crashes. Be careful not to assume
// that the variable itself will have the expected value when displayed by a
// debugger in an optimized build. Look at a memory dump of the stack.
static constexpr int kStackTaskTraceSnapshotSize =
PendingTask::kTaskBacktraceLength + 4;
std::array<const void*, kStackTaskTraceSnapshotSize> task_backtrace;
// Store a marker to locate |task_backtrace| content easily on a memory
// dump. The layout is as follows:
//
// +-------------+----+---------+-----+-----------+----------+-------------+
// | Head Marker | PC | frame 0 | ... | frame N-1 | IPC hash | Tail Marker |
// +-------------+----+---------+-----+-----------+----------+-------------+
//
// Markers glossary (compliments of wez):
// cool code,do it dude!
// 0x c001 c0de d0 17 d00d
// o dude,i did it biig
// 0x 0 d00d 1 d1d 17 8119
task_backtrace.front() = reinterpret_cast<void*>(0xc001c0ded017d00d);
task_backtrace.back() = reinterpret_cast<void*>(0x0d00d1d1d178119);
task_backtrace[1] = pending_task->posted_from.program_counter();
std::copy(pending_task->task_backtrace.begin(),
pending_task->task_backtrace.end(), task_backtrace.begin() + 2);
task_backtrace[kStackTaskTraceSnapshotSize - 2] =
reinterpret_cast<void*>(pending_task->ipc_hash);
debug::Alias(&task_backtrace);
auto* tls = GetTLSForCurrentPendingTask();
auto* previous_pending_task = tls->Get();
tls->Set(pending_task);
if (g_task_annotator_observer)
g_task_annotator_observer->BeforeRunTask(pending_task);
std::move(pending_task->task).Run();
tls->Set(previous_pending_task);
}
uint64_t TaskAnnotator::GetTaskTraceID(const PendingTask& task) const {
return (static_cast<uint64_t>(task.sequence_num) << 32) |
((static_cast<uint64_t>(reinterpret_cast<intptr_t>(this)) << 32) >>
32);
}
// static
void TaskAnnotator::RegisterObserverForTesting(ObserverForTesting* observer) {
DCHECK(!g_task_annotator_observer);
g_task_annotator_observer = observer;
}
// static
void TaskAnnotator::ClearObserverForTesting() {
g_task_annotator_observer = nullptr;
}
TaskAnnotator::ScopedSetIpcHash::ScopedSetIpcHash(uint32_t ipc_hash) {
// We store the IPC context in the currently running task. If there is none
// then introduce a dummy task.
auto* tls = GetTLSForCurrentPendingTask();
auto* current_task = tls->Get();
if (!current_task) {
dummy_pending_task_ = std::make_unique<PendingTask>();
dummy_pending_task_->sequence_num = kSentinelSequenceNum;
current_task = dummy_pending_task_.get();
tls->Set(current_task);
}
old_ipc_hash_ = current_task->ipc_hash;
current_task->ipc_hash = ipc_hash;
}
TaskAnnotator::ScopedSetIpcHash::~ScopedSetIpcHash() {
auto* tls = GetTLSForCurrentPendingTask();
auto* current_task = tls->Get();
DCHECK(current_task);
if (current_task == dummy_pending_task_.get()) {
tls->Set(nullptr);
} else {
current_task->ipc_hash = old_ipc_hash_;
}
}
} // namespace base
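The backtrace chaining in WillQueueTask() behaves like a shift register. With a backtrace length of four (illustrative), posting a task from within a running task yields:

  parent: posted_from = PC3, task_backtrace = {PC2, PC1, PC0, null}
  child:  task_backtrace = {PC3, PC2, PC1, PC0}

The parent's posting PC is prepended, the parent's last slot is dropped, and task_backtrace_overflow records whether a real frame was lost in the process.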

base/task/common/task_annotator.h
@@ -0,0 +1,83 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TASK_COMMON_TASK_ANNOTATOR_H_
#define BASE_TASK_COMMON_TASK_ANNOTATOR_H_
#include <stdint.h>
#include <memory>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/pending_task.h"
namespace base {
// Implements common debug annotations for posted tasks. This includes data
// such as task origins, IPC message contexts, queueing durations and memory
// usage.
class BASE_EXPORT TaskAnnotator {
public:
class ObserverForTesting {
public:
// Invoked just before RunTask() in the scope in which the task is about to
// be executed.
virtual void BeforeRunTask(const PendingTask* pending_task) = 0;
};
// This is used to set the |ipc_hash| field for PendingTasks. It is intended
// to be used only from within generated IPC handler dispatch code.
class ScopedSetIpcHash;
static const PendingTask* CurrentTaskForThread();
TaskAnnotator();
~TaskAnnotator();
// Called to indicate that a task is about to be queued to run in the future,
// giving one last chance for this TaskAnnotator to add metadata to
// |pending_task| before it is moved into the queue. |task_queue_name| must
// live for the duration of the process.
void WillQueueTask(const char* trace_event_name,
PendingTask* pending_task,
const char* task_queue_name);
// Run a previously queued task.
void RunTask(const char* trace_event_name, PendingTask* pending_task);
// Creates a process-wide unique ID to represent this task in trace events.
// This will be mangled with a Process ID hash to reduce the likelihood of
// colliding with TaskAnnotator pointers on other processes. Callers may use
// this when generating their own flow events (i.e. when passing
// |queue_function == nullptr| in above methods).
uint64_t GetTaskTraceID(const PendingTask& task) const;
private:
friend class TaskAnnotatorBacktraceIntegrationTest;
// Registers an ObserverForTesting that will be invoked by all TaskAnnotators'
// RunTask(). This registration and the implementation of BeforeRunTask() are
// responsible to ensure thread-safety.
static void RegisterObserverForTesting(ObserverForTesting* observer);
static void ClearObserverForTesting();
DISALLOW_COPY_AND_ASSIGN(TaskAnnotator);
};
class BASE_EXPORT TaskAnnotator::ScopedSetIpcHash {
public:
explicit ScopedSetIpcHash(uint32_t ipc_hash);
~ScopedSetIpcHash();
private:
std::unique_ptr<PendingTask> dummy_pending_task_;
uint32_t old_ipc_hash_ = 0;
DISALLOW_COPY_AND_ASSIGN(ScopedSetIpcHash);
};
} // namespace base
#endif // BASE_TASK_COMMON_TASK_ANNOTATOR_H_
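The ID construction in GetTaskTraceID() can be restated as a standalone sketch (not the shipped code): the high 32 bits carry the task's sequence number and the low 32 bits carry the low half of the TaskAnnotator's address.

#include <cstdint>

uint64_t TaskTraceId(int sequence_num, const void* annotator) {
  // High half: sequence number. Low half: annotator address, truncated.
  return (static_cast<uint64_t>(sequence_num) << 32) |
         (reinterpret_cast<uintptr_t>(annotator) & 0xFFFFFFFFu);
}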