Repo created

Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

base/synchronization/atomic_flag.cc
@@ -0,0 +1,31 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/synchronization/atomic_flag.h"
#include "base/logging.h"
namespace base {
AtomicFlag::AtomicFlag() {
// It doesn't matter where the AtomicFlag is built so long as it's always
// Set() from the same sequence after. Note: the sequencing requirements are
// necessary for IsSet()'s callers to know which sequence's memory operations
// they are synchronized with.
DETACH_FROM_SEQUENCE(set_sequence_checker_);
}
AtomicFlag::~AtomicFlag() = default;
void AtomicFlag::Set() {
DCHECK_CALLED_ON_VALID_SEQUENCE(set_sequence_checker_);
flag_.store(1, std::memory_order_release);
}
void AtomicFlag::UnsafeResetForTesting() {
DETACH_FROM_SEQUENCE(set_sequence_checker_);
flag_.store(0, std::memory_order_release);
}
} // namespace base

base/synchronization/atomic_flag.h
@@ -0,0 +1,50 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
#define BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
#include <stdint.h>
#include <atomic>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/sequence_checker.h"
namespace base {
// A flag that can safely be set from one thread and read from other threads.
//
// This class IS NOT intended for synchronization between threads.
class BASE_EXPORT AtomicFlag {
public:
AtomicFlag();
~AtomicFlag();
// Set the flag. Must always be called from the same sequence.
void Set();
// Returns true iff the flag was set. If this returns true, the current thread
// is guaranteed to be synchronized with all memory operations on the sequence
// which invoked Set() up until at least the first call to Set() on it.
bool IsSet() const {
// Inline here: this has a measurable performance impact on base::WeakPtr.
return flag_.load(std::memory_order_acquire) != 0;
}
// Resets the flag. Be careful when using this: callers might not expect
// IsSet() to return false after returning true once.
void UnsafeResetForTesting();
private:
std::atomic<uint_fast8_t> flag_{0};
SEQUENCE_CHECKER(set_sequence_checker_);
DISALLOW_COPY_AND_ASSIGN(AtomicFlag);
};
} // namespace base
#endif // BASE_SYNCHRONIZATION_ATOMIC_FLAG_H_
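
A minimal usage sketch (hypothetical; not part of this commit): the owning sequence calls Set(), while any thread may poll IsSet() and rely on the acquire/release pairing documented above.

// Hypothetical sketch, not part of this commit.
#include "base/synchronization/atomic_flag.h"
class CancelableWorker {
 public:
  // Must always be called from the same (owning) sequence.
  void Cancel() { cancel_flag_.Set(); }
  // Safe from any thread: the acquire-load in IsSet() pairs with the
  // release-store in Set(), so writes made before Set() are visible here.
  bool IsCanceled() const { return cancel_flag_.IsSet(); }
 private:
  base::AtomicFlag cancel_flag_;
};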

base/synchronization/condition_variable_posix.cc
@@ -0,0 +1,149 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/synchronization/condition_variable.h"
#include <errno.h>
#include <stdint.h>
#include <sys/time.h>
#include "base/optional.h"
#include "base/synchronization/lock.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "build/build_config.h"
namespace base {
ConditionVariable::ConditionVariable(Lock* user_lock)
: user_mutex_(user_lock->lock_.native_handle())
#if DCHECK_IS_ON()
, user_lock_(user_lock)
#endif
{
int rv = 0;
// http://crbug.com/293736
// NaCl doesn't support monotonic clock based absolute deadlines.
// On older Android platform versions, it's supported through the
// non-standard pthread_cond_timedwait_monotonic_np. Newer platform
// versions have pthread_condattr_setclock.
// Mac can use relative time deadlines.
#if !defined(OS_MACOSX) && !defined(OS_NACL) && \
!(defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC))
pthread_condattr_t attrs;
rv = pthread_condattr_init(&attrs);
DCHECK_EQ(0, rv);
pthread_condattr_setclock(&attrs, CLOCK_MONOTONIC);
rv = pthread_cond_init(&condition_, &attrs);
pthread_condattr_destroy(&attrs);
#else
rv = pthread_cond_init(&condition_, NULL);
#endif
DCHECK_EQ(0, rv);
}
ConditionVariable::~ConditionVariable() {
#if defined(OS_MACOSX)
// This hack is necessary to avoid a fatal pthreads subsystem bug in the
// Darwin kernel. http://crbug.com/517681.
{
base::Lock lock;
base::AutoLock l(lock);
struct timespec ts;
ts.tv_sec = 0;
ts.tv_nsec = 1;
pthread_cond_timedwait_relative_np(&condition_, lock.lock_.native_handle(),
&ts);
}
#endif
int rv = pthread_cond_destroy(&condition_);
DCHECK_EQ(0, rv);
}
void ConditionVariable::Wait() {
Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
scoped_blocking_call;
if (waiting_is_blocking_)
scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
#if DCHECK_IS_ON()
user_lock_->CheckHeldAndUnmark();
#endif
int rv = pthread_cond_wait(&condition_, user_mutex_);
DCHECK_EQ(0, rv);
#if DCHECK_IS_ON()
user_lock_->CheckUnheldAndMark();
#endif
}
void ConditionVariable::TimedWait(const TimeDelta& max_time) {
Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
scoped_blocking_call;
if (waiting_is_blocking_)
scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
int64_t usecs = max_time.InMicroseconds();
struct timespec relative_time;
relative_time.tv_sec = usecs / Time::kMicrosecondsPerSecond;
relative_time.tv_nsec =
(usecs % Time::kMicrosecondsPerSecond) * Time::kNanosecondsPerMicrosecond;
#if DCHECK_IS_ON()
user_lock_->CheckHeldAndUnmark();
#endif
#if defined(OS_MACOSX)
int rv = pthread_cond_timedwait_relative_np(
&condition_, user_mutex_, &relative_time);
#else
// The timeout argument to pthread_cond_timedwait is in absolute time.
struct timespec absolute_time;
#if defined(OS_NACL)
// See comment in constructor for why this is different in NaCl.
struct timeval now;
gettimeofday(&now, NULL);
absolute_time.tv_sec = now.tv_sec;
absolute_time.tv_nsec = now.tv_usec * Time::kNanosecondsPerMicrosecond;
#else
struct timespec now;
clock_gettime(CLOCK_MONOTONIC, &now);
absolute_time.tv_sec = now.tv_sec;
absolute_time.tv_nsec = now.tv_nsec;
#endif
absolute_time.tv_sec += relative_time.tv_sec;
absolute_time.tv_nsec += relative_time.tv_nsec;
absolute_time.tv_sec += absolute_time.tv_nsec / Time::kNanosecondsPerSecond;
absolute_time.tv_nsec %= Time::kNanosecondsPerSecond;
DCHECK_GE(absolute_time.tv_sec, now.tv_sec); // Overflow paranoia
#if defined(OS_ANDROID) && defined(HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC)
int rv = pthread_cond_timedwait_monotonic_np(
&condition_, user_mutex_, &absolute_time);
#else
int rv = pthread_cond_timedwait(&condition_, user_mutex_, &absolute_time);
#endif // OS_ANDROID && HAVE_PTHREAD_COND_TIMEDWAIT_MONOTONIC
#endif // OS_MACOSX
// On failure, we only expect the CV to time out. Any other error value means
// that we've unexpectedly woken up.
DCHECK(rv == 0 || rv == ETIMEDOUT);
#if DCHECK_IS_ON()
user_lock_->CheckUnheldAndMark();
#endif
}
void ConditionVariable::Broadcast() {
int rv = pthread_cond_broadcast(&condition_);
DCHECK_EQ(0, rv);
}
void ConditionVariable::Signal() {
int rv = pthread_cond_signal(&condition_);
DCHECK_EQ(0, rv);
}
} // namespace base
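
A sketch of the intended usage pattern (hypothetical; not part of this commit): Wait() and TimedWait() may wake spuriously, and TimedWait() does not report whether it timed out, so callers re-check their own predicate under the lock.

// Hypothetical sketch, not part of this commit.
struct SharedState {
  base::Lock lock;
  base::ConditionVariable cv{&lock};
  bool data_ready = false;
};
void Consumer(SharedState* s) {
  base::AutoLock auto_lock(s->lock);
  while (!s->data_ready)
    s->cv.Wait();  // Atomically releases the lock; re-acquires before returning.
}
void Producer(SharedState* s) {
  base::AutoLock auto_lock(s->lock);
  s->data_ready = true;
  s->cv.Signal();  // Wakes one waiter; Broadcast() would wake all of them.
}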

base/synchronization/condition_variable_win.cc
@@ -0,0 +1,69 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/synchronization/condition_variable.h"
#include "base/optional.h"
#include "base/synchronization/lock.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include <windows.h>
namespace base {
ConditionVariable::ConditionVariable(Lock* user_lock)
: srwlock_(user_lock->lock_.native_handle())
#if DCHECK_IS_ON()
, user_lock_(user_lock)
#endif
{
DCHECK(user_lock);
InitializeConditionVariable(reinterpret_cast<PCONDITION_VARIABLE>(&cv_));
}
ConditionVariable::~ConditionVariable() = default;
void ConditionVariable::Wait() {
TimedWait(TimeDelta::FromMilliseconds(INFINITE));
}
void ConditionVariable::TimedWait(const TimeDelta& max_time) {
Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
scoped_blocking_call;
if (waiting_is_blocking_)
scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
DWORD timeout = static_cast<DWORD>(max_time.InMilliseconds());
#if DCHECK_IS_ON()
user_lock_->CheckHeldAndUnmark();
#endif
if (!SleepConditionVariableSRW(reinterpret_cast<PCONDITION_VARIABLE>(&cv_),
reinterpret_cast<PSRWLOCK>(srwlock_), timeout,
0)) {
// On failure, we only expect the CV to time out. Any other error value means
// that we've unexpectedly woken up.
// Note that WAIT_TIMEOUT != ERROR_TIMEOUT. WAIT_TIMEOUT is used with the
// WaitFor* family of functions as a direct return value. ERROR_TIMEOUT is
// used with GetLastError().
DCHECK_EQ(static_cast<DWORD>(ERROR_TIMEOUT), GetLastError());
}
#if DCHECK_IS_ON()
user_lock_->CheckUnheldAndMark();
#endif
}
void ConditionVariable::Broadcast() {
WakeAllConditionVariable(reinterpret_cast<PCONDITION_VARIABLE>(&cv_));
}
void ConditionVariable::Signal() {
WakeConditionVariable(reinterpret_cast<PCONDITION_VARIABLE>(&cv_));
}
} // namespace base

base/synchronization/lock.cc
@@ -0,0 +1,38 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file is used for debugging assertion support. The Lock class
// is functionally a wrapper around the LockImpl class, so the only
// real intelligence in the class is in the debugging logic.
#include "base/synchronization/lock.h"
#if DCHECK_IS_ON()
namespace base {
Lock::Lock() : lock_() {
}
Lock::~Lock() {
DCHECK(owning_thread_ref_.is_null());
}
void Lock::AssertAcquired() const {
DCHECK(owning_thread_ref_ == PlatformThread::CurrentRef());
}
void Lock::CheckHeldAndUnmark() {
DCHECK(owning_thread_ref_ == PlatformThread::CurrentRef());
owning_thread_ref_ = PlatformThreadRef();
}
void Lock::CheckUnheldAndMark() {
DCHECK(owning_thread_ref_.is_null());
owning_thread_ref_ = PlatformThread::CurrentRef();
}
} // namespace base
#endif // DCHECK_IS_ON()

base/synchronization/lock.h
@@ -0,0 +1,130 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_SYNCHRONIZATION_LOCK_H_
#define BASE_SYNCHRONIZATION_LOCK_H_
#include "base/base_export.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/synchronization/lock_impl.h"
#include "base/thread_annotations.h"
#include "base/threading/platform_thread.h"
#include "build/build_config.h"
namespace base {
// A convenient wrapper for an OS specific critical section. The only real
// intelligence in this class is in debug mode for the support for the
// AssertAcquired() method.
class LOCKABLE BASE_EXPORT Lock {
public:
#if !DCHECK_IS_ON()
// Optimized wrapper implementation
Lock() : lock_() {}
~Lock() {}
void Acquire() EXCLUSIVE_LOCK_FUNCTION() { lock_.Lock(); }
void Release() UNLOCK_FUNCTION() { lock_.Unlock(); }
// If the lock is not held, take it and return true. If the lock is already
// held by another thread, immediately return false. This must not be called
// by a thread already holding the lock (what happens is undefined and an
// assertion may fail).
bool Try() EXCLUSIVE_TRYLOCK_FUNCTION(true) { return lock_.Try(); }
// Null implementation if not debug.
void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK() {}
#else
Lock();
~Lock();
// NOTE: We do not permit recursive locks and will commonly fire a DCHECK() if
// a thread attempts to acquire the lock a second time (while already holding
// it).
void Acquire() EXCLUSIVE_LOCK_FUNCTION() {
lock_.Lock();
CheckUnheldAndMark();
}
void Release() UNLOCK_FUNCTION() {
CheckHeldAndUnmark();
lock_.Unlock();
}
bool Try() EXCLUSIVE_TRYLOCK_FUNCTION(true) {
bool rv = lock_.Try();
if (rv) {
CheckUnheldAndMark();
}
return rv;
}
void AssertAcquired() const ASSERT_EXCLUSIVE_LOCK();
#endif // DCHECK_IS_ON()
// Whether Lock mitigates priority inversion when used from different thread
// priorities.
static bool HandlesMultipleThreadPriorities() {
#if defined(OS_WIN)
// Windows mitigates priority inversion by randomly boosting the priority of
// ready threads.
// https://msdn.microsoft.com/library/windows/desktop/ms684831.aspx
return true;
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
// POSIX mitigates priority inversion by setting the priority of a thread
// holding a Lock to the maximum priority of any other thread waiting on it.
return internal::LockImpl::PriorityInheritanceAvailable();
#else
#error Unsupported platform
#endif
}
// Both Windows and POSIX implementations of ConditionVariable need to be
// able to see our lock and tweak our debugging counters, as they release and
// acquire locks inside of their condition variable APIs.
friend class ConditionVariable;
private:
#if DCHECK_IS_ON()
// Members and routines taking care of locks assertions.
// Note that this checks for recursive locks and allows them
// if the variable is set. This is allowed by the underlying implementation
// on Windows but not on POSIX, so we're doing unneeded checks on POSIX.
// It's worth it to share the code.
void CheckHeldAndUnmark();
void CheckUnheldAndMark();
// All private data is implicitly protected by lock_.
// Be VERY careful to only access members under that lock.
base::PlatformThreadRef owning_thread_ref_;
#endif // DCHECK_IS_ON()
// Platform specific underlying lock implementation.
internal::LockImpl lock_;
DISALLOW_COPY_AND_ASSIGN(Lock);
};
// A helper class that acquires the given Lock while the AutoLock is in scope.
using AutoLock = internal::BasicAutoLock<Lock>;
// AutoUnlock is a helper that will Release() the |lock| argument in the
// constructor, and re-Acquire() it in the destructor.
using AutoUnlock = internal::BasicAutoUnlock<Lock>;
// Like AutoLock but is a no-op when the provided Lock* is null. Inspired from
// absl::MutexLockMaybe. Use this instead of base::Optional<base::AutoLock> to
// get around -Wthread-safety-analysis warnings for conditional locking.
using AutoLockMaybe = internal::BasicAutoLockMaybe<Lock>;
// Like AutoLock but permits Release() of its mutex before destruction.
// Release() may be called at most once. Inspired from
// absl::ReleasableMutexLock. Use this instead of base::Optional<base::AutoLock>
// to get around -Wthread-safety-analysis warnings for AutoLocks that are
// explicitly released early (prefer proper scoping to this).
using ReleasableAutoLock = internal::BasicReleasableAutoLock<Lock>;
} // namespace base
#endif // BASE_SYNCHRONIZATION_LOCK_H_
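
A short sketch of the helpers above (hypothetical; not part of this commit): AutoLock for plain scoping, ReleasableAutoLock when the critical section ends before the scope does.

// Hypothetical sketch, not part of this commit.
class Counter {
 public:
  void Increment() {
    base::AutoLock auto_lock(lock_);  // Acquire() now, Release() at scope end.
    ++value_;
  }
  int Exchange(int new_value) {
    base::ReleasableAutoLock auto_lock(&lock_);
    int old_value = value_;
    value_ = new_value;
    auto_lock.Release();  // Explicit early release; allowed at most once.
    return old_value;
  }
 private:
  base::Lock lock_;
  int value_ = 0;
};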

base/synchronization/lock_impl.h
@@ -0,0 +1,175 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_SYNCHRONIZATION_LOCK_IMPL_H_
#define BASE_SYNCHRONIZATION_LOCK_IMPL_H_
#include "base/base_export.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/thread_annotations.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include "base/win/windows_types.h"
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
#include <errno.h>
#include <pthread.h>
#include <string.h>  // For strerror() used in the inline Unlock() below.
#endif
namespace base {
namespace internal {
// This class implements the underlying platform-specific lock mechanism
// used for the Lock class. Most users should not use LockImpl directly, but
// should instead use Lock.
class BASE_EXPORT LockImpl {
public:
#if defined(OS_WIN)
using NativeHandle = CHROME_SRWLOCK;
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
using NativeHandle = pthread_mutex_t;
#endif
LockImpl();
~LockImpl();
// If the lock is not held, take it and return true. If the lock is already
// held by something else, immediately return false.
bool Try();
// Take the lock, blocking until it is available if necessary.
void Lock();
// Release the lock. This must only be called by the lock's holder: after
// a successful call to Try, or a call to Lock.
inline void Unlock();
// Return the native underlying lock.
// TODO(awalker): refactor lock and condition variables so that this is
// unnecessary.
NativeHandle* native_handle() { return &native_handle_; }
#if defined(OS_POSIX) || defined(OS_FUCHSIA)
// Whether this lock will attempt to use priority inheritance.
static bool PriorityInheritanceAvailable();
#endif
private:
NativeHandle native_handle_;
DISALLOW_COPY_AND_ASSIGN(LockImpl);
};
#if defined(OS_WIN)
void LockImpl::Unlock() {
::ReleaseSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&native_handle_));
}
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
void LockImpl::Unlock() {
int rv = pthread_mutex_unlock(&native_handle_);
DCHECK_EQ(rv, 0) << ". " << strerror(rv);
}
#endif
// This is an implementation used for AutoLock templated on the lock type.
template <class LockType>
class SCOPED_LOCKABLE BasicAutoLock {
public:
struct AlreadyAcquired {};
explicit BasicAutoLock(LockType& lock) EXCLUSIVE_LOCK_FUNCTION(lock)
: lock_(lock) {
lock_.Acquire();
}
BasicAutoLock(LockType& lock, const AlreadyAcquired&)
EXCLUSIVE_LOCKS_REQUIRED(lock)
: lock_(lock) {
lock_.AssertAcquired();
}
~BasicAutoLock() UNLOCK_FUNCTION() {
lock_.AssertAcquired();
lock_.Release();
}
private:
LockType& lock_;
DISALLOW_COPY_AND_ASSIGN(BasicAutoLock);
};
// This is an implementation used for AutoUnlock templated on the lock type.
template <class LockType>
class BasicAutoUnlock {
public:
explicit BasicAutoUnlock(LockType& lock) : lock_(lock) {
// We require our caller to have the lock.
lock_.AssertAcquired();
lock_.Release();
}
~BasicAutoUnlock() { lock_.Acquire(); }
private:
LockType& lock_;
DISALLOW_COPY_AND_ASSIGN(BasicAutoUnlock);
};
// This is an implementation used for AutoLockMaybe templated on the lock type.
template <class LockType>
class SCOPED_LOCKABLE BasicAutoLockMaybe {
public:
explicit BasicAutoLockMaybe(LockType* lock) EXCLUSIVE_LOCK_FUNCTION(lock)
: lock_(lock) {
if (lock_)
lock_->Acquire();
}
~BasicAutoLockMaybe() UNLOCK_FUNCTION() {
if (lock_) {
lock_->AssertAcquired();
lock_->Release();
}
}
private:
LockType* const lock_;
DISALLOW_COPY_AND_ASSIGN(BasicAutoLockMaybe);
};
// This is an implementation used for ReleasableAutoLock templated on the lock
// type.
template <class LockType>
class SCOPED_LOCKABLE BasicReleasableAutoLock {
public:
explicit BasicReleasableAutoLock(LockType* lock) EXCLUSIVE_LOCK_FUNCTION(lock)
: lock_(lock) {
DCHECK(lock_);
lock_->Acquire();
}
~BasicReleasableAutoLock() UNLOCK_FUNCTION() {
if (lock_) {
lock_->AssertAcquired();
lock_->Release();
}
}
void Release() UNLOCK_FUNCTION() {
DCHECK(lock_);
lock_->AssertAcquired();
lock_->Release();
lock_ = nullptr;
}
private:
LockType* lock_;
DISALLOW_COPY_AND_ASSIGN(BasicReleasableAutoLock);
};
} // namespace internal
} // namespace base
#endif // BASE_SYNCHRONIZATION_LOCK_IMPL_H_
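
Note the design choice: the Basic* helpers only require Acquire(), Release() and AssertAcquired() from LockType, so they can scope any conforming lock, not just base::Lock. A hypothetical sketch (not part of this commit):

// Hypothetical sketch, not part of this commit.
struct TracingLock {
  void Acquire() { lock.Acquire(); /* e.g. record the acquisition here */ }
  void Release() { lock.Release(); }
  void AssertAcquired() const { lock.AssertAcquired(); }
  base::Lock lock;
};
using AutoTracingLock = base::internal::BasicAutoLock<TracingLock>;
void Example(TracingLock& tracing_lock) {
  AutoTracingLock scoped(tracing_lock);  // Same RAII pattern as base::AutoLock.
}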

base/synchronization/lock_impl_posix.cc
@@ -0,0 +1,133 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/synchronization/lock_impl.h"
#include <string>
#include "base/debug/activity_tracker.h"
#include "base/logging.h"
#include "base/posix/safe_strerror.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/synchronization_buildflags.h"
#include "build/build_config.h"
namespace base {
namespace internal {
namespace {
#if DCHECK_IS_ON()
const char* AdditionalHintForSystemErrorCode(int error_code) {
switch (error_code) {
case EINVAL:
return "Hint: This is often related to a use-after-free.";
default:
return "";
}
}
#endif // DCHECK_IS_ON()
std::string SystemErrorCodeToString(int error_code) {
#if DCHECK_IS_ON()
return base::safe_strerror(error_code) + ". " +
AdditionalHintForSystemErrorCode(error_code);
#else // DCHECK_IS_ON()
return std::string();
#endif // DCHECK_IS_ON()
}
} // namespace
// Determines which platforms can consider using priority inheritance locks. Use
// this define for platform code that may not compile if priority inheritance
// locks aren't available. For this platform code,
// PRIORITY_INHERITANCE_LOCKS_POSSIBLE() is a necessary but insufficient check.
// Lock::PriorityInheritanceAvailable still must be checked as the code may
// compile but the underlying platform still may not correctly support priority
// inheritance locks.
#if defined(OS_NACL) || defined(OS_ANDROID) || defined(OS_FUCHSIA)
#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 0
#else
#define PRIORITY_INHERITANCE_LOCKS_POSSIBLE() 1
#endif
LockImpl::LockImpl() {
pthread_mutexattr_t mta;
int rv = pthread_mutexattr_init(&mta);
DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
#if PRIORITY_INHERITANCE_LOCKS_POSSIBLE()
if (PriorityInheritanceAvailable()) {
rv = pthread_mutexattr_setprotocol(&mta, PTHREAD_PRIO_INHERIT);
DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
}
#endif
#ifndef NDEBUG
// In debug, setup attributes for lock error checking.
rv = pthread_mutexattr_settype(&mta, PTHREAD_MUTEX_ERRORCHECK);
DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
#endif
rv = pthread_mutex_init(&native_handle_, &mta);
DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
rv = pthread_mutexattr_destroy(&mta);
DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
}
LockImpl::~LockImpl() {
int rv = pthread_mutex_destroy(&native_handle_);
DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
}
bool LockImpl::Try() {
int rv = pthread_mutex_trylock(&native_handle_);
DCHECK(rv == 0 || rv == EBUSY) << ". " << SystemErrorCodeToString(rv);
return rv == 0;
}
void LockImpl::Lock() {
// The ScopedLockAcquireActivity below is relatively expensive and so its
// actions can become significant due to the very large number of locks
// that tend to be used throughout the build. To avoid this cost in the
// vast majority of the calls, simply "try" the lock first and only do the
// (tracked) blocking call if that fails. Since "try" itself is a system
// call, and thus also somewhat expensive, don't bother with it unless
// tracking is actually enabled.
if (base::debug::GlobalActivityTracker::IsEnabled())
if (Try())
return;
base::debug::ScopedLockAcquireActivity lock_activity(this);
int rv = pthread_mutex_lock(&native_handle_);
DCHECK_EQ(rv, 0) << ". " << SystemErrorCodeToString(rv);
}
// static
bool LockImpl::PriorityInheritanceAvailable() {
#if BUILDFLAG(ENABLE_MUTEX_PRIORITY_INHERITANCE)
return true;
#elif PRIORITY_INHERITANCE_LOCKS_POSSIBLE() && defined(OS_MACOSX)
return true;
#else
// Security concerns prevent the use of priority inheritance mutexes on Linux.
// * CVE-2010-0622 - Linux < 2.6.33-rc7, wake_futex_pi possible DoS.
// https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2010-0622
// * CVE-2012-6647 - Linux < 3.5.1, futex_wait_requeue_pi possible DoS.
// https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2012-6647
// * CVE-2014-3153 - Linux <= 3.14.5, futex_requeue, privilege escalation.
// https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2014-3153
//
// If the above were all addressed, we still need a runtime check to deal with
// the bug below.
// * glibc Bug 14652: https://sourceware.org/bugzilla/show_bug.cgi?id=14652
// Fixed in glibc 2.17.
// Priority inheritance mutexes may deadlock with condition variables
// during reacquisition of the mutex after the condition variable is
// signalled.
return false;
#endif
}
} // namespace internal
} // namespace base
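
Callers that care about priority inversion combine the compile-time PRIORITY_INHERITANCE_LOCKS_POSSIBLE() check with the runtime query, as the comment above prescribes. A hypothetical sketch (not part of this commit):

// Hypothetical sketch, not part of this commit.
void PickQueueStrategy() {
  if (base::Lock::HandlesMultipleThreadPriorities()) {
    // A base::Lock shared across thread priorities will not invert them
    // (priority-inheritance mutex on POSIX, random boosting on Windows).
  } else {
    // Fall back to, e.g., lock-free hand-off between priority levels.
  }
}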

base/synchronization/lock_impl_win.cc
@@ -0,0 +1,40 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/synchronization/lock_impl.h"
#include "base/debug/activity_tracker.h"
#include <windows.h>
namespace base {
namespace internal {
LockImpl::LockImpl() : native_handle_(SRWLOCK_INIT) {}
LockImpl::~LockImpl() = default;
bool LockImpl::Try() {
return !!::TryAcquireSRWLockExclusive(
reinterpret_cast<PSRWLOCK>(&native_handle_));
}
void LockImpl::Lock() {
// The ScopedLockAcquireActivity below is relatively expensive and so its
// actions can become significant due to the very large number of locks
// that tend to be used throughout the build. To avoid this cost in the
// vast majority of the calls, simply "try" the lock first and only do the
// (tracked) blocking call if that fails. Since "try" itself is a system
// call, and thus also somewhat expensive, don't bother with it unless
// tracking is actually enabled.
if (base::debug::GlobalActivityTracker::IsEnabled())
if (Try())
return;
base::debug::ScopedLockAcquireActivity lock_activity(this);
::AcquireSRWLockExclusive(reinterpret_cast<PSRWLOCK>(&native_handle_));
}
} // namespace internal
} // namespace base

base/synchronization/waitable_event.h
@@ -0,0 +1,291 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
#define BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
#include <stddef.h>
#include "base/base_export.h"
#include "base/macros.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include "base/win/scoped_handle.h"
#elif defined(OS_MACOSX)
#include <mach/mach.h>
#include <list>
#include <memory>
#include "base/callback_forward.h"
#include "base/mac/scoped_mach_port.h"
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
#include <list>
#include <utility>
#include "base/memory/ref_counted.h"
#include "base/synchronization/lock.h"
#endif
namespace base {
class TimeDelta;
// A WaitableEvent can be a useful thread synchronization tool when you want to
// allow one thread to wait for another thread to finish some work. For
// non-Windows systems, this can only be used from within a single address
// space.
//
// Use a WaitableEvent when you would otherwise use a Lock+ConditionVariable to
// protect a simple boolean value. However, if you find yourself using a
// WaitableEvent in conjunction with a Lock to wait for a more complex state
// change (e.g., for an item to be added to a queue), then you should probably
// be using a ConditionVariable instead of a WaitableEvent.
//
// NOTE: On Windows, this class provides a subset of the functionality afforded
// by a Windows event object. This is intentional. If you are writing Windows
// specific code and you need other features of a Windows event, then you might
// be better off just using a Windows event directly.
class BASE_EXPORT WaitableEvent {
public:
// Indicates whether a WaitableEvent should automatically reset the event
// state after a single waiting thread has been released or remain signaled
// until Reset() is manually invoked.
enum class ResetPolicy { MANUAL, AUTOMATIC };
// Indicates whether a new WaitableEvent should start in a signaled state or
// not.
enum class InitialState { SIGNALED, NOT_SIGNALED };
// Constructs a WaitableEvent with policy and initial state as detailed in
// the above enums.
WaitableEvent(ResetPolicy reset_policy = ResetPolicy::MANUAL,
InitialState initial_state = InitialState::NOT_SIGNALED);
#if defined(OS_WIN)
// Create a WaitableEvent from an Event HANDLE which has already been
// created. This object takes ownership of the HANDLE and will close it when
// deleted.
explicit WaitableEvent(win::ScopedHandle event_handle);
#endif
~WaitableEvent();
// Put the event in the un-signaled state.
void Reset();
// Put the event in the signaled state, causing any thread blocked on Wait
// to be woken up.
void Signal();
// Returns true if the event is in the signaled state, else false. If this
// is not a manual reset event, then this test will cause a reset.
bool IsSignaled();
// Wait indefinitely for the event to be signaled. Wait's return "happens
// after" |Signal| has completed. This means that it's safe for a
// WaitableEvent to synchronise its own destruction, like this:
//
// WaitableEvent *e = new WaitableEvent;
// SendToOtherThread(e);
// e->Wait();
// delete e;
void Wait();
// Wait up until wait_delta has passed for the event to be signaled
// (real-time; ignores time overrides). Returns true if the event was
// signaled. Handles spurious wakeups and guarantees that |wait_delta| will
// have elapsed if this returns false.
//
// TimedWait can synchronise its own destruction like |Wait|.
bool TimedWait(const TimeDelta& wait_delta);
#if defined(OS_WIN)
HANDLE handle() const { return handle_.Get(); }
#endif
// Declares that this WaitableEvent will only ever be used by a thread that is
// idle at the bottom of its stack and waiting for work (in particular, it is
// not synchronously waiting on this event before resuming ongoing work). This
// is useful to avoid telling base-internals that this thread is "blocked"
// when it's merely idle and ready to do work. As such, this is only expected
// to be used by thread and thread pool impls.
void declare_only_used_while_idle() { waiting_is_blocking_ = false; }
// Wait, synchronously, on multiple events.
// waitables: an array of WaitableEvent pointers
// count: the number of elements in @waitables
//
// returns: the index of a WaitableEvent which has been signaled.
//
// You MUST NOT delete any of the WaitableEvent objects while this wait is
// happening; however, WaitMany's return "happens after" the |Signal| call
// that caused it has completed, like |Wait|.
//
// If more than one WaitableEvent is signaled to unblock WaitMany, the lowest
// index among them is returned.
static size_t WaitMany(WaitableEvent** waitables, size_t count);
// For asynchronous waiting, see WaitableEventWatcher
// This is a private helper class. It's here because it's used by friends of
// this class (such as WaitableEventWatcher) to be able to enqueue elements
// of the wait-list.
class Waiter {
public:
// Signal the waiter to wake up.
//
// Consider the case of a Waiter which is in multiple WaitableEvent's
// wait-lists. Each WaitableEvent is automatic-reset and two of them are
// signaled at the same time. Now, each will wake only the first waiter in
// the wake-list before resetting. However, if those two waiters happen to
// be the same object (as can happen if another thread didn't have a chance
// to dequeue the waiter from the other wait-list in time), two auto-resets
// will have happened, but only one waiter has been signaled!
//
// Because of this, a Waiter may "reject" a wake by returning false. In
// this case, the auto-reset WaitableEvent shouldn't act as if anything has
// been notified.
virtual bool Fire(WaitableEvent* signaling_event) = 0;
// Waiters may implement this in order to provide an extra condition for
// two Waiters to be considered equal. In WaitableEvent::Dequeue, if the
// pointers match then this function is called as a final check. See the
// comments in ~Handle for why.
virtual bool Compare(void* tag) = 0;
protected:
virtual ~Waiter() = default;
};
private:
friend class WaitableEventWatcher;
#if defined(OS_WIN)
win::ScopedHandle handle_;
#elif defined(OS_MACOSX)
// Prior to macOS 10.12, a TYPE_MACH_RECV dispatch source may not be invoked
// immediately. If a WaitableEventWatcher is used on a manual-reset event,
// and another thread that is Wait()ing on the event calls Reset()
// immediately after waking up, the watcher may not receive the callback.
// On macOS 10.12 and higher, dispatch delivery is reliable. But for OSes
// prior, a lock-protected list of callbacks is used for manual-reset event
// watchers. Automatic-reset events are not prone to this issue, since the
// first thread to wake will claim the event.
static bool UseSlowWatchList(ResetPolicy policy);
// Peeks the message queue named by |port| and returns true if a message
// is present and false if not. If |dequeue| is true, the message will be
// drained from the queue. If |dequeue| is false, the queue will only be
// peeked. |port| must be a receive right.
static bool PeekPort(mach_port_t port, bool dequeue);
// The Mach receive right is waited on by both WaitableEvent and
// WaitableEventWatcher. It is valid to signal and then delete an event, and
// a watcher should still be notified. If the right were to be destroyed
// immediately, the watcher would not receive the signal. Because Mach
// receive rights cannot have a user refcount greater than one, the right
// must be reference-counted manually.
class ReceiveRight : public RefCountedThreadSafe<ReceiveRight> {
public:
ReceiveRight(mach_port_t name, bool create_slow_watch_list);
mach_port_t Name() const { return right_.get(); }
// This structure is used iff UseSlowWatchList() is true. See the comment
// in Signal() for details.
struct WatchList {
WatchList();
~WatchList();
// The lock protects a list of closures to be run when the event is
// Signal()ed. The closures are invoked on the signaling thread, so they
// must be safe to be called from any thread.
Lock lock;
std::list<OnceClosure> list;
};
WatchList* SlowWatchList() const { return slow_watch_list_.get(); }
private:
friend class RefCountedThreadSafe<ReceiveRight>;
~ReceiveRight();
mac::ScopedMachReceiveRight right_;
// This is allocated iff UseSlowWatchList() is true. It is created on the
// heap to avoid performing initialization when not using the slow path.
std::unique_ptr<WatchList> slow_watch_list_;
DISALLOW_COPY_AND_ASSIGN(ReceiveRight);
};
const ResetPolicy policy_;
// The receive right for the event.
scoped_refptr<ReceiveRight> receive_right_;
// The send right used to signal the event. This can be disposed of with
// the event, unlike the receive right, since a deleted event cannot be
// signaled.
mac::ScopedMachSendRight send_right_;
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
// On Windows, you must not close a HANDLE which is currently being waited on.
// The MSDN documentation says that the resulting behaviour is 'undefined'.
// To solve that issue each WaitableEventWatcher duplicates the given event
// handle.
// However, if we were to include the following members
// directly then, on POSIX, one couldn't use WaitableEventWatcher to watch an
// event which gets deleted. This mismatch has bitten us several times now,
// so we have a kernel of the WaitableEvent, which is reference counted.
// WaitableEventWatchers may then take a reference and thus match the Windows
// behaviour.
struct WaitableEventKernel :
public RefCountedThreadSafe<WaitableEventKernel> {
public:
WaitableEventKernel(ResetPolicy reset_policy, InitialState initial_state);
bool Dequeue(Waiter* waiter, void* tag);
base::Lock lock_;
const bool manual_reset_;
bool signaled_;
std::list<Waiter*> waiters_;
private:
friend class RefCountedThreadSafe<WaitableEventKernel>;
~WaitableEventKernel();
};
typedef std::pair<WaitableEvent*, size_t> WaiterAndIndex;
// When dealing with arrays of WaitableEvent*, we want to sort by the address
// of the WaitableEvent in order to have a globally consistent locking order.
// In that case we keep them, in sorted order, in an array of pairs where the
// second element is the index of the WaitableEvent in the original,
// unsorted, array.
static size_t EnqueueMany(WaiterAndIndex* waitables,
size_t count, Waiter* waiter);
bool SignalAll();
bool SignalOne();
void Enqueue(Waiter* waiter);
scoped_refptr<WaitableEventKernel> kernel_;
#endif
// Whether a thread invoking Wait() on this WaitableEvent should be considered
// blocked as opposed to idle (and potentially replaced if part of a pool).
bool waiting_is_blocking_ = true;
DISALLOW_COPY_AND_ASSIGN(WaitableEvent);
};
} // namespace base
#endif // BASE_SYNCHRONIZATION_WAITABLE_EVENT_H_
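
A minimal usage sketch (hypothetical; not part of this commit), relying on the documented guarantee that Wait()'s return "happens after" Signal() to publish a value without extra locking:

// Hypothetical sketch, not part of this commit.
int WaitForWorker() {
  base::WaitableEvent done(base::WaitableEvent::ResetPolicy::MANUAL,
                           base::WaitableEvent::InitialState::NOT_SIGNALED);
  int result = 0;
  // A worker thread would run:
  //   result = 42;    // Writes made before Signal()...
  //   done.Signal();
  done.Wait();  // ...are guaranteed visible once Wait() returns.
  return result;
}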

base/synchronization/waitable_event_mac.cc
@@ -0,0 +1,384 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/synchronization/waitable_event.h"
#include <dispatch/dispatch.h>
#include <mach/mach.h>
#include <sys/event.h>
#include "base/debug/activity_tracker.h"
#include "base/files/scoped_file.h"
#include "base/mac/dispatch_source_mach.h"
#include "base/mac/mac_util.h"
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_dispatch_object.h"
#include "base/optional.h"
#include "base/posix/eintr_wrapper.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/time/time_override.h"
#include "build/build_config.h"
namespace base {
WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
InitialState initial_state)
: policy_(reset_policy) {
mach_port_options_t options{};
options.flags = MPO_INSERT_SEND_RIGHT;
options.mpl.mpl_qlimit = 1;
mach_port_t name;
kern_return_t kr = mach_port_construct(mach_task_self(), &options, 0, &name);
MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_port_construct";
receive_right_ = new ReceiveRight(name, UseSlowWatchList(policy_));
send_right_.reset(name);
if (initial_state == InitialState::SIGNALED)
Signal();
}
WaitableEvent::~WaitableEvent() = default;
void WaitableEvent::Reset() {
PeekPort(receive_right_->Name(), true);
}
// NO_THREAD_SAFETY_ANALYSIS: Runtime dependent locking.
void WaitableEvent::Signal() NO_THREAD_SAFETY_ANALYSIS {
// If using the slow watch-list, copy the watchers to a local. After
// mach_msg(), the event object may be deleted by an awoken thread.
const bool use_slow_path = UseSlowWatchList(policy_);
ReceiveRight* receive_right = nullptr; // Manually reference counted.
std::unique_ptr<std::list<OnceClosure>> watch_list;
if (use_slow_path) {
// To avoid a race condition of a WaitableEventWatcher getting added
// while another thread is in this method, hold the watch-list lock for
// the duration of mach_msg(). This requires ref-counting the
// |receive_right_| object that contains it, in case the event is deleted
// by a waiting thread after mach_msg().
receive_right = receive_right_.get();
receive_right->AddRef();
ReceiveRight::WatchList* slow_watch_list = receive_right->SlowWatchList();
slow_watch_list->lock.Acquire();
if (!slow_watch_list->list.empty()) {
watch_list.reset(new std::list<OnceClosure>());
std::swap(*watch_list, slow_watch_list->list);
}
}
mach_msg_empty_send_t msg{};
msg.header.msgh_bits = MACH_MSGH_BITS_REMOTE(MACH_MSG_TYPE_COPY_SEND);
msg.header.msgh_size = sizeof(msg);
msg.header.msgh_remote_port = send_right_.get();
// If the event is already signaled, this will time out because the queue
// has a length of one.
kern_return_t kr =
mach_msg(&msg.header, MACH_SEND_MSG | MACH_SEND_TIMEOUT, sizeof(msg), 0,
MACH_PORT_NULL, 0, MACH_PORT_NULL);
MACH_CHECK(kr == KERN_SUCCESS || kr == MACH_SEND_TIMED_OUT, kr) << "mach_msg";
if (use_slow_path) {
// If a WaitableEventWatcher were to start watching when the event is
// signaled, it runs the callback immediately without adding it to the
// list. Therefore the watch list can only be non-empty if the event is
// newly signaled.
if (watch_list.get()) {
MACH_CHECK(kr == KERN_SUCCESS, kr);
for (auto& watcher : *watch_list) {
std::move(watcher).Run();
}
}
receive_right->SlowWatchList()->lock.Release();
receive_right->Release();
}
}
bool WaitableEvent::IsSignaled() {
return PeekPort(receive_right_->Name(), policy_ == ResetPolicy::AUTOMATIC);
}
void WaitableEvent::Wait() {
bool result = TimedWait(TimeDelta::Max());
DCHECK(result) << "TimedWait() should never fail with infinite timeout";
}
bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
if (wait_delta <= TimeDelta())
return IsSignaled();
// Record the event that this thread is blocking upon (for hang diagnosis) and
// consider it blocked for scheduling purposes. Ignore this for non-blocking
// WaitableEvents.
Optional<debug::ScopedEventWaitActivity> event_activity;
Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
scoped_blocking_call;
if (waiting_is_blocking_) {
event_activity.emplace(this);
scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
}
mach_msg_empty_rcv_t msg{};
msg.header.msgh_local_port = receive_right_->Name();
mach_msg_option_t options = MACH_RCV_MSG;
if (!wait_delta.is_max())
options |= MACH_RCV_TIMEOUT | MACH_RCV_INTERRUPT;
mach_msg_size_t rcv_size = sizeof(msg);
if (policy_ == ResetPolicy::MANUAL) {
// To avoid dequeuing the message, receive with a size of 0 and set
// MACH_RCV_LARGE to keep the message in the queue.
options |= MACH_RCV_LARGE;
rcv_size = 0;
}
// TimeTicks takes care of overflow but we special case is_max() nonetheless
// to avoid invoking TimeTicksNowIgnoringOverride() unnecessarily (same for
// the increment step of the for loop if the condition variable returns
// early). Ref: https://crbug.com/910524#c7
const TimeTicks end_time =
wait_delta.is_max() ? TimeTicks::Max()
: subtle::TimeTicksNowIgnoringOverride() + wait_delta;
// Fake |kr| value to bootstrap the for loop.
kern_return_t kr = MACH_RCV_INTERRUPTED;
for (mach_msg_timeout_t timeout = wait_delta.is_max()
? MACH_MSG_TIMEOUT_NONE
: wait_delta.InMillisecondsRoundedUp();
// If the thread is interrupted during mach_msg(), the system call will
// be restarted. However, the libsyscall wrapper does not adjust the
// timeout by the amount of time already waited. Using MACH_RCV_INTERRUPT
// will instead return from mach_msg(), so that the call can be retried
// with an adjusted timeout.
kr == MACH_RCV_INTERRUPTED;
timeout =
end_time.is_max()
? MACH_MSG_TIMEOUT_NONE
: std::max<int64_t>(
0, (end_time - subtle::TimeTicksNowIgnoringOverride())
.InMillisecondsRoundedUp())) {
kr = mach_msg(&msg.header, options, 0, rcv_size, receive_right_->Name(),
timeout, MACH_PORT_NULL);
}
if (kr == KERN_SUCCESS) {
return true;
} else if (rcv_size == 0 && kr == MACH_RCV_TOO_LARGE) {
return true;
} else {
MACH_CHECK(kr == MACH_RCV_TIMED_OUT, kr) << "mach_msg";
return false;
}
}
// static
bool WaitableEvent::UseSlowWatchList(ResetPolicy policy) {
#if defined(OS_IOS)
const bool use_slow_path = false;
#else
static bool use_slow_path = !mac::IsAtLeastOS10_12();
#endif
return policy == ResetPolicy::MANUAL && use_slow_path;
}
// static
size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables, size_t count) {
DCHECK(count) << "Cannot wait on no events";
internal::ScopedBlockingCallWithBaseSyncPrimitives scoped_blocking_call(
FROM_HERE, BlockingType::MAY_BLOCK);
// Record an event (the first) that this thread is blocking upon.
debug::ScopedEventWaitActivity event_activity(raw_waitables[0]);
// On macOS 10.11+, using Mach port sets may cause system instability, per
// https://crbug.com/756102. On macOS 10.12+, a kqueue can be used
// instead to work around that. On macOS 10.9 and 10.10, kqueue only works
// for port sets, so port sets are just used directly. On macOS 10.11,
// libdispatch sources are used. Therefore, there are three different
// primitives that can be used to implement WaitMany. Which one to use is
// selected at run-time by OS version checks.
enum WaitManyPrimitive {
KQUEUE,
DISPATCH,
PORT_SET,
};
#if defined(OS_IOS)
const WaitManyPrimitive kPrimitive = PORT_SET;
#else
const WaitManyPrimitive kPrimitive =
mac::IsAtLeastOS10_12() ? KQUEUE
: (mac::IsOS10_11() ? DISPATCH : PORT_SET);
#endif
if (kPrimitive == KQUEUE) {
std::vector<kevent64_s> events(count);
for (size_t i = 0; i < count; ++i) {
EV_SET64(&events[i], raw_waitables[i]->receive_right_->Name(),
EVFILT_MACHPORT, EV_ADD, 0, 0, i, 0, 0);
}
std::vector<kevent64_s> out_events(count);
ScopedFD wait_many(kqueue());
PCHECK(wait_many.is_valid()) << "kqueue";
int rv = HANDLE_EINTR(kevent64(wait_many.get(), events.data(), count,
out_events.data(), count, 0, nullptr));
PCHECK(rv > 0) << "kevent64";
size_t triggered = -1;  // Wraps to SIZE_MAX; any real index is smaller.
for (size_t i = 0; i < static_cast<size_t>(rv); ++i) {
// WaitMany should return the lowest index in |raw_waitables| that was
// triggered.
size_t index = static_cast<size_t>(out_events[i].udata);
triggered = std::min(triggered, index);
}
if (raw_waitables[triggered]->policy_ == ResetPolicy::AUTOMATIC) {
// The message needs to be dequeued to reset the event.
PeekPort(raw_waitables[triggered]->receive_right_->Name(), true);
}
return triggered;
} else if (kPrimitive == DISPATCH) {
// Each item in |raw_waitables| will be watched using a dispatch source
// scheduled on the serial |queue|. The first one to be invoked will
// signal the |semaphore| that this method will wait on.
ScopedDispatchObject<dispatch_queue_t> queue(dispatch_queue_create(
"org.chromium.base.WaitableEvent.WaitMany", DISPATCH_QUEUE_SERIAL));
ScopedDispatchObject<dispatch_semaphore_t> semaphore(
dispatch_semaphore_create(0));
// Block capture references. |signaled| will identify the index in
// |raw_waitables| whose source was invoked.
dispatch_semaphore_t semaphore_ref = semaphore.get();
const size_t kUnsignaled = -1;
__block size_t signaled = kUnsignaled;
// Create a MACH_RECV dispatch source for each event. These must be
// destroyed before the |queue| and |semaphore|.
std::vector<std::unique_ptr<DispatchSourceMach>> sources;
for (size_t i = 0; i < count; ++i) {
const bool auto_reset =
raw_waitables[i]->policy_ == WaitableEvent::ResetPolicy::AUTOMATIC;
// The block will copy a reference to |right|.
scoped_refptr<WaitableEvent::ReceiveRight> right =
raw_waitables[i]->receive_right_;
auto source =
std::make_unique<DispatchSourceMach>(queue, right->Name(), ^{
// After the semaphore is signaled, another event may be signaled and
// the source may have its block put on the |queue|. WaitMany
// should only report (and auto-reset) one event, so the first
// event to signal is reported.
if (signaled == kUnsignaled) {
signaled = i;
if (auto_reset) {
PeekPort(right->Name(), true);
}
dispatch_semaphore_signal(semaphore_ref);
}
});
source->Resume();
sources.push_back(std::move(source));
}
dispatch_semaphore_wait(semaphore, DISPATCH_TIME_FOREVER);
DCHECK_NE(signaled, kUnsignaled);
return signaled;
} else {
DCHECK_EQ(kPrimitive, PORT_SET);
kern_return_t kr;
mac::ScopedMachPortSet port_set;
{
mach_port_t name;
kr =
mach_port_allocate(mach_task_self(), MACH_PORT_RIGHT_PORT_SET, &name);
MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_port_allocate";
port_set.reset(name);
}
for (size_t i = 0; i < count; ++i) {
kr = mach_port_insert_member(mach_task_self(),
raw_waitables[i]->receive_right_->Name(),
port_set.get());
MACH_CHECK(kr == KERN_SUCCESS, kr) << "index " << i;
}
mach_msg_empty_rcv_t msg{};
// Wait on the port set. Only specify space enough for the header, to
// identify which port in the set is signaled. Otherwise, receiving from the
// port set may dequeue a message for a manual-reset event object, which
// would cause it to be reset.
kr = mach_msg(&msg.header,
MACH_RCV_MSG | MACH_RCV_LARGE | MACH_RCV_LARGE_IDENTITY, 0,
sizeof(msg.header), port_set.get(), 0, MACH_PORT_NULL);
MACH_CHECK(kr == MACH_RCV_TOO_LARGE, kr) << "mach_msg";
for (size_t i = 0; i < count; ++i) {
WaitableEvent* event = raw_waitables[i];
if (msg.header.msgh_local_port == event->receive_right_->Name()) {
if (event->policy_ == ResetPolicy::AUTOMATIC) {
// The message needs to be dequeued to reset the event.
PeekPort(msg.header.msgh_local_port, true);
}
return i;
}
}
NOTREACHED();
return 0;
}
}
// static
bool WaitableEvent::PeekPort(mach_port_t port, bool dequeue) {
if (dequeue) {
mach_msg_empty_rcv_t msg{};
msg.header.msgh_local_port = port;
kern_return_t kr = mach_msg(&msg.header, MACH_RCV_MSG | MACH_RCV_TIMEOUT, 0,
sizeof(msg), port, 0, MACH_PORT_NULL);
if (kr == KERN_SUCCESS) {
return true;
} else {
MACH_CHECK(kr == MACH_RCV_TIMED_OUT, kr) << "mach_msg";
return false;
}
} else {
mach_port_seqno_t seqno = 0;
mach_msg_size_t size;
mach_msg_id_t id;
mach_msg_trailer_t trailer;
mach_msg_type_number_t trailer_size = sizeof(trailer);
kern_return_t kr = mach_port_peek(
mach_task_self(), port, MACH_RCV_TRAILER_TYPE(MACH_RCV_TRAILER_NULL),
&seqno, &size, &id, reinterpret_cast<mach_msg_trailer_info_t>(&trailer),
&trailer_size);
if (kr == KERN_SUCCESS) {
return true;
} else {
MACH_CHECK(kr == KERN_FAILURE, kr) << "mach_port_peek";
return false;
}
}
}
WaitableEvent::ReceiveRight::ReceiveRight(mach_port_t name,
bool create_slow_watch_list)
: right_(name),
slow_watch_list_(create_slow_watch_list ? new WatchList() : nullptr) {}
WaitableEvent::ReceiveRight::~ReceiveRight() = default;
WaitableEvent::ReceiveRight::WatchList::WatchList() = default;
WaitableEvent::ReceiveRight::WatchList::~WatchList() = default;
} // namespace base
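
A sketch of WaitMany() (hypothetical; not part of this commit), which on this platform is backed by whichever of the three primitives above was selected at run time:

// Hypothetical sketch, not part of this commit.
size_t WaitForEither(base::WaitableEvent* first, base::WaitableEvent* second) {
  base::WaitableEvent* events[] = {first, second};
  // Blocks until at least one event is signaled; if several are signaled,
  // the lowest index among them is returned.
  return base::WaitableEvent::WaitMany(events, 2);
}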

base/synchronization/waitable_event_posix.cc
@@ -0,0 +1,447 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stddef.h>
#include <algorithm>
#include <limits>
#include <vector>
#include "base/debug/activity_tracker.h"
#include "base/logging.h"
#include "base/optional.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/time/time_override.h"
// -----------------------------------------------------------------------------
// A WaitableEvent on POSIX is implemented as a wait-list. Currently we don't
// support cross-process events (where one process can signal an event which
// others are waiting on). Because of this, we can avoid having one thread per
// listener in several cases.
//
// The WaitableEvent maintains a list of waiters, protected by a lock. Each
// waiter is either an async wait, in which case we have a Task and the
// MessageLoop to run it on, or a blocking wait, in which case we have the
// condition variable to signal.
//
// Waiting involves grabbing the lock and adding oneself to the wait list. Async
// waits can be canceled, which means grabbing the lock and removing oneself
// from the list.
//
// Waiting on multiple events is handled by adding a single, synchronous wait to
// the wait-list of many events. An event passes a pointer to itself when
// firing a waiter and so we can store that pointer to find out which event
// triggered.
// -----------------------------------------------------------------------------
namespace base {
// -----------------------------------------------------------------------------
// This is just an abstract base class for waking the two types of waiters
// -----------------------------------------------------------------------------
WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
InitialState initial_state)
: kernel_(new WaitableEventKernel(reset_policy, initial_state)) {}
WaitableEvent::~WaitableEvent() = default;
void WaitableEvent::Reset() {
base::AutoLock locked(kernel_->lock_);
kernel_->signaled_ = false;
}
void WaitableEvent::Signal() {
base::AutoLock locked(kernel_->lock_);
if (kernel_->signaled_)
return;
if (kernel_->manual_reset_) {
SignalAll();
kernel_->signaled_ = true;
} else {
// In the case of auto reset, if no waiters were woken, we remain
// signaled.
if (!SignalOne())
kernel_->signaled_ = true;
}
}
bool WaitableEvent::IsSignaled() {
base::AutoLock locked(kernel_->lock_);
const bool result = kernel_->signaled_;
if (result && !kernel_->manual_reset_)
kernel_->signaled_ = false;
return result;
}
// -----------------------------------------------------------------------------
// Synchronous waits
// -----------------------------------------------------------------------------
// This is a synchronous waiter. The thread is waiting on the given condition
// variable and the fired flag in this object.
// -----------------------------------------------------------------------------
class SyncWaiter : public WaitableEvent::Waiter {
public:
SyncWaiter()
: fired_(false), signaling_event_(nullptr), lock_(), cv_(&lock_) {}
bool Fire(WaitableEvent* signaling_event) override {
base::AutoLock locked(lock_);
if (fired_)
return false;
fired_ = true;
signaling_event_ = signaling_event;
cv_.Broadcast();
// Unlike AsyncWaiter objects, SyncWaiter objects are stack-allocated on
// the blocking thread's stack. There is no |delete this;| in Fire. The
// SyncWaiter object is destroyed when it goes out of scope.
return true;
}
WaitableEvent* signaling_event() const {
return signaling_event_;
}
// ---------------------------------------------------------------------------
// These waiters are always stack allocated and don't delete themselves. Thus
// there's no problem and the ABA tag is the same as the object pointer.
// ---------------------------------------------------------------------------
bool Compare(void* tag) override { return this == tag; }
// ---------------------------------------------------------------------------
// Called with lock held.
// ---------------------------------------------------------------------------
bool fired() const {
return fired_;
}
// ---------------------------------------------------------------------------
// During a TimedWait, we need a way to make sure that an auto-reset
// WaitableEvent doesn't think that this event has been signaled between
// unlocking it and removing it from the wait-list. Called with lock held.
// ---------------------------------------------------------------------------
void Disable() {
fired_ = true;
}
base::Lock* lock() {
return &lock_;
}
base::ConditionVariable* cv() {
return &cv_;
}
private:
bool fired_;
WaitableEvent* signaling_event_; // The WaitableEvent which woke us
base::Lock lock_;
base::ConditionVariable cv_;
};
void WaitableEvent::Wait() {
bool result = TimedWait(TimeDelta::Max());
DCHECK(result) << "TimedWait() should never fail with infinite timeout";
}
bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
if (wait_delta <= TimeDelta())
return IsSignaled();
// Record the event that this thread is blocking upon (for hang diagnosis) and
// consider it blocked for scheduling purposes. Ignore this for non-blocking
// WaitableEvents.
Optional<debug::ScopedEventWaitActivity> event_activity;
Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
scoped_blocking_call;
if (waiting_is_blocking_) {
event_activity.emplace(this);
scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
}
kernel_->lock_.Acquire();
if (kernel_->signaled_) {
if (!kernel_->manual_reset_) {
// In this case we were signaled when we had no waiters. Now that
// someone has waited upon us, we can automatically reset.
kernel_->signaled_ = false;
}
kernel_->lock_.Release();
return true;
}
SyncWaiter sw;
if (!waiting_is_blocking_)
sw.cv()->declare_only_used_while_idle();
sw.lock()->Acquire();
Enqueue(&sw);
kernel_->lock_.Release();
// We are violating locking order here by holding the SyncWaiter lock but not
// the WaitableEvent lock. However, this is safe because we don't lock |lock_|
// again before unlocking it.
// TimeTicks takes care of overflow but we special case is_max() nonetheless
// to avoid invoking TimeTicksNowIgnoringOverride() unnecessarily (same for
// the increment step of the for loop if the condition variable returns
// early). Ref: https://crbug.com/910524#c7
const TimeTicks end_time =
wait_delta.is_max() ? TimeTicks::Max()
: subtle::TimeTicksNowIgnoringOverride() + wait_delta;
for (TimeDelta remaining = wait_delta; remaining > TimeDelta() && !sw.fired();
remaining = end_time.is_max()
? TimeDelta::Max()
: end_time - subtle::TimeTicksNowIgnoringOverride()) {
if (end_time.is_max())
sw.cv()->Wait();
else
sw.cv()->TimedWait(remaining);
}
// Get the SyncWaiter signaled state before releasing the lock.
const bool return_value = sw.fired();
// We can't acquire |lock_| before releasing the SyncWaiter lock (because of
// locking order); however, in between the two a signal could be fired and
// |sw| would accept it while we still return false, so the signal would be
// lost on an auto-reset WaitableEvent. Thus we call Disable(), which makes
// SyncWaiter::Fire() return false.
sw.Disable();
sw.lock()->Release();
// This is a bug that has been enshrined in the interface of WaitableEvent
// now: |Dequeue| is called even when |sw.fired()| is true, even though it'll
// always return false in that case. However, taking the lock ensures that
// |Signal| has completed before we return and means that a WaitableEvent can
// synchronise its own destruction.
kernel_->lock_.Acquire();
kernel_->Dequeue(&sw, &sw);
kernel_->lock_.Release();
return return_value;
}
// -----------------------------------------------------------------------------
// Synchronous waiting on multiple objects.
static bool // StrictWeakOrdering
cmp_fst_addr(const std::pair<WaitableEvent*, size_t>& a,
const std::pair<WaitableEvent*, size_t>& b) {
return a.first < b.first;
}
// static
// NO_THREAD_SAFETY_ANALYSIS: Complex control flow.
size_t WaitableEvent::WaitMany(WaitableEvent** raw_waitables,
size_t count) NO_THREAD_SAFETY_ANALYSIS {
DCHECK(count) << "Cannot wait on no events";
internal::ScopedBlockingCallWithBaseSyncPrimitives scoped_blocking_call(
FROM_HERE, BlockingType::MAY_BLOCK);
// Record an event (the first) that this thread is blocking upon.
debug::ScopedEventWaitActivity event_activity(raw_waitables[0]);
// We need to acquire the locks in a globally consistent order. Thus we sort
// the array of waitables by address. We actually sort pairs so that we can
// map back to the original index values later.
std::vector<std::pair<WaitableEvent*, size_t> > waitables;
waitables.reserve(count);
for (size_t i = 0; i < count; ++i)
waitables.push_back(std::make_pair(raw_waitables[i], i));
DCHECK_EQ(count, waitables.size());
std::sort(waitables.begin(), waitables.end(), cmp_fst_addr);
// The set of waitables must be distinct. Since we have just sorted by
// address, we can check this cheaply by comparing pairs of consecutive
// elements.
for (size_t i = 0; i < waitables.size() - 1; ++i) {
DCHECK(waitables[i].first != waitables[i+1].first);
}
SyncWaiter sw;
const size_t r = EnqueueMany(&waitables[0], count, &sw);
if (r < count) {
// One of the events is already signaled. The SyncWaiter has not been
// enqueued anywhere.
return waitables[r].second;
}
// At this point, we hold the locks on all the WaitableEvents and we have
// enqueued our waiter in them all.
sw.lock()->Acquire();
// Release the WaitableEvent locks in reverse order.
for (size_t i = 0; i < count; ++i) {
waitables[count - (1 + i)].first->kernel_->lock_.Release();
}
for (;;) {
if (sw.fired())
break;
sw.cv()->Wait();
}
sw.lock()->Release();
// The address of the WaitableEvent which fired is stored in the SyncWaiter.
WaitableEvent *const signaled_event = sw.signaling_event();
// This will store the index of the raw_waitables which fired.
size_t signaled_index = 0;
// Take the locks of each WaitableEvent in turn (except the signaled one) and
// remove our SyncWaiter from the wait-list
for (size_t i = 0; i < count; ++i) {
if (raw_waitables[i] != signaled_event) {
raw_waitables[i]->kernel_->lock_.Acquire();
// There's no possible ABA issue with the address of the SyncWaiter here
// because it lives on the stack. Thus the tag value is just the pointer
// value again.
raw_waitables[i]->kernel_->Dequeue(&sw, &sw);
raw_waitables[i]->kernel_->lock_.Release();
} else {
// By taking this lock here we ensure that |Signal| has completed by the
// time we return, because |Signal| holds this lock. This matches the
// behaviour of |Wait| and |TimedWait|.
raw_waitables[i]->kernel_->lock_.Acquire();
raw_waitables[i]->kernel_->lock_.Release();
signaled_index = i;
}
}
return signaled_index;
}
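// Illustrative sketch (not part of the original file): WaitMany() returns the
// position in the caller's array of the event that fired, so callers can
// dispatch on the result. The event names here are hypothetical.
//
//   WaitableEvent quit(WaitableEvent::ResetPolicy::MANUAL,
//                      WaitableEvent::InitialState::NOT_SIGNALED);
//   WaitableEvent work(WaitableEvent::ResetPolicy::AUTOMATIC,
//                      WaitableEvent::InitialState::NOT_SIGNALED);
//   WaitableEvent* events[] = {&quit, &work};
//   size_t index = WaitableEvent::WaitMany(events, 2);
//   if (index == 0) { /* quit was signaled */ } else { /* work was signaled */ }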
// -----------------------------------------------------------------------------
// If return value == count:
// The locks of the WaitableEvents have been taken in order and the Waiter has
// been enqueued in the wait-list of each. None of the WaitableEvents are
// currently signaled
// else:
//   None of the WaitableEvent locks are held. The Waiter has not been enqueued
//   in any of them. The return value is the index into |waitables| of a
//   signaled WaitableEvent: the one with the lowest index in the original
//   WaitMany call.
// -----------------------------------------------------------------------------
// static
// NO_THREAD_SAFETY_ANALYSIS: Complex control flow.
size_t WaitableEvent::EnqueueMany(std::pair<WaitableEvent*, size_t>* waitables,
size_t count,
Waiter* waiter) NO_THREAD_SAFETY_ANALYSIS {
size_t winner = count;
size_t winner_index = count;
for (size_t i = 0; i < count; ++i) {
auto& kernel = waitables[i].first->kernel_;
kernel->lock_.Acquire();
if (kernel->signaled_ && waitables[i].second < winner) {
winner = waitables[i].second;
winner_index = i;
}
}
// No events signaled. All locks acquired. Enqueue the Waiter on all of them
// and return.
if (winner == count) {
for (size_t i = 0; i < count; ++i)
waitables[i].first->Enqueue(waiter);
return count;
}
// Unlock in reverse order and possibly clear the chosen winner's signal
// before returning its index.
for (auto* w = waitables + count - 1; w >= waitables; --w) {
auto& kernel = w->first->kernel_;
if (w->second == winner) {
if (!kernel->manual_reset_)
kernel->signaled_ = false;
}
kernel->lock_.Release();
}
return winner_index;
}
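// Illustrative sketch (not part of the original file): the deadlock that the
// sort-by-address locking in WaitMany()/EnqueueMany() avoids. With naive
// in-argument-order locking, two overlapping WaitMany calls could each hold a
// lock the other needs:
//
//   // Thread 1: WaitMany({a, b})     Thread 2: WaitMany({b, a})
//   // naive:  lock(a); lock(b);      lock(b); lock(a);   // can deadlock
//   // sorted: lock(min(&a, &b));     lock(min(&a, &b));  // one thread wins,
//   //         lock(max(&a, &b));     lock(max(&a, &b));  // the other waits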
// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
// Private functions...
WaitableEvent::WaitableEventKernel::WaitableEventKernel(
ResetPolicy reset_policy,
InitialState initial_state)
: manual_reset_(reset_policy == ResetPolicy::MANUAL),
signaled_(initial_state == InitialState::SIGNALED) {}
WaitableEvent::WaitableEventKernel::~WaitableEventKernel() = default;
// -----------------------------------------------------------------------------
// Wake all waiting waiters. Called with lock held.
// -----------------------------------------------------------------------------
bool WaitableEvent::SignalAll() {
bool signaled_at_least_one = false;
for (auto* i : kernel_->waiters_) {
if (i->Fire(this))
signaled_at_least_one = true;
}
kernel_->waiters_.clear();
return signaled_at_least_one;
}
// ---------------------------------------------------------------------------
// Try to wake a single waiter. Return true if one was woken. Called with lock
// held.
// ---------------------------------------------------------------------------
bool WaitableEvent::SignalOne() {
for (;;) {
if (kernel_->waiters_.empty())
return false;
const bool r = (*kernel_->waiters_.begin())->Fire(this);
kernel_->waiters_.pop_front();
if (r)
return true;
}
}
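// Illustrative note (not part of the original file): Fire() returning false
// is the disabled-SyncWaiter case from TimedWait() above, which is why
// SignalOne() keeps popping waiters until one accepts the signal:
//
//   // TimedWait() times out -> sw.Disable() -> a later Signal() calls
//   // sw.Fire(), which returns false -> SignalOne() tries the next waiter.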
// -----------------------------------------------------------------------------
// Add a waiter to the list of those waiting. Called with lock held.
// -----------------------------------------------------------------------------
void WaitableEvent::Enqueue(Waiter* waiter) {
kernel_->waiters_.push_back(waiter);
}
// -----------------------------------------------------------------------------
// Remove a waiter from the list of those waiting. Return true if the waiter was
// actually removed. Called with lock held.
// -----------------------------------------------------------------------------
bool WaitableEvent::WaitableEventKernel::Dequeue(Waiter* waiter, void* tag) {
for (auto i = waiters_.begin(); i != waiters_.end(); ++i) {
if (*i == waiter && (*i)->Compare(tag)) {
waiters_.erase(i);
return true;
}
}
return false;
}
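// Illustrative note (not part of the original file): the two call sites in
// this code show the tag convention. Synchronous waits use the SyncWaiter
// itself as the tag, while the async watcher uses its cancellation Flag:
//
//   kernel_->Dequeue(&sw, &sw);                     // Wait() / TimedWait()
//   kernel_->Dequeue(waiter_, cancel_flag_.get());  // WaitableEventWatcher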
// -----------------------------------------------------------------------------
} // namespace base

View file

@ -0,0 +1,160 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_SYNCHRONIZATION_WAITABLE_EVENT_WATCHER_H_
#define BASE_SYNCHRONIZATION_WAITABLE_EVENT_WATCHER_H_
#include "base/base_export.h"
#include "base/macros.h"
#include "base/sequenced_task_runner.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include "base/win/object_watcher.h"
#include "base/win/scoped_handle.h"
#elif defined(OS_MACOSX)
#include <dispatch/dispatch.h>
#include "base/mac/scoped_dispatch_object.h"
#include "base/memory/weak_ptr.h"
#include "base/synchronization/waitable_event.h"
#else
#include "base/sequence_checker.h"
#include "base/synchronization/waitable_event.h"
#endif
#if !defined(OS_WIN)
#include "base/callback.h"
#endif
namespace base {
class Flag;
class AsyncWaiter;
class WaitableEvent;
// This class provides a way to wait on a WaitableEvent asynchronously.
//
// Each instance of this object can be waiting on a single WaitableEvent. When
// the waitable event is signaled, a callback is invoked on the sequence that
// called StartWatching(). This callback can be deleted by deleting the waiter.
//
// Typical usage:
//
// class MyClass {
// public:
// void DoStuffWhenSignaled(WaitableEvent *waitable_event) {
// watcher_.StartWatching(waitable_event,
//         base::BindOnce(&MyClass::OnWaitableEventSignaled, this),
//         SequencedTaskRunnerHandle::Get());
// }
// private:
// void OnWaitableEventSignaled(WaitableEvent* waitable_event) {
// // OK, time to do stuff!
// }
// base::WaitableEventWatcher watcher_;
// };
//
// In the above example, MyClass wants to "do stuff" when waitable_event
// becomes signaled. WaitableEventWatcher makes this task easy. When MyClass
// goes out of scope, the watcher_ will be destroyed, and there is no need to
// worry about OnWaitableEventSignaled being called on a deleted MyClass
// pointer.
//
// BEWARE: With automatically reset WaitableEvents, a signal may be lost if it
// occurs just before a WaitableEventWatcher is deleted. There is currently no
// safe way to stop watching an automatic reset WaitableEvent without possibly
// missing a signal.
//
// NOTE: you /are/ allowed to delete the WaitableEvent while still waiting on
// it with a Watcher. But pay attention: if the event was signaled and deleted
// right after, the callback may be called with a deleted WaitableEvent pointer.
class BASE_EXPORT WaitableEventWatcher
#if defined(OS_WIN)
: public win::ObjectWatcher::Delegate
#endif
{
public:
using EventCallback = OnceCallback<void(WaitableEvent*)>;
WaitableEventWatcher();
#if defined(OS_WIN)
~WaitableEventWatcher() override;
#else
~WaitableEventWatcher();
#endif
// When |event| is signaled, |callback| is called on the sequence that called
// StartWatching(). |task_runner| is used to asynchronously run |callback|.
bool StartWatching(WaitableEvent* event,
EventCallback callback,
scoped_refptr<SequencedTaskRunner> task_runner);
// Cancel the current watch. Must be called from the same sequence which
// started the watch.
//
// Does nothing if no event is being watched, nor if the watch has completed.
// The callback will *not* be called for the current watch after this
// function returns. Since the callback runs on the same sequence as this
// function, it cannot be called during this function either.
void StopWatching();
private:
#if defined(OS_WIN)
void OnObjectSignaled(HANDLE h) override;
// Duplicated handle of the event passed to StartWatching().
win::ScopedHandle duplicated_event_handle_;
// A watcher for |duplicated_event_handle_|. The handle MUST outlive
// |watcher_|.
win::ObjectWatcher watcher_;
EventCallback callback_;
WaitableEvent* event_ = nullptr;
#elif defined(OS_MACOSX)
// Invokes the callback and resets the source. Must be called on the task
// runner on which StartWatching() was called.
void InvokeCallback();
// Closure bound to the event being watched. This will be is_null() if
// nothing is being watched.
OnceClosure callback_;
// A reference to the receive right that is kept alive while a watcher
// is waiting. Null if no event is being watched.
scoped_refptr<WaitableEvent::ReceiveRight> receive_right_;
// A TYPE_MACH_RECV dispatch source on |receive_right_|. When a receive event
// is delivered, the message queue will be peeked and the bound |callback_|
// may be run. This will be null if nothing is currently being watched.
ScopedDispatchObject<dispatch_source_t> source_;
// Used to vend a weak pointer for calling InvokeCallback() from the
// |source_| event handler.
WeakPtrFactory<WaitableEventWatcher> weak_ptr_factory_;
#else
// Instantiated in StartWatching(). Set before the callback runs. Reset in
// StopWatching() or StartWatching().
scoped_refptr<Flag> cancel_flag_;
// Enqueued in the wait list of the watched WaitableEvent.
AsyncWaiter* waiter_ = nullptr;
// Kernel of the watched WaitableEvent.
scoped_refptr<WaitableEvent::WaitableEventKernel> kernel_;
// Ensures that StartWatching() and StopWatching() are called on the same
// sequence.
SequenceChecker sequence_checker_;
#endif
DISALLOW_COPY_AND_ASSIGN(WaitableEventWatcher);
};
} // namespace base
#endif // BASE_SYNCHRONIZATION_WAITABLE_EVENT_WATCHER_H_

View file

@ -0,0 +1,113 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/synchronization/waitable_event_watcher.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
WaitableEventWatcher::WaitableEventWatcher() : weak_ptr_factory_(this) {}
WaitableEventWatcher::~WaitableEventWatcher() {
StopWatching();
}
bool WaitableEventWatcher::StartWatching(
WaitableEvent* event,
EventCallback callback,
scoped_refptr<SequencedTaskRunner> task_runner) {
DCHECK(task_runner->RunsTasksInCurrentSequence());
DCHECK(!source_ || dispatch_source_testcancel(source_));
// Keep a reference to the receive right, so that if the event is deleted
// out from under the watcher, a signal can still be observed.
receive_right_ = event->receive_right_;
callback_ = BindOnce(std::move(callback), event);
// Locals for capture by the block. Accessing anything through the |this| or
// |event| pointers is not safe, since either may have been deleted by the
// time the handler block is invoked.
WeakPtr<WaitableEventWatcher> weak_this = weak_ptr_factory_.GetWeakPtr();
const bool auto_reset =
event->policy_ == WaitableEvent::ResetPolicy::AUTOMATIC;
// Auto-reset events always use a dispatch source. Manual-reset events
// only do so if dispatch provides reliable delivery, otherwise a manual
// watcher list is used.
if (!WaitableEvent::UseSlowWatchList(event->policy_)) {
// Use the global concurrent queue here, since it is only used to thunk
// to the real callback on the target task runner.
source_.reset(dispatch_source_create(
DISPATCH_SOURCE_TYPE_MACH_RECV, receive_right_->Name(), 0,
dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0)));
// Additional locals for block capture.
dispatch_source_t source = source_.get();
mach_port_t name = receive_right_->Name();
dispatch_source_set_event_handler(source_, ^{
// For automatic-reset events, only fire the callback if this watcher
// can claim/dequeue the event. For manual-reset events, all watchers can
// be called back.
if (auto_reset && !WaitableEvent::PeekPort(name, true)) {
return;
}
// The event has been consumed. A watcher is one-shot, so cancel the
// source to prevent receiving future event signals.
dispatch_source_cancel(source);
task_runner->PostTask(
FROM_HERE,
BindOnce(&WaitableEventWatcher::InvokeCallback, weak_this));
});
dispatch_resume(source_);
} else {
// The |event->watch_list_| closures can be run from any thread, so bind
// the callback as an invocation of PostTask.
OnceClosure watcher =
BindOnce(IgnoreResult(&TaskRunner::PostTask), task_runner, FROM_HERE,
BindOnce(&WaitableEventWatcher::InvokeCallback, weak_this));
// Hold an additional reference to the ReceiveRight, in case |watcher|
// runs and deletes the event while the lock is held.
// Hold the lock for the duration of IsSignaled() so that if Signal()
// is called by another thread, it waits for this to be added to the
// watch list.
scoped_refptr<WaitableEvent::ReceiveRight> receive_right(receive_right_);
AutoLock lock(receive_right->SlowWatchList()->lock);
if (event->IsSignaled()) {
std::move(watcher).Run();
return true;
}
receive_right_->SlowWatchList()->list.push_back(std::move(watcher));
}
return true;
}
void WaitableEventWatcher::StopWatching() {
callback_.Reset();
receive_right_ = nullptr;
if (source_) {
dispatch_source_cancel(source_);
source_.reset();
}
}
void WaitableEventWatcher::InvokeCallback() {
// The callback can be null if StopWatching() is called between signaling
// and the |callback_| getting run on the target task runner.
if (callback_.is_null())
return;
source_.reset();
receive_right_ = nullptr;
std::move(callback_).Run();
}
} // namespace base

View file

@ -0,0 +1,234 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/synchronization/waitable_event_watcher.h"
#include <utility>
#include "base/bind.h"
#include "base/logging.h"
#include "base/synchronization/lock.h"
#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
// -----------------------------------------------------------------------------
// WaitableEventWatcher (async waits).
//
// The basic design is that we add an AsyncWaiter to the wait-list of the event.
// That AsyncWaiter has a pointer to SequencedTaskRunner, and a Task to be
// posted to it. The task ends up calling the callback when it runs on the
// sequence.
//
// Since the wait can be canceled, we have a thread-safe Flag object which is
// set when the wait has been canceled. At each stage in the above, we check the
// flag before going onto the next stage. Since the wait may only be canceled in
// the sequence which runs the Task, we are assured that the callback cannot be
// called after canceling...
// -----------------------------------------------------------------------------
// A thread-safe, reference-counted, write-once flag.
// -----------------------------------------------------------------------------
class Flag : public RefCountedThreadSafe<Flag> {
public:
Flag() { flag_ = false; }
void Set() {
AutoLock locked(lock_);
flag_ = true;
}
bool value() const {
AutoLock locked(lock_);
return flag_;
}
private:
friend class RefCountedThreadSafe<Flag>;
~Flag() = default;
mutable Lock lock_;
bool flag_;
DISALLOW_COPY_AND_ASSIGN(Flag);
};
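// Illustrative sketch (not part of the original file): the write-once protocol
// Flag supports. Any thread may read it, and once set it never reverts:
//
//   scoped_refptr<Flag> flag(new Flag);
//   flag->Set();                // e.g. from StopWatching()
//   if (flag->value()) { ... }  // e.g. checked in AsyncWaiter::Fire()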
// -----------------------------------------------------------------------------
// This is an asynchronous waiter which posts a task to a SequencedTaskRunner
// when fired. An AsyncWaiter may only be in a single wait-list.
// -----------------------------------------------------------------------------
class AsyncWaiter : public WaitableEvent::Waiter {
public:
AsyncWaiter(scoped_refptr<SequencedTaskRunner> task_runner,
base::OnceClosure callback,
Flag* flag)
: task_runner_(std::move(task_runner)),
callback_(std::move(callback)),
flag_(flag) {}
bool Fire(WaitableEvent* event) override {
// Post the callback if we haven't been cancelled.
if (!flag_->value())
task_runner_->PostTask(FROM_HERE, std::move(callback_));
// We are removed from the wait-list by the WaitableEvent itself. It only
// remains to delete ourselves.
delete this;
// We can always return true because an AsyncWaiter is never in two
// different wait-lists at the same time.
return true;
}
// See StopWatching for discussion
bool Compare(void* tag) override { return tag == flag_.get(); }
private:
const scoped_refptr<SequencedTaskRunner> task_runner_;
base::OnceClosure callback_;
const scoped_refptr<Flag> flag_;
};
// -----------------------------------------------------------------------------
// For async waits we need to run a callback on a sequence. We do this by
// posting an AsyncCallbackHelper task, which checks the cancellation flag
// and, if the wait is still live, marks it fired and runs the callback.
// -----------------------------------------------------------------------------
void AsyncCallbackHelper(Flag* flag,
WaitableEventWatcher::EventCallback callback,
WaitableEvent* event) {
// Runs on the sequence that called StartWatching().
if (!flag->value()) {
// This is to let the WaitableEventWatcher know that the event has occurred.
flag->Set();
std::move(callback).Run(event);
}
}
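// Illustrative note (not part of the original file): AsyncCallbackHelper and
// StopWatching() both run on the watcher's sequence, so exactly one of them
// sees the flag unset first and the callback runs at most once:
//
//   // StopWatching() first: flag set -> the helper runs later and skips the
//   // callback.
//   // Helper first: flag unset -> the helper sets it and runs the callback ->
//   // a later StopWatching() sees the set flag and only clears state.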
WaitableEventWatcher::WaitableEventWatcher() {
sequence_checker_.DetachFromSequence();
}
WaitableEventWatcher::~WaitableEventWatcher() {
// The destructor may be called from a different sequence than the one that
// called StartWatching() when there is no active watch. To avoid triggering a
// DCHECK in StopWatching(), do not call it when there is no active watch.
if (cancel_flag_ && !cancel_flag_->value())
StopWatching();
}
// -----------------------------------------------------------------------------
// The watcher is how the user cancels a wait. After stopping the watch we
// ensure that the callback cannot be called.
// -----------------------------------------------------------------------------
bool WaitableEventWatcher::StartWatching(
WaitableEvent* event,
EventCallback callback,
scoped_refptr<SequencedTaskRunner> task_runner) {
DCHECK(sequence_checker_.CalledOnValidSequence());
// A user may call StartWatching from within the callback function. In this
// case, we won't know that we have finished watching, except that the Flag
// will have been set in AsyncCallbackHelper().
if (cancel_flag_.get() && cancel_flag_->value())
cancel_flag_ = nullptr;
DCHECK(!cancel_flag_) << "StartWatching called while still watching";
cancel_flag_ = new Flag;
OnceClosure internal_callback =
base::BindOnce(&AsyncCallbackHelper, base::RetainedRef(cancel_flag_),
std::move(callback), event);
WaitableEvent::WaitableEventKernel* kernel = event->kernel_.get();
AutoLock locked(kernel->lock_);
if (kernel->signaled_) {
if (!kernel->manual_reset_)
kernel->signaled_ = false;
// No hairpinning - we can't call the delegate directly here. We have to
// post a task to |task_runner| as usual.
task_runner->PostTask(FROM_HERE, std::move(internal_callback));
return true;
}
kernel_ = kernel;
waiter_ = new AsyncWaiter(std::move(task_runner),
std::move(internal_callback), cancel_flag_.get());
event->Enqueue(waiter_);
return true;
}
void WaitableEventWatcher::StopWatching() {
DCHECK(sequence_checker_.CalledOnValidSequence());
if (!cancel_flag_.get()) // if not currently watching...
return;
if (cancel_flag_->value()) {
// In this case, the event has fired, but we haven't figured that out yet.
// The WaitableEvent may have been deleted too.
cancel_flag_ = nullptr;
return;
}
if (!kernel_.get()) {
// We have no kernel. This means that we never enqueued a Waiter on an
// event because the event was already signaled when StartWatching was
// called.
//
// In this case, a task was enqueued on the MessageLoop and will run.
// We set the flag in case the task hasn't yet run. The flag will stop the
// callback from getting called. If the task has run then we have the last
// reference to the flag and it will be deleted immediately after.
cancel_flag_->Set();
cancel_flag_ = nullptr;
return;
}
AutoLock locked(kernel_->lock_);
// We have a lock on the kernel. No one else can signal the event while we
// have it.
// We have a possible ABA issue here. If Dequeue were to compare only the
// pointer values, it's possible that the AsyncWaiter could have been fired,
// freed, and the memory reused for a different Waiter which was enqueued in
// the same wait-list. We would mistake that waiter for our AsyncWaiter and
// remove it.
//
// To stop this, Dequeue also takes a tag argument which is passed to the
// virtual Compare function before the two are considered a match. So we need
// a tag which is good for the lifetime of this handle: the Flag. Since we
// have a reference to the Flag, its memory cannot be reused while this object
// still exists. So if we find a waiter with the correct pointer value, and
// which shares a Flag pointer, we have a real match.
if (kernel_->Dequeue(waiter_, cancel_flag_.get())) {
// Case 2: the waiter hasn't been signaled yet; it was still on the wait
// list. We've removed it, thus we can delete it and the task (which cannot
// have been enqueued with the MessageLoop because the waiter was never
// signaled)
delete waiter_;
cancel_flag_ = nullptr;
return;
}
// Case 3: the waiter isn't on the wait-list, thus it was signaled. It may not
// have run yet, so we set the flag to tell it not to bother enqueuing the
// task on the SequencedTaskRunner, but to delete it instead. The Waiter
// deletes itself once run.
cancel_flag_->Set();
cancel_flag_ = nullptr;
// If the waiter has already run then the task has been enqueued. If the task
// hasn't yet run, the flag will stop the callback from getting called. (This
// is thread safe because one may only stop a watch from the sequence that
// called StartWatching().)
//
// If the callback has already been called then we have nothing to do. The
// task has been deleted by the MessageLoop.
}
} // namespace base

View file

@ -0,0 +1,61 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/synchronization/waitable_event_watcher.h"
#include "base/compiler_specific.h"
#include "base/synchronization/waitable_event.h"
#include "base/win/object_watcher.h"
#include <windows.h>
namespace base {
WaitableEventWatcher::WaitableEventWatcher() = default;
WaitableEventWatcher::~WaitableEventWatcher() {}
bool WaitableEventWatcher::StartWatching(
WaitableEvent* event,
EventCallback callback,
scoped_refptr<SequencedTaskRunner> task_runner) {
DCHECK(event);
callback_ = std::move(callback);
event_ = event;
// Duplicate and hold the event handle until a callback is returned or
// waiting is stopped.
HANDLE handle = nullptr;
if (!::DuplicateHandle(::GetCurrentProcess(), // hSourceProcessHandle
event->handle(),
::GetCurrentProcess(), // hTargetProcessHandle
&handle,
0, // dwDesiredAccess ignored due to SAME_ACCESS
FALSE, // !bInheritHandle
DUPLICATE_SAME_ACCESS)) {
return false;
}
duplicated_event_handle_.Set(handle);
return watcher_.StartWatchingOnce(handle, this);
}
void WaitableEventWatcher::StopWatching() {
callback_.Reset();
event_ = nullptr;
watcher_.StopWatching();
duplicated_event_handle_.Close();
}
void WaitableEventWatcher::OnObjectSignaled(HANDLE h) {
DCHECK_EQ(duplicated_event_handle_.Get(), h);
WaitableEvent* event = event_;
EventCallback callback = std::move(callback_);
event_ = nullptr;
duplicated_event_handle_.Close();
DCHECK(event);
std::move(callback).Run(event);
}
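// Illustrative note (not part of the original file): because StartWatching()
// duplicated the event handle, the underlying kernel object stays alive even
// if the caller destroys the WaitableEvent before the callback fires; as the
// cross-platform header warns, the WaitableEvent* handed to the callback may
// then be dangling. Sketch, with hypothetical |callback| and |task_runner|:
//
//   auto event = std::make_unique<WaitableEvent>(
//       WaitableEvent::ResetPolicy::MANUAL,
//       WaitableEvent::InitialState::NOT_SIGNALED);
//   watcher.StartWatching(event.get(), std::move(callback), task_runner);
//   event->Signal();
//   event.reset();  // |duplicated_event_handle_| keeps the object alive.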
} // namespace base

View file

@ -0,0 +1,153 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/synchronization/waitable_event.h"
#include <windows.h>
#include <stddef.h>
#include <algorithm>
#include <utility>
#include "base/debug/activity_tracker.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/optional.h"
#include "base/threading/scoped_blocking_call.h"
#include "base/threading/thread_restrictions.h"
#include "base/time/time.h"
#include "base/time/time_override.h"
namespace base {
WaitableEvent::WaitableEvent(ResetPolicy reset_policy,
InitialState initial_state)
: handle_(CreateEvent(nullptr,
reset_policy == ResetPolicy::MANUAL,
initial_state == InitialState::SIGNALED,
nullptr)) {
// We're probably going to crash anyway if this is ever NULL, so we might as
// well make our stack reports more informative by crashing here.
CHECK(handle_.IsValid());
}
WaitableEvent::WaitableEvent(win::ScopedHandle handle)
: handle_(std::move(handle)) {
CHECK(handle_.IsValid()) << "Tried to create WaitableEvent from NULL handle";
}
WaitableEvent::~WaitableEvent() = default;
void WaitableEvent::Reset() {
ResetEvent(handle_.Get());
}
void WaitableEvent::Signal() {
SetEvent(handle_.Get());
}
bool WaitableEvent::IsSignaled() {
DWORD result = WaitForSingleObject(handle_.Get(), 0);
DCHECK(result == WAIT_OBJECT_0 || result == WAIT_TIMEOUT)
<< "Unexpected WaitForSingleObject result " << result;
return result == WAIT_OBJECT_0;
}
void WaitableEvent::Wait() {
// Record the event that this thread is blocking upon (for hang diagnosis) and
// consider it blocked for scheduling purposes. Ignore this for non-blocking
// WaitableEvents.
Optional<debug::ScopedEventWaitActivity> event_activity;
Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
scoped_blocking_call;
if (waiting_is_blocking_) {
event_activity.emplace(this);
scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
}
DWORD result = WaitForSingleObject(handle_.Get(), INFINITE);
// It is most unexpected that this should ever fail. Help consumers learn
// about it if it does.
DPCHECK(result != WAIT_FAILED);
DCHECK_EQ(WAIT_OBJECT_0, result);
}
bool WaitableEvent::TimedWait(const TimeDelta& wait_delta) {
if (wait_delta <= TimeDelta())
return IsSignaled();
// Record the event that this thread is blocking upon (for hang diagnosis) and
// consider it blocked for scheduling purposes. Ignore this for non-blocking
// WaitableEvents.
Optional<debug::ScopedEventWaitActivity> event_activity;
Optional<internal::ScopedBlockingCallWithBaseSyncPrimitives>
scoped_blocking_call;
if (waiting_is_blocking_) {
event_activity.emplace(this);
scoped_blocking_call.emplace(FROM_HERE, BlockingType::MAY_BLOCK);
}
// TimeTicks takes care of overflow but we special case is_max() nonetheless
// to avoid invoking TimeTicksNowIgnoringOverride() unnecessarily.
// WaitForSingleObject(handle_.Get(), INFINITE) doesn't spuriously wake up, so
// we don't need to worry about is_max() for the increment phase of the loop.
const TimeTicks end_time =
wait_delta.is_max() ? TimeTicks::Max()
: subtle::TimeTicksNowIgnoringOverride() + wait_delta;
for (TimeDelta remaining = wait_delta; remaining > TimeDelta();
remaining = end_time - subtle::TimeTicksNowIgnoringOverride()) {
// Truncate the timeout to milliseconds, rounded up to avoid spinning
// (either by returning too early or because a < 1ms timeout on Windows
// tends to return immediately).
const DWORD timeout_ms =
remaining.is_max()
? INFINITE
: saturated_cast<DWORD>(remaining.InMillisecondsRoundedUp());
const DWORD result = WaitForSingleObject(handle_.Get(), timeout_ms);
DCHECK(result == WAIT_OBJECT_0 || result == WAIT_TIMEOUT)
<< "Unexpected WaitForSingleObject result " << result;
switch (result) {
case WAIT_OBJECT_0:
return true;
case WAIT_TIMEOUT:
// TimedWait can time out earlier than the specified |wait_delta| on
// Windows. To make this consistent with the POSIX implementation we
// guarantee that TimedWait doesn't return earlier than |wait_delta| by
// waiting again for the remaining time.
continue;
}
}
return false;
}
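// Illustrative sketch (not part of the original file): the effect of rounding
// the remaining wait up to whole milliseconds. A remainder of 1.2ms becomes a
// 2ms timeout rather than 1ms, so the loop re-checks the deadline instead of
// spinning on sub-millisecond remainders:
//
//   TimeDelta remaining = TimeDelta::FromMicroseconds(1200);
//   DWORD timeout_ms =
//       saturated_cast<DWORD>(remaining.InMillisecondsRoundedUp());  // == 2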
// static
size_t WaitableEvent::WaitMany(WaitableEvent** events, size_t count) {
DCHECK(count) << "Cannot wait on no events";
internal::ScopedBlockingCallWithBaseSyncPrimitives scoped_blocking_call(
FROM_HERE, BlockingType::MAY_BLOCK);
// Record an event (the first) that this thread is blocking upon.
debug::ScopedEventWaitActivity event_activity(events[0]);
HANDLE handles[MAXIMUM_WAIT_OBJECTS];
CHECK_LE(count, static_cast<size_t>(MAXIMUM_WAIT_OBJECTS))
<< "Can only wait on " << MAXIMUM_WAIT_OBJECTS << " with WaitMany";
for (size_t i = 0; i < count; ++i)
handles[i] = events[i]->handle();
// The cast is safe because count is small - see the CHECK above.
DWORD result =
WaitForMultipleObjects(static_cast<DWORD>(count),
handles,
FALSE, // don't wait for all the objects
INFINITE); // no timeout
if (result >= WAIT_OBJECT_0 + count) {
DPLOG(FATAL) << "WaitForMultipleObjects failed";
return 0;
}
return result - WAIT_OBJECT_0;
}
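// Illustrative note (not part of the original file): the returned value maps
// directly onto the caller's array, computed from WAIT_OBJECT_0:
//
//   WaitableEvent* events[] = {&first, &second};  // hypothetical events
//   size_t index = WaitableEvent::WaitMany(events, 2);
//   // WaitForMultipleObjects returned WAIT_OBJECT_0 + index.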
} // namespace base