Repo created

Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions

@@ -0,0 +1,7 @@
per-file *chromeos*=skuhne@chromium.org
per-file *chromeos*=oshima@chromium.org
per-file *shared_memory*=set noparent
per-file *shared_memory*=file://ipc/SECURITY_OWNERS
per-file madv_free*=lizeb@chromium.org
per-file madv_free*=pasko@chromium.org
per-file madv_free*=mthiesse@chromium.org

@@ -0,0 +1,49 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/aligned_memory.h"
#include "base/logging.h"
#include "build/build_config.h"
#if defined(OS_ANDROID)
#include <malloc.h>
#endif
namespace base {
void* AlignedAlloc(size_t size, size_t alignment) {
DCHECK_GT(size, 0U);
DCHECK_EQ(alignment & (alignment - 1), 0U);
DCHECK_EQ(alignment % sizeof(void*), 0U);
void* ptr = nullptr;
#if defined(COMPILER_MSVC)
ptr = _aligned_malloc(size, alignment);
// Android technically supports posix_memalign(), but does not expose it in
// the current version of the library headers used by Chrome. Luckily,
// memalign() on Android returns pointers which can safely be used with
// free(), so we can use it instead. Issue filed to document this:
// http://code.google.com/p/android/issues/detail?id=35391
#elif defined(OS_ANDROID)
ptr = memalign(alignment, size);
#else
if (int ret = posix_memalign(&ptr, alignment, size)) {
DLOG(ERROR) << "posix_memalign() returned with error " << ret;
ptr = nullptr;
}
#endif
// Since aligned allocations may fail for non-memory related reasons, force a
// crash if we encounter a failed allocation; maintaining consistent behavior
// with a normal allocation failure in Chrome.
if (!ptr) {
DLOG(ERROR) << "If you crashed here, your aligned allocation is incorrect: "
<< "size=" << size << ", alignment=" << alignment;
CHECK(false);
}
// Sanity check alignment just to be safe.
DCHECK_EQ(reinterpret_cast<uintptr_t>(ptr) & (alignment - 1), 0U);
return ptr;
}
} // namespace base

@@ -0,0 +1,60 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_ALIGNED_MEMORY_H_
#define BASE_MEMORY_ALIGNED_MEMORY_H_
#include <stddef.h>
#include <stdint.h>
#include <type_traits>
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
#if defined(COMPILER_MSVC)
#include <malloc.h>
#else
#include <stdlib.h>
#endif
// A runtime sized aligned allocation can be created:
//
// float* my_array = static_cast<float*>(AlignedAlloc(size, alignment));
//
// // ... later, to release the memory:
// AlignedFree(my_array);
//
// Or using unique_ptr:
//
// std::unique_ptr<float, AlignedFreeDeleter> my_array(
// static_cast<float*>(AlignedAlloc(size, alignment)));
namespace base {
// This can be replaced with std::aligned_alloc when we have C++17.
// Caveat: std::aligned_alloc requires the size parameter be an integral
// multiple of alignment.
BASE_EXPORT void* AlignedAlloc(size_t size, size_t alignment);
inline void AlignedFree(void* ptr) {
#if defined(COMPILER_MSVC)
_aligned_free(ptr);
#else
free(ptr);
#endif
}
// Deleter for use with unique_ptr. E.g., use as
// std::unique_ptr<Foo, base::AlignedFreeDeleter> foo;
struct AlignedFreeDeleter {
inline void operator()(void* ptr) const {
AlignedFree(ptr);
}
};
} // namespace base
#endif // BASE_MEMORY_ALIGNED_MEMORY_H_
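As an illustration (a sketch, not part of this commit), minimal usage of AlignedAlloc(), AlignedFree() and AlignedFreeDeleter from the header above might look like this; the function name is hypothetical:

#include <memory>

#include "base/memory/aligned_memory.h"

// Illustrative sketch: allocate 64 floats on a 32-byte boundary. |alignment|
// must be a power of two and a multiple of sizeof(void*).
void AlignedAllocExample() {
  // Raw form: pair every AlignedAlloc() with AlignedFree().
  float* raw = static_cast<float*>(base::AlignedAlloc(64 * sizeof(float), 32));
  raw[0] = 1.0f;
  base::AlignedFree(raw);

  // unique_ptr form: AlignedFreeDeleter calls AlignedFree() automatically.
  std::unique_ptr<float, base::AlignedFreeDeleter> buffer(
      static_cast<float*>(base::AlignedAlloc(64 * sizeof(float), 32)));
  buffer.get()[0] = 2.0f;
}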

@@ -0,0 +1,119 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/discardable_memory.h"
#include "base/feature_list.h"
#include "base/memory/discardable_memory_internal.h"
#include "base/memory/madv_free_discardable_memory_posix.h"
#include "base/metrics/field_trial_params.h"
#include "build/build_config.h"
#if defined(OS_ANDROID)
#include "third_party/ashmem/ashmem.h"
#endif // defined(OS_ANDROID)
namespace base {
namespace features {
#if defined(OS_POSIX)
// Feature flag allowing the use of MADV_FREE discardable memory when there are
// multiple supported discardable memory backings.
const base::Feature kMadvFreeDiscardableMemory{
"MadvFreeDiscardableMemory", base::FEATURE_DISABLED_BY_DEFAULT};
#endif // defined(OS_POSIX)
#if defined(OS_ANDROID) || defined(OS_LINUX)
const base::Feature kDiscardableMemoryBackingTrial{
"DiscardableMemoryBackingTrial", base::FEATURE_DISABLED_BY_DEFAULT};
// Association of trial group names to trial group enum. Array order must match
// order of DiscardableMemoryTrialGroup enum.
const base::FeatureParam<DiscardableMemoryTrialGroup>::Option
kDiscardableMemoryBackingParamOptions[] = {
{DiscardableMemoryTrialGroup::kEmulatedSharedMemory, "shmem"},
{DiscardableMemoryTrialGroup::kMadvFree, "madvfree"},
{DiscardableMemoryTrialGroup::kAshmem, "ashmem"}};
const base::FeatureParam<DiscardableMemoryTrialGroup>
kDiscardableMemoryBackingParam{
&kDiscardableMemoryBackingTrial, "DiscardableMemoryBacking",
DiscardableMemoryTrialGroup::kEmulatedSharedMemory,
&kDiscardableMemoryBackingParamOptions};
#endif // defined(OS_ANDROID) || defined(OS_LINUX)
} // namespace features
namespace {
#if defined(OS_ANDROID) || defined(OS_LINUX)
DiscardableMemoryBacking GetBackingForFieldTrial() {
DiscardableMemoryTrialGroup trial_group =
GetDiscardableMemoryBackingFieldTrialGroup();
switch (trial_group) {
case DiscardableMemoryTrialGroup::kEmulatedSharedMemory:
case DiscardableMemoryTrialGroup::kAshmem:
return DiscardableMemoryBacking::kSharedMemory;
case DiscardableMemoryTrialGroup::kMadvFree:
return DiscardableMemoryBacking::kMadvFree;
}
NOTREACHED();
}
#endif // defined(OS_ANDROID) || defined(OS_LINUX)
} // namespace
#if defined(OS_ANDROID) || defined(OS_LINUX)
// Probe capabilities of this device to determine whether we should participate
// in the discardable memory backing trial.
bool DiscardableMemoryBackingFieldTrialIsEnabled() {
#if defined(OS_ANDROID)
if (!ashmem_device_is_supported())
return false;
#endif // defined(OS_ANDROID)
if (base::GetMadvFreeSupport() != base::MadvFreeSupport::kSupported)
return false;
// IMPORTANT: Only query the feature after we determine the device has the
// capabilities required, which will have the side-effect of assigning a
// trial-group.
return base::FeatureList::IsEnabled(features::kDiscardableMemoryBackingTrial);
}
DiscardableMemoryTrialGroup GetDiscardableMemoryBackingFieldTrialGroup() {
DCHECK(DiscardableMemoryBackingFieldTrialIsEnabled());
return features::kDiscardableMemoryBackingParam.Get();
}
#endif // defined(OS_ANDROID) || defined(OS_LINUX)
DiscardableMemory::DiscardableMemory() = default;
DiscardableMemory::~DiscardableMemory() = default;
DiscardableMemoryBacking GetDiscardableMemoryBacking() {
#if defined(OS_ANDROID) || defined(OS_LINUX)
if (DiscardableMemoryBackingFieldTrialIsEnabled()) {
return GetBackingForFieldTrial();
}
#endif // defined(OS_ANDROID) || defined(OS_LINUX)
#if defined(OS_ANDROID)
if (ashmem_device_is_supported())
return DiscardableMemoryBacking::kSharedMemory;
#endif // defined(OS_ANDROID)
#if defined(OS_POSIX)
if (base::FeatureList::IsEnabled(
base::features::kMadvFreeDiscardableMemory) &&
base::GetMadvFreeSupport() == base::MadvFreeSupport::kSupported) {
return DiscardableMemoryBacking::kMadvFree;
}
#endif // defined(OS_POSIX)
return DiscardableMemoryBacking::kSharedMemory;
}
} // namespace base

@@ -0,0 +1,86 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_H_
#define BASE_MEMORY_DISCARDABLE_MEMORY_H_
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "build/build_config.h"
namespace base {
namespace trace_event {
class MemoryAllocatorDump;
class ProcessMemoryDump;
} // namespace trace_event
// Discardable memory is used to cache large objects without worrying about
// blowing out memory, both on mobile devices where there is no swap, and
// desktop devices where unused free memory should be used to help the user
// experience. This is preferable to releasing memory in response to an OOM
// signal because it is simpler and provides system-wide management of
// purgable memory, though it has less flexibility as to which objects get
// discarded.
//
// Discardable memory has two states: locked and unlocked. While the memory is
// locked, it will not be discarded. Unlocking the memory allows the
// discardable memory system and the OS to reclaim it if needed. Locks do not
// nest.
//
// Notes:
// - The paging behavior of memory while it is locked is not specified. While
// mobile platforms will not swap it out, it may qualify for swapping
// on desktop platforms. It is not expected that this will matter, as the
// preferred pattern of usage for DiscardableMemory is to lock down the
// memory, use it as quickly as possible, and then unlock it.
// - Because of memory alignment, the amount of memory allocated can be
// larger than the requested memory size. It is not very efficient for
// small allocations.
// - A discardable memory instance is not thread safe. It is the
// responsibility of users of discardable memory to ensure there are no
// races.
//
class BASE_EXPORT DiscardableMemory {
public:
DiscardableMemory();
virtual ~DiscardableMemory();
// Locks the memory so that it will not be purged by the system. Returns
// true on success. If the return value is false then this object should be
// destroyed and a new one should be created.
virtual bool Lock() WARN_UNUSED_RESULT = 0;
// Unlocks the memory so that it can be purged by the system. Must be called
// after every successful lock call.
virtual void Unlock() = 0;
// Returns the memory address held by this object. The object must be locked
// before calling this.
virtual void* data() const = 0;
// Forces the memory to be purged, such that any following Lock() will fail.
// The object must be unlocked before calling this.
virtual void DiscardForTesting() = 0;
// Handy method to simplify calling data() with a reinterpret_cast.
template<typename T> T* data_as() const {
return reinterpret_cast<T*>(data());
}
// Used in tracing to dump statistics about the allocated discardable memory.
// Returns a new MemoryAllocatorDump in the |pmd| with the size of the
// discardable memory. The MemoryAllocatorDump created is owned by |pmd|. See
// ProcessMemoryDump::CreateAllocatorDump.
virtual trace_event::MemoryAllocatorDump* CreateMemoryAllocatorDump(
const char* name,
trace_event::ProcessMemoryDump* pmd) const = 0;
};
enum class DiscardableMemoryBacking { kSharedMemory, kMadvFree };
BASE_EXPORT DiscardableMemoryBacking GetDiscardableMemoryBacking();
} // namespace base
#endif // BASE_MEMORY_DISCARDABLE_MEMORY_H_
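As an illustration of the lock/use/unlock pattern described above, here is a sketch (not part of this commit) that assumes an allocator was installed earlier via DiscardableMemoryAllocator::SetInstance(); CacheExpensiveData() is a hypothetical caller:

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <memory>

#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_allocator.h"

void CacheExpensiveData(size_t size) {
  std::unique_ptr<base::DiscardableMemory> cache =
      base::DiscardableMemoryAllocator::GetInstance()
          ->AllocateLockedDiscardableMemory(size);
  if (!cache)
    return;
  // The memory starts out locked: fill it while it cannot be purged.
  std::memset(cache->data(), 0, size);
  // Unlock as soon as possible so the system may reclaim it under pressure.
  cache->Unlock();

  // Later: re-lock before reading. A false return means the contents were
  // purged; destroy the object and regenerate the data.
  if (!cache->Lock()) {
    cache.reset();
    return;
  }
  const uint8_t* bytes = cache->data_as<uint8_t>();
  (void)bytes;  // ... read the cached data ...
  cache->Unlock();
}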

@@ -0,0 +1,54 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/discardable_memory_allocator.h"
#include <utility>
#include "base/logging.h"
#include "base/process/memory.h"
namespace base {
namespace {
DiscardableMemoryAllocator* g_discardable_allocator = nullptr;
} // namespace
// static
void DiscardableMemoryAllocator::SetInstance(
DiscardableMemoryAllocator* allocator) {
DCHECK(!allocator || !g_discardable_allocator);
g_discardable_allocator = allocator;
}
// static
DiscardableMemoryAllocator* DiscardableMemoryAllocator::GetInstance() {
DCHECK(g_discardable_allocator);
return g_discardable_allocator;
}
std::unique_ptr<base::DiscardableMemory>
DiscardableMemoryAllocator::AllocateLockedDiscardableMemoryWithRetryOrDie(
size_t size,
OnceClosure on_no_memory) {
auto* allocator = GetInstance();
auto memory = allocator->AllocateLockedDiscardableMemory(size);
if (memory)
return memory;
std::move(on_no_memory).Run();
// The call above will likely have freed some memory, which will end up in the
// freelist. To actually reduce the memory footprint, we need to empty the
// freelist as well.
ReleaseFreeMemory();
memory = allocator->AllocateLockedDiscardableMemory(size);
if (!memory)
TerminateBecauseOutOfMemory(size);
return memory;
}
} // namespace base

@@ -0,0 +1,70 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
#define BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
#include <stddef.h>
#include <memory>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/discardable_memory.h"
namespace base {
class DiscardableMemory;
// An allocator which creates and manages DiscardableMemory. The allocator
// itself should be created via CreateDiscardableMemoryAllocator, which
// selects an appropriate implementation depending on platform support.
class BASE_EXPORT DiscardableMemoryAllocator {
public:
DiscardableMemoryAllocator() = default;
virtual ~DiscardableMemoryAllocator() = default;
// Returns the allocator instance.
static DiscardableMemoryAllocator* GetInstance();
// Sets the allocator instance. Can only be called once, e.g. on startup.
// Ownership of |allocator| remains with the caller.
static void SetInstance(DiscardableMemoryAllocator* allocator);
// Creates an initially-locked instance of discardable memory.
// If the platform supports Android ashmem or madvise(MADV_FREE),
// platform-specific techniques will be used to discard memory under pressure.
// Otherwise, discardable memory is emulated and manually discarded
// heuristically (via memory pressure notifications).
virtual std::unique_ptr<DiscardableMemory> AllocateLockedDiscardableMemory(
size_t size) = 0;
// Allocates discardable memory the same way |AllocateLockedDiscardableMemory|
// does. In case of failure, calls |on_no_memory| and retries once. As a
// consequence, |on_no_memory| should free some memory, and importantly,
// address space as well.
//
// In case of allocation failure after retry, terminates the process with
// an Out Of Memory status (for triage in crash reports).
//
// As a consequence, does *not* return nullptr.
std::unique_ptr<DiscardableMemory>
AllocateLockedDiscardableMemoryWithRetryOrDie(size_t size,
OnceClosure on_no_memory);
// Gets the total number of bytes allocated by this allocator which have not
// been discarded.
virtual size_t GetBytesAllocated() const = 0;
// Release any memory used in the implementation of discardable memory that is
// not immediately being used.
virtual void ReleaseFreeMemory() = 0;
private:
DISALLOW_COPY_AND_ASSIGN(DiscardableMemoryAllocator);
};
} // namespace base
#endif // BASE_MEMORY_DISCARDABLE_MEMORY_ALLOCATOR_H_
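For illustration, a sketch (not part of this commit) of installing an allocator at startup and using the retry-or-die helper; it uses the MADV_FREE-backed allocator added later in this commit as the example backing, and PurgeCaches() is a hypothetical callback:

#include <memory>

#include "base/bind.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_allocator.h"
#include "base/memory/madv_free_discardable_memory_allocator_posix.h"

namespace {

// Hypothetical: a real embedder would free caches (and address space) here so
// that the retried allocation can succeed.
void PurgeCaches() {}

}  // namespace

void InstallAllocatorAndAllocate() {
  // Install the process-wide allocator once, e.g. during startup.
  static base::MadvFreeDiscardableMemoryAllocatorPosix allocator;
  base::DiscardableMemoryAllocator::SetInstance(&allocator);

  // Never returns null: if the allocation still fails after |on_no_memory| is
  // run, the process is terminated with an out-of-memory status.
  std::unique_ptr<base::DiscardableMemory> memory =
      base::DiscardableMemoryAllocator::GetInstance()
          ->AllocateLockedDiscardableMemoryWithRetryOrDie(
              4096, base::BindOnce(&PurgeCaches));
  memory->Unlock();
}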

@@ -0,0 +1,52 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_DISCARDABLE_MEMORY_INTERNAL_H_
#define BASE_MEMORY_DISCARDABLE_MEMORY_INTERNAL_H_
#include "base/base_export.h"
#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"
#include "build/build_config.h"
#if defined(OS_ANDROID) || defined(OS_LINUX)
namespace base {
// Enumeration of the possible experiment groups in the discardable memory
// backing trial. Note that |kAshmem| and |kEmulatedSharedMemory| both map to
// discardable shared memory, except the former allows for the use of ashmem for
// unpinning memory. Ensure that the order of the enum values matches those in
// |kDiscardableMemoryBackingParamOptions|.
enum DiscardableMemoryTrialGroup : int {
kEmulatedSharedMemory = 0,
kMadvFree,
// Only Android devices will be assigned to the ashmem group.
kAshmem,
};
namespace features {
// Feature flag enabling the discardable memory backing trial.
BASE_EXPORT extern const base::Feature kDiscardableMemoryBackingTrial;
BASE_EXPORT extern const base::FeatureParam<DiscardableMemoryTrialGroup>::Option
kDiscardableMemoryBackingParamOptions[];
BASE_EXPORT extern const base::FeatureParam<DiscardableMemoryTrialGroup>
kDiscardableMemoryBackingParam;
} // namespace features
// Whether we should do the discardable memory backing trial for this session.
BASE_EXPORT bool DiscardableMemoryBackingFieldTrialIsEnabled();
// If we should do the discardable memory backing trial, then get the trial
// group this session belongs in.
BASE_EXPORT DiscardableMemoryTrialGroup
GetDiscardableMemoryBackingFieldTrialGroup();
} // namespace base
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
#endif // BASE_MEMORY_DISCARDABLE_MEMORY_INTERNAL_H_

@@ -0,0 +1,564 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/discardable_shared_memory.h"
#include <stdint.h>
#include <algorithm>
#include "base/atomicops.h"
#include "base/bits.h"
#include "base/feature_list.h"
#include "base/logging.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_internal.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/numerics/safe_math.h"
#include "base/process/process_metrics.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/process_memory_dump.h"
#include "build/build_config.h"
#if defined(OS_POSIX) && !defined(OS_NACL)
// For madvise() which is available on all POSIX compatible systems.
#include <sys/mman.h>
#endif
#if defined(OS_ANDROID)
#include "third_party/ashmem/ashmem.h"
#endif
#if defined(OS_WIN)
#include <windows.h>
#include "base/win/windows_version.h"
#endif
#if defined(OS_FUCHSIA)
#include <lib/zx/vmar.h>
#include <zircon/types.h>
#include "base/fuchsia/fuchsia_logging.h"
#endif
namespace base {
namespace {
// Use a machine-sized pointer as atomic type. It will use the Atomic32 or
// Atomic64 routines, depending on the architecture.
typedef intptr_t AtomicType;
typedef uintptr_t UAtomicType;
// Template specialization for timestamp serialization/deserialization. This
// is used to serialize timestamps using Unix time on systems where AtomicType
// does not have enough precision to contain a timestamp in the standard
// serialized format.
template <int>
Time TimeFromWireFormat(int64_t value);
template <int>
int64_t TimeToWireFormat(Time time);
// Serialize to Unix time when using 4-byte wire format.
// Note: on 19 January 2038, this will cease to work.
template <>
Time ALLOW_UNUSED_TYPE TimeFromWireFormat<4>(int64_t value) {
return value ? Time::UnixEpoch() + TimeDelta::FromSeconds(value) : Time();
}
template <>
int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<4>(Time time) {
return time > Time::UnixEpoch() ? (time - Time::UnixEpoch()).InSeconds() : 0;
}
// Standard serialization format when using 8-byte wire format.
template <>
Time ALLOW_UNUSED_TYPE TimeFromWireFormat<8>(int64_t value) {
return Time::FromInternalValue(value);
}
template <>
int64_t ALLOW_UNUSED_TYPE TimeToWireFormat<8>(Time time) {
return time.ToInternalValue();
}
struct SharedState {
enum LockState { UNLOCKED = 0, LOCKED = 1 };
explicit SharedState(AtomicType ivalue) { value.i = ivalue; }
SharedState(LockState lock_state, Time timestamp) {
int64_t wire_timestamp = TimeToWireFormat<sizeof(AtomicType)>(timestamp);
DCHECK_GE(wire_timestamp, 0);
DCHECK_EQ(lock_state & ~1, 0);
value.u = (static_cast<UAtomicType>(wire_timestamp) << 1) | lock_state;
}
LockState GetLockState() const { return static_cast<LockState>(value.u & 1); }
Time GetTimestamp() const {
return TimeFromWireFormat<sizeof(AtomicType)>(value.u >> 1);
}
// Bit 1: Lock state. Bit is set when locked.
// Bit 2..sizeof(AtomicType)*8: Usage timestamp. NULL time when locked or
// purged.
union {
AtomicType i;
UAtomicType u;
} value;
};
// Shared state is stored at offset 0 in shared memory segments.
SharedState* SharedStateFromSharedMemory(
const WritableSharedMemoryMapping& shared_memory) {
DCHECK(shared_memory.IsValid());
return static_cast<SharedState*>(shared_memory.memory());
}
// Round up |size| to a multiple of page size.
size_t AlignToPageSize(size_t size) {
return bits::Align(size, base::GetPageSize());
}
#if defined(OS_ANDROID)
bool UseAshmemUnpinningForDiscardableMemory() {
if (!ashmem_device_is_supported())
return false;
// If we are participating in the discardable memory backing trial, only
// enable ashmem unpinning when we are in the corresponding trial group.
if (base::DiscardableMemoryBackingFieldTrialIsEnabled()) {
return base::GetDiscardableMemoryBackingFieldTrialGroup() ==
base::DiscardableMemoryTrialGroup::kAshmem;
}
return true;
}
#endif // defined(OS_ANDROID)
} // namespace
DiscardableSharedMemory::DiscardableSharedMemory()
: mapped_size_(0), locked_page_count_(0) {
}
DiscardableSharedMemory::DiscardableSharedMemory(
UnsafeSharedMemoryRegion shared_memory_region)
: shared_memory_region_(std::move(shared_memory_region)),
mapped_size_(0),
locked_page_count_(0) {}
DiscardableSharedMemory::~DiscardableSharedMemory() = default;
bool DiscardableSharedMemory::CreateAndMap(size_t size) {
CheckedNumeric<size_t> checked_size = size;
checked_size += AlignToPageSize(sizeof(SharedState));
if (!checked_size.IsValid())
return false;
shared_memory_region_ =
UnsafeSharedMemoryRegion::Create(checked_size.ValueOrDie());
if (!shared_memory_region_.IsValid())
return false;
shared_memory_mapping_ = shared_memory_region_.Map();
if (!shared_memory_mapping_.IsValid())
return false;
mapped_size_ = shared_memory_mapping_.mapped_size() -
AlignToPageSize(sizeof(SharedState));
locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
for (size_t page = 0; page < locked_page_count_; ++page)
locked_pages_.insert(page);
#endif
DCHECK(last_known_usage_.is_null());
SharedState new_state(SharedState::LOCKED, Time());
subtle::Release_Store(
&SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
new_state.value.i);
return true;
}
bool DiscardableSharedMemory::Map(size_t size) {
DCHECK(!shared_memory_mapping_.IsValid());
if (shared_memory_mapping_.IsValid())
return false;
shared_memory_mapping_ = shared_memory_region_.MapAt(
0, AlignToPageSize(sizeof(SharedState)) + size);
if (!shared_memory_mapping_.IsValid())
return false;
mapped_size_ = shared_memory_mapping_.mapped_size() -
AlignToPageSize(sizeof(SharedState));
locked_page_count_ = AlignToPageSize(mapped_size_) / base::GetPageSize();
#if DCHECK_IS_ON()
for (size_t page = 0; page < locked_page_count_; ++page)
locked_pages_.insert(page);
#endif
return true;
}
bool DiscardableSharedMemory::Unmap() {
if (!shared_memory_mapping_.IsValid())
return false;
shared_memory_mapping_ = WritableSharedMemoryMapping();
locked_page_count_ = 0;
#if DCHECK_IS_ON()
locked_pages_.clear();
#endif
mapped_size_ = 0;
return true;
}
DiscardableSharedMemory::LockResult DiscardableSharedMemory::Lock(
size_t offset, size_t length) {
DCHECK_EQ(AlignToPageSize(offset), offset);
DCHECK_EQ(AlignToPageSize(length), length);
// Calls to this function must be synchronized properly.
DFAKE_SCOPED_LOCK(thread_collision_warner_);
DCHECK(shared_memory_mapping_.IsValid());
// We need to successfully acquire the platform independent lock before
// individual pages can be locked.
if (!locked_page_count_) {
// Return false when instance has been purged or not initialized properly
// by checking if |last_known_usage_| is NULL.
if (last_known_usage_.is_null())
return FAILED;
SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
SharedState new_state(SharedState::LOCKED, Time());
SharedState result(subtle::Acquire_CompareAndSwap(
&SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
old_state.value.i, new_state.value.i));
if (result.value.u != old_state.value.u) {
// Update |last_known_usage_| in case the above CAS failed because of
// an incorrect timestamp.
last_known_usage_ = result.GetTimestamp();
return FAILED;
}
}
// Zero for length means "everything onward".
if (!length)
length = AlignToPageSize(mapped_size_) - offset;
size_t start = offset / base::GetPageSize();
size_t end = start + length / base::GetPageSize();
DCHECK_LE(start, end);
DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());
// Add pages to |locked_page_count_|.
// Note: Locking a page that is already locked is an error.
locked_page_count_ += end - start;
#if DCHECK_IS_ON()
// Detect incorrect usage by keeping track of exactly what pages are locked.
for (auto page = start; page < end; ++page) {
auto result = locked_pages_.insert(page);
DCHECK(result.second);
}
DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif
// Always behave as if memory was purged when trying to lock a 0 byte segment.
if (!length)
return PURGED;
#if defined(OS_ANDROID)
// Ensure that the platform won't discard the required pages.
return LockPages(shared_memory_region_,
AlignToPageSize(sizeof(SharedState)) + offset, length);
#elif defined(OS_MACOSX)
// On macOS, there is no mechanism to lock pages. However, we do need to call
// madvise(MADV_FREE_REUSE) in order to correctly update accounting for memory
// footprint via task_info().
//
// Note that calling madvise(MADV_FREE_REUSE) on regions that haven't had
// madvise(MADV_FREE_REUSABLE) called on them has no effect.
//
// Note that the corresponding call to MADV_FREE_REUSABLE is in Purge(), since
// that's where the memory is actually released, rather than Unlock(), which
// is a no-op on macOS.
//
// For more information, see
// https://bugs.chromium.org/p/chromium/issues/detail?id=823915.
madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
AlignToPageSize(sizeof(SharedState)),
AlignToPageSize(mapped_size_), MADV_FREE_REUSE);
return DiscardableSharedMemory::SUCCESS;
#else
return DiscardableSharedMemory::SUCCESS;
#endif
}
void DiscardableSharedMemory::Unlock(size_t offset, size_t length) {
DCHECK_EQ(AlignToPageSize(offset), offset);
DCHECK_EQ(AlignToPageSize(length), length);
// Calls to this function must be synchronized properly.
DFAKE_SCOPED_LOCK(thread_collision_warner_);
// Passing zero for |length| means "everything onward". Note that |length| may
// still be zero after this calculation, e.g. if |mapped_size_| is zero.
if (!length)
length = AlignToPageSize(mapped_size_) - offset;
DCHECK(shared_memory_mapping_.IsValid());
// Allow the pages to be discarded by the platform, if supported.
UnlockPages(shared_memory_region_,
AlignToPageSize(sizeof(SharedState)) + offset, length);
size_t start = offset / base::GetPageSize();
size_t end = start + length / base::GetPageSize();
DCHECK_LE(start, end);
DCHECK_LE(end, AlignToPageSize(mapped_size_) / base::GetPageSize());
// Remove pages from |locked_page_count_|.
// Note: Unlocking a page that is not locked is an error.
DCHECK_GE(locked_page_count_, end - start);
locked_page_count_ -= end - start;
#if DCHECK_IS_ON()
// Detect incorrect usage by keeping track of exactly what pages are locked.
for (auto page = start; page < end; ++page) {
auto erased_count = locked_pages_.erase(page);
DCHECK_EQ(1u, erased_count);
}
DCHECK_EQ(locked_pages_.size(), locked_page_count_);
#endif
// Early out and avoid releasing the platform independent lock if some pages
// are still locked.
if (locked_page_count_)
return;
Time current_time = Now();
DCHECK(!current_time.is_null());
SharedState old_state(SharedState::LOCKED, Time());
SharedState new_state(SharedState::UNLOCKED, current_time);
// Note: timestamp cannot be NULL as that is a unique value used when
// locked or purged.
DCHECK(!new_state.GetTimestamp().is_null());
// Timestamp precision should at least be accurate to the second.
DCHECK_EQ((new_state.GetTimestamp() - Time::UnixEpoch()).InSeconds(),
(current_time - Time::UnixEpoch()).InSeconds());
SharedState result(subtle::Release_CompareAndSwap(
&SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
old_state.value.i, new_state.value.i));
DCHECK_EQ(old_state.value.u, result.value.u);
last_known_usage_ = current_time;
}
void* DiscardableSharedMemory::memory() const {
return static_cast<uint8_t*>(shared_memory_mapping_.memory()) +
AlignToPageSize(sizeof(SharedState));
}
bool DiscardableSharedMemory::Purge(Time current_time) {
// Calls to this function must be synchronized properly.
DFAKE_SCOPED_LOCK(thread_collision_warner_);
DCHECK(shared_memory_mapping_.IsValid());
SharedState old_state(SharedState::UNLOCKED, last_known_usage_);
SharedState new_state(SharedState::UNLOCKED, Time());
SharedState result(subtle::Acquire_CompareAndSwap(
&SharedStateFromSharedMemory(shared_memory_mapping_)->value.i,
old_state.value.i, new_state.value.i));
// Update |last_known_usage_| to |current_time| if the memory is locked. This
// allows the caller to determine if purging failed because last known usage
// was incorrect or memory was locked. In the second case, the caller should
// most likely wait for some amount of time before attempting to purge the
// memory again.
if (result.value.u != old_state.value.u) {
last_known_usage_ = result.GetLockState() == SharedState::LOCKED
? current_time
: result.GetTimestamp();
return false;
}
// The next section releases as many resources as the purging process can,
// until the client process notices the purge and releases its own references.
// Note: this memory will not be accessed again. The segment will be freed
// asynchronously at a later time, so just do the best we can immediately.
#if defined(OS_POSIX) && !defined(OS_NACL)
// Linux and Android provide MADV_REMOVE, which is preferred as it has a
// behavior that can be verified in tests. Other POSIX flavors (macOS, BSDs)
// provide MADV_FREE, which has the same result but purges memory lazily.
#if defined(OS_LINUX) || defined(OS_ANDROID)
#define MADV_PURGE_ARGUMENT MADV_REMOVE
#elif defined(OS_MACOSX)
// MADV_FREE_REUSABLE is similar to MADV_FREE, but also marks the pages with the
// reusable bit, which allows both Activity Monitor and memory-infra to
// correctly track the pages.
#define MADV_PURGE_ARGUMENT MADV_FREE_REUSABLE
#else
#define MADV_PURGE_ARGUMENT MADV_FREE
#endif
// Advise the kernel to remove resources associated with purged pages.
// Subsequent accesses of memory pages will succeed, but might result in
// zero-fill-on-demand pages.
if (madvise(static_cast<char*>(shared_memory_mapping_.memory()) +
AlignToPageSize(sizeof(SharedState)),
AlignToPageSize(mapped_size_), MADV_PURGE_ARGUMENT)) {
DPLOG(ERROR) << "madvise() failed";
}
#elif defined(OS_WIN)
// On Windows, discarded pages are not returned to the system immediately and
// not guaranteed to be zeroed when returned to the application.
using DiscardVirtualMemoryFunction =
DWORD(WINAPI*)(PVOID virtualAddress, SIZE_T size);
static DiscardVirtualMemoryFunction discard_virtual_memory =
reinterpret_cast<DiscardVirtualMemoryFunction>(GetProcAddress(
GetModuleHandle(L"Kernel32.dll"), "DiscardVirtualMemory"));
char* address = static_cast<char*>(shared_memory_mapping_.memory()) +
AlignToPageSize(sizeof(SharedState));
size_t length = AlignToPageSize(mapped_size_);
// Use DiscardVirtualMemory when available because it releases faster than
// MEM_RESET.
DWORD ret = ERROR_NOT_SUPPORTED;
if (discard_virtual_memory) {
ret = discard_virtual_memory(address, length);
}
// DiscardVirtualMemory is buggy in Win10 SP0, so fall back to MEM_RESET on
// failure.
if (ret != ERROR_SUCCESS) {
void* ptr = VirtualAlloc(address, length, MEM_RESET, PAGE_READWRITE);
CHECK(ptr);
}
#elif defined(OS_FUCHSIA)
// De-commit via our VMAR, rather than relying on the VMO handle, since the
// handle may have been closed after the memory was mapped into this process.
uint64_t address_int = reinterpret_cast<uint64_t>(
static_cast<char*>(shared_memory_mapping_.memory()) +
AlignToPageSize(sizeof(SharedState)));
zx_status_t status = zx::vmar::root_self()->op_range(
ZX_VMO_OP_DECOMMIT, address_int, AlignToPageSize(mapped_size_), nullptr,
0);
ZX_DCHECK(status == ZX_OK, status) << "zx_vmo_op_range(ZX_VMO_OP_DECOMMIT)";
#endif // defined(OS_FUCHSIA)
last_known_usage_ = Time();
return true;
}
bool DiscardableSharedMemory::IsMemoryResident() const {
DCHECK(shared_memory_mapping_.IsValid());
SharedState result(subtle::NoBarrier_Load(
&SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));
return result.GetLockState() == SharedState::LOCKED ||
!result.GetTimestamp().is_null();
}
bool DiscardableSharedMemory::IsMemoryLocked() const {
DCHECK(shared_memory_mapping_.IsValid());
SharedState result(subtle::NoBarrier_Load(
&SharedStateFromSharedMemory(shared_memory_mapping_)->value.i));
return result.GetLockState() == SharedState::LOCKED;
}
void DiscardableSharedMemory::Close() {
shared_memory_region_ = UnsafeSharedMemoryRegion();
}
void DiscardableSharedMemory::CreateSharedMemoryOwnershipEdge(
trace_event::MemoryAllocatorDump* local_segment_dump,
trace_event::ProcessMemoryDump* pmd,
bool is_owned) const {
auto* shared_memory_dump = SharedMemoryTracker::GetOrCreateSharedMemoryDump(
shared_memory_mapping_, pmd);
// TODO(ssid): Clean this up with a new API to inherit the size of the parent
// dump once we send the full PMD and calculate sizes inside chrome,
// crbug.com/704203.
size_t resident_size = shared_memory_dump->GetSizeInternal();
local_segment_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
trace_event::MemoryAllocatorDump::kUnitsBytes,
resident_size);
// By creating an edge with a higher |importance| (w.r.t. non-owned dumps),
// the tracing UI will attribute the effective size of the segment to the
// client instead of the manager.
// TODO(ssid): Define better constants in MemoryAllocatorDump for importance
// values, crbug.com/754793.
const int kImportance = is_owned ? 2 : 0;
auto shared_memory_guid = shared_memory_mapping_.guid();
local_segment_dump->AddString("id", "hash", shared_memory_guid.ToString());
// Owned discardable segments, which are allocated by the client process, could
// have been cleared by the discardable manager. So the segment need not exist
// in memory, and weak dumps are created to indicate to the UI that the dump
// should exist only if the manager also created the global dump edge.
if (is_owned) {
pmd->CreateWeakSharedMemoryOwnershipEdge(local_segment_dump->guid(),
shared_memory_guid, kImportance);
} else {
pmd->CreateSharedMemoryOwnershipEdge(local_segment_dump->guid(),
shared_memory_guid, kImportance);
}
}
// static
DiscardableSharedMemory::LockResult DiscardableSharedMemory::LockPages(
const UnsafeSharedMemoryRegion& region,
size_t offset,
size_t length) {
#if defined(OS_ANDROID)
if (region.IsValid()) {
if (UseAshmemUnpinningForDiscardableMemory()) {
int pin_result =
ashmem_pin_region(region.GetPlatformHandle(), offset, length);
if (pin_result == ASHMEM_WAS_PURGED)
return PURGED;
if (pin_result < 0)
return FAILED;
}
}
#endif
return SUCCESS;
}
// static
void DiscardableSharedMemory::UnlockPages(
const UnsafeSharedMemoryRegion& region,
size_t offset,
size_t length) {
#if defined(OS_ANDROID)
if (region.IsValid()) {
if (UseAshmemUnpinningForDiscardableMemory()) {
int unpin_result =
ashmem_unpin_region(region.GetPlatformHandle(), offset, length);
DCHECK_EQ(0, unpin_result);
}
}
#endif
}
Time DiscardableSharedMemory::Now() const {
return Time::Now();
}
#if defined(OS_ANDROID)
// static
bool DiscardableSharedMemory::IsAshmemDeviceSupportedForTesting() {
return UseAshmemUnpinningForDiscardableMemory();
}
#endif
} // namespace base
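As a standalone illustration (not part of the file above) of the SharedState word layout described in its comments: the lock state lives in bit 0 and the wire-format timestamp occupies the remaining bits.

#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t kLocked = 1;                   // SharedState::LOCKED
  const uintptr_t wire_timestamp = 1700000000;   // e.g. Unix seconds (4-byte wire)

  // Pack exactly as the SharedState(LockState, Time) constructor does.
  const uintptr_t packed = (wire_timestamp << 1) | kLocked;

  // Unpack as GetLockState() and GetTimestamp() do.
  const bool locked = packed & 1;
  const uintptr_t timestamp = packed >> 1;
  std::printf("locked=%d timestamp=%ju\n", locked ? 1 : 0,
              static_cast<uintmax_t>(timestamp));
  return 0;
}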

@@ -0,0 +1,193 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
#define BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
#include <stddef.h>
#include "base/base_export.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/threading/thread_collision_warner.h"
#include "base/time/time.h"
#include "build/build_config.h"
#if DCHECK_IS_ON()
#include <set>
#endif
// Linux (including Android) supports the MADV_REMOVE argument with madvise(),
// which has the behavior of reliably causing zero-fill-on-demand pages to
// be returned after a call. Here we define
// DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE on Linux
// and Android to indicate that this type of behavior can be expected on
// those platforms. Note that madvise() will still be used on other POSIX
// platforms but doesn't provide the zero-fill-on-demand pages guarantee.
#if defined(OS_LINUX) || defined(OS_ANDROID)
#define DISCARDABLE_SHARED_MEMORY_ZERO_FILL_ON_DEMAND_PAGES_AFTER_PURGE
#endif
namespace base {
namespace trace_event {
class MemoryAllocatorDump;
class ProcessMemoryDump;
} // namespace trace_event
// Platform abstraction for discardable shared memory.
//
// This class is not thread-safe. Clients are responsible for synchronizing
// access to an instance of this class.
class BASE_EXPORT DiscardableSharedMemory {
public:
enum LockResult { SUCCESS, PURGED, FAILED };
DiscardableSharedMemory();
// Create a new DiscardableSharedMemory object from an existing, open shared
// memory file. Memory must be locked.
explicit DiscardableSharedMemory(UnsafeSharedMemoryRegion region);
// Closes any open files.
virtual ~DiscardableSharedMemory();
// Creates and maps a locked DiscardableSharedMemory object with |size|.
// Returns true on success and false on failure.
bool CreateAndMap(size_t size);
// Maps the locked discardable memory into the caller's address space.
// Returns true on success, false otherwise.
bool Map(size_t size);
// Unmaps the discardable shared memory from the caller's address space.
// Unmapping won't unlock previously locked range.
// Returns true if successful; returns false on error or if the memory is
// not mapped.
bool Unmap();
// The actual size of the mapped memory (may be larger than requested).
size_t mapped_size() const { return mapped_size_; }
// Returns a duplicated shared memory region for this DiscardableSharedMemory
// object.
UnsafeSharedMemoryRegion DuplicateRegion() const {
return shared_memory_region_.Duplicate();
}
// Returns an ID for the shared memory region. This is the ID of the mapped
// region, consistent across all processes, and is valid as long as the region
// is not unmapped.
const UnguessableToken& mapped_id() const {
return shared_memory_mapping_.guid();
}
// Locks a range of memory so that it will not be purged by the system.
// The range of memory must be unlocked. The result of trying to lock an
// already locked range is undefined. |offset| and |length| must both be
// a multiple of the page size as returned by GetPageSize().
// Passing 0 for |length| means "everything onward".
// Returns SUCCESS if range was successfully locked and the memory is still
// resident, PURGED if range was successfully locked but has been purged
// since last time it was locked and FAILED if range could not be locked.
// Locking can fail for two reasons: the object might have been purged, or our
// last known usage timestamp might be out of date. The last known usage time
// is updated to the actual last usage timestamp if the memory is still
// resident, or to 0 if not.
LockResult Lock(size_t offset, size_t length);
// Unlocks a previously successfully locked range of memory. The range of
// memory must be locked. The result of trying to unlock a range that was not
// previously locked is undefined.
// |offset| and |length| must both be a multiple of the page size as returned
// by GetPageSize().
// Passing 0 for |length| means "everything onward".
void Unlock(size_t offset, size_t length);
// Gets a pointer to the opened discardable memory space. Discardable memory
// must have been mapped via Map().
void* memory() const;
// Returns the last known usage time for the DiscardableSharedMemory object.
// This may be earlier than the "true" usage time when the memory has been used
// by a different process. Returns NULL time if purged.
Time last_known_usage() const { return last_known_usage_; }
// This returns true and sets |last_known_usage_| to 0 if the
// DiscardableSharedMemory object was successfully purged. Purging can fail for
// two reasons: the object might be locked, or our last known usage timestamp
// might be out of date. The last known usage time is updated to |current_time|
// if locked, or to the actual last usage timestamp if unlocked. It is often
// necessary to call this function twice for the object to successfully be
// purged: the first call updates |last_known_usage_|, and the second call
// purges the object using the updated |last_known_usage_|.
// Note: there is no guarantee that multiple calls to this function will
// successfully purge the object. The DiscardableSharedMemory object might be
// locked, or another thread/process might lock and unlock it in between each
// call.
bool Purge(Time current_time);
// Returns true if memory is still resident.
bool IsMemoryResident() const;
// Returns true if memory is locked.
bool IsMemoryLocked() const;
// Closes the open discardable memory segment.
// It is safe to call Close repeatedly.
void Close();
// For tracing: creates an ownership edge to the underlying cross-process
// shared memory dump in the given |pmd|. |local_segment_dump| is the dump
// associated with the local discardable shared memory segment, and |is_owned|
// is true when the current process owns the segment and the effective memory
// is assigned to the current process.
void CreateSharedMemoryOwnershipEdge(
trace_event::MemoryAllocatorDump* local_segment_dump,
trace_event::ProcessMemoryDump* pmd,
bool is_owned) const;
#if defined(OS_ANDROID)
// Returns true if the Ashmem device is supported on this system.
// Only use this for unit-testing.
static bool IsAshmemDeviceSupportedForTesting();
#endif
private:
// LockPages/UnlockPages are platform-native discardable page management
// helper functions. Both expect |offset| to be specified relative to the
// base address at which |memory| is mapped, and that |offset| and |length|
// are page-aligned by the caller.
// Returns SUCCESS on platforms which do not support discardable pages.
static LockResult LockPages(const UnsafeSharedMemoryRegion& region,
size_t offset,
size_t length);
// UnlockPages() is a no-op on platforms not supporting discardable pages.
static void UnlockPages(const UnsafeSharedMemoryRegion& region,
size_t offset,
size_t length);
// Virtual for tests.
virtual Time Now() const;
UnsafeSharedMemoryRegion shared_memory_region_;
WritableSharedMemoryMapping shared_memory_mapping_;
size_t mapped_size_;
size_t locked_page_count_;
#if DCHECK_IS_ON()
std::set<size_t> locked_pages_;
#endif
// Implementation is not thread-safe but still usable if clients are
// synchronized somehow. Use a collision warner to detect incorrect usage.
DFAKE_MUTEX(thread_collision_warner_);
Time last_known_usage_;
DISALLOW_COPY_AND_ASSIGN(DiscardableSharedMemory);
};
} // namespace base
#endif // BASE_MEMORY_DISCARDABLE_SHARED_MEMORY_H_
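An illustrative single-process sketch (not part of this commit) of the Lock()/Unlock() contract documented above; real users typically pass the region to another process via DuplicateRegion(), which is omitted here, and UseDiscardableSegment() is a hypothetical caller:

#include <cstddef>
#include <cstring>

#include "base/memory/discardable_shared_memory.h"

bool UseDiscardableSegment(size_t size) {
  base::DiscardableSharedMemory memory;
  if (!memory.CreateAndMap(size))
    return false;
  // The segment starts out locked: write while it cannot be purged.
  std::memset(memory.memory(), 0xAB, memory.mapped_size());
  // Passing 0 for |length| means "everything onward"; |offset| and |length|
  // must be page-aligned, which 0 trivially is.
  memory.Unlock(0, 0);

  // Later: re-lock the whole range before reading.
  switch (memory.Lock(0, 0)) {
    case base::DiscardableSharedMemory::SUCCESS:
      // Still resident and now locked; safe to read, then unlock again.
      memory.Unlock(0, 0);
      return true;
    case base::DiscardableSharedMemory::PURGED:
      // Locked, but the contents were discarded; regenerate before use.
      memory.Unlock(0, 0);
      return false;
    case base::DiscardableSharedMemory::FAILED:
      // The lock could not be taken; discard this instance.
      return false;
  }
  return false;
}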

@@ -0,0 +1,25 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_FREE_DELETER_H_
#define BASE_MEMORY_FREE_DELETER_H_
#include <stdlib.h>
namespace base {
// Function object which invokes 'free' on its parameter, which must be
// a pointer. Can be used to store malloc-allocated pointers in std::unique_ptr:
//
// std::unique_ptr<int, base::FreeDeleter> foo_ptr(
// static_cast<int*>(malloc(sizeof(int))));
struct FreeDeleter {
inline void operator()(void* ptr) const {
free(ptr);
}
};
} // namespace base
#endif // BASE_MEMORY_FREE_DELETER_H_

@@ -0,0 +1,60 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <inttypes.h>
#include <sys/mman.h>
#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
#include "base/process/process_metrics.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/memory_dump_manager.h"
namespace base {
MadvFreeDiscardableMemoryAllocatorPosix::
MadvFreeDiscardableMemoryAllocatorPosix() {
// Don't register dump provider if ThreadTaskRunnerHandle is not set, such as
// in tests and Android Webview.
if (base::ThreadTaskRunnerHandle::IsSet()) {
trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, "MadvFreeDiscardableMemoryAllocator",
ThreadTaskRunnerHandle::Get());
}
}
MadvFreeDiscardableMemoryAllocatorPosix::
~MadvFreeDiscardableMemoryAllocatorPosix() {
trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
}
std::unique_ptr<DiscardableMemory>
MadvFreeDiscardableMemoryAllocatorPosix::AllocateLockedDiscardableMemory(
size_t size) {
return std::make_unique<MadvFreeDiscardableMemoryPosix>(size,
&bytes_allocated_);
}
size_t MadvFreeDiscardableMemoryAllocatorPosix::GetBytesAllocated() const {
return bytes_allocated_;
}
bool MadvFreeDiscardableMemoryAllocatorPosix::OnMemoryDump(
const trace_event::MemoryDumpArgs& args,
trace_event::ProcessMemoryDump* pmd) {
if (args.level_of_detail !=
base::trace_event::MemoryDumpLevelOfDetail::BACKGROUND) {
return true;
}
base::trace_event::MemoryAllocatorDump* total_dump =
pmd->CreateAllocatorDump("discardable/madv_free_allocated");
total_dump->AddScalar(base::trace_event::MemoryAllocatorDump::kNameSize,
base::trace_event::MemoryAllocatorDump::kUnitsBytes,
GetBytesAllocated());
return true;
}
} // namespace base

@@ -0,0 +1,52 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_ALLOCATOR_POSIX_H_
#define BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_ALLOCATOR_POSIX_H_
#include <stddef.h>
#include <atomic>
#include <memory>
#include "base/base_export.h"
#include "base/bind.h"
#include "base/callback.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_allocator.h"
#include "base/memory/madv_free_discardable_memory_posix.h"
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
namespace base {
class BASE_EXPORT MadvFreeDiscardableMemoryAllocatorPosix
: public DiscardableMemoryAllocator,
public base::trace_event::MemoryDumpProvider {
public:
MadvFreeDiscardableMemoryAllocatorPosix();
~MadvFreeDiscardableMemoryAllocatorPosix() override;
std::unique_ptr<DiscardableMemory> AllocateLockedDiscardableMemory(
size_t size) override;
size_t GetBytesAllocated() const override;
void ReleaseFreeMemory() override {
// Do nothing, since MADV_FREE discardable memory does not keep any memory
// overhead that can be released.
}
bool OnMemoryDump(const trace_event::MemoryDumpArgs& args,
trace_event::ProcessMemoryDump* pmd) override;
private:
std::atomic<size_t> bytes_allocated_{0};
DISALLOW_COPY_AND_ASSIGN(MadvFreeDiscardableMemoryAllocatorPosix);
};
} // namespace base
#endif // BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_ALLOCATOR_POSIX_H_
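As a sketch (not part of this commit) of choosing an allocator from the backing reported by GetDiscardableMemoryBacking(); the shared-memory-backed allocator lives outside this commit, so it is only mentioned in a comment, and the function name is hypothetical:

#include "base/memory/discardable_memory.h"
#include "base/memory/discardable_memory_allocator.h"
#include "base/memory/madv_free_discardable_memory_allocator_posix.h"

base::DiscardableMemoryAllocator* ChooseAllocatorForThisProcess() {
  if (base::GetDiscardableMemoryBacking() ==
      base::DiscardableMemoryBacking::kMadvFree) {
    // MADV_FREE is supported and was selected by feature flag or trial group.
    static base::MadvFreeDiscardableMemoryAllocatorPosix madv_free_allocator;
    return &madv_free_allocator;
  }
  // Otherwise fall back to a shared-memory-backed allocator (e.g. one built on
  // DiscardableSharedMemory), which is not part of this commit.
  return nullptr;
}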

@@ -0,0 +1,327 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <errno.h>
#include <inttypes.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <atomic>
#include "base/atomicops.h"
#include "base/bits.h"
#include "base/callback.h"
#include "base/memory/madv_free_discardable_memory_allocator_posix.h"
#include "base/memory/madv_free_discardable_memory_posix.h"
#include "base/process/process_metrics.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_manager.h"
#if defined(ADDRESS_SANITIZER)
#include <sanitizer/asan_interface.h>
#endif // defined(ADDRESS_SANITIZER)
namespace {
constexpr intptr_t kPageMagicCookie = 1;
void* AllocatePages(size_t size_in_pages) {
void* data = mmap(nullptr, size_in_pages * base::GetPageSize(),
PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
PCHECK(data != MAP_FAILED);
return data;
}
// Checks if the system supports usage of MADV_FREE as a backing for discardable
// memory.
base::MadvFreeSupport ProbePlatformMadvFreeSupport() {
// Note: If the compiling system does not have headers for Linux 4.5+, then
// the MADV_FREE define will not exist and the probe will default to
// unsupported, regardless of whether the target system actually supports
// MADV_FREE.
#if !defined(OS_MACOSX) && defined(MADV_FREE)
uint8_t* dummy_page = static_cast<uint8_t*>(AllocatePages(1));
dummy_page[0] = 1;
base::MadvFreeSupport support = base::MadvFreeSupport::kUnsupported;
// Check if the MADV_FREE advice value exists.
int retval = madvise(dummy_page, base::GetPageSize(), MADV_FREE);
if (!retval) {
// For Linux 4.5 to 4.12, MADV_FREE on a swapless system will lead to memory
// being immediately discarded. Verify that the memory was not discarded.
if (dummy_page[0]) {
support = base::MadvFreeSupport::kSupported;
}
}
PCHECK(!munmap(dummy_page, base::GetPageSize()));
return support;
#endif
return base::MadvFreeSupport::kUnsupported;
}
} // namespace
namespace base {
MadvFreeDiscardableMemoryPosix::MadvFreeDiscardableMemoryPosix(
size_t size_in_bytes,
std::atomic<size_t>* allocator_byte_count)
: size_in_bytes_(size_in_bytes),
allocated_pages_((size_in_bytes_ + base::GetPageSize() - 1) /
base::GetPageSize()),
allocator_byte_count_(allocator_byte_count),
page_first_word_((size_in_bytes_ + base::GetPageSize() - 1) /
base::GetPageSize()) {
data_ = AllocatePages(allocated_pages_);
(*allocator_byte_count_) += size_in_bytes_;
}
MadvFreeDiscardableMemoryPosix::~MadvFreeDiscardableMemoryPosix() {
if (Deallocate()) {
DVLOG(1) << "Region evicted during destructor with " << allocated_pages_
<< " pages";
}
}
bool MadvFreeDiscardableMemoryPosix::Lock() {
DFAKE_SCOPED_LOCK(thread_collision_warner_);
DCHECK(!is_locked_);
// Locking fails if the memory has been deallocated.
if (!data_)
return false;
#if defined(ADDRESS_SANITIZER)
// We need to unpoison here since locking pages writes to them.
// Note that even if locking fails, we want to unpoison anyway after
// deallocation.
ASAN_UNPOISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
#endif // defined(ADDRESS_SANITIZER)
size_t page_index;
for (page_index = 0; page_index < allocated_pages_; ++page_index) {
if (!LockPage(page_index))
break;
}
if (page_index < allocated_pages_) {
DVLOG(1) << "Region eviction discovered during lock with "
<< allocated_pages_ << " pages";
Deallocate();
return false;
}
DCHECK(IsResident());
is_locked_ = true;
return true;
}
void MadvFreeDiscardableMemoryPosix::Unlock() {
DFAKE_SCOPED_LOCK(thread_collision_warner_);
DCHECK(is_locked_);
DCHECK(data_ != nullptr);
for (size_t page_index = 0; page_index < allocated_pages_; ++page_index) {
UnlockPage(page_index);
}
#ifdef MADV_FREE
if (!keep_memory_for_testing_) {
int retval =
madvise(data_, allocated_pages_ * base::GetPageSize(), MADV_FREE);
DPCHECK(!retval);
}
#endif
#if defined(ADDRESS_SANITIZER)
ASAN_POISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
#endif // defined(ADDRESS_SANITIZER)
is_locked_ = false;
}
void* MadvFreeDiscardableMemoryPosix::data() const {
DFAKE_SCOPED_LOCK(thread_collision_warner_);
DCHECK(is_locked_);
DCHECK(data_ != nullptr);
return data_;
}
bool MadvFreeDiscardableMemoryPosix::LockPage(size_t page_index) {
// We require the byte-level representation of std::atomic<intptr_t> to be
// equivalent to that of an intptr_t. Since std::atomic<intptr_t> has standard
// layout, having equal size is sufficient but not necessary for them to have
// the same byte-level representation.
static_assert(sizeof(intptr_t) == sizeof(std::atomic<intptr_t>),
"Incompatible layout of std::atomic.");
DCHECK(std::atomic<intptr_t>{}.is_lock_free());
std::atomic<intptr_t>* page_as_atomic =
reinterpret_cast<std::atomic<intptr_t>*>(
static_cast<uint8_t*>(data_) + page_index * base::GetPageSize());
intptr_t expected = kPageMagicCookie;
// Recall that we set the first word of the page to |kPageMagicCookie|
// (non-zero) during unlocking. Thus, if the value has changed, the page has
// been discarded. Restore the page's original first word from before
// unlocking only if the page has not been discarded.
if (!std::atomic_compare_exchange_strong_explicit(
page_as_atomic, &expected,
static_cast<intptr_t>(page_first_word_[page_index]),
std::memory_order_relaxed, std::memory_order_relaxed)) {
return false;
}
return true;
}
void MadvFreeDiscardableMemoryPosix::UnlockPage(size_t page_index) {
DCHECK(std::atomic<intptr_t>{}.is_lock_free());
std::atomic<intptr_t>* page_as_atomic =
reinterpret_cast<std::atomic<intptr_t>*>(
static_cast<uint8_t*>(data_) + page_index * base::GetPageSize());
// Store the first word of the page for use during locking.
page_first_word_[page_index].store(*page_as_atomic,
std::memory_order_relaxed);
// Store a non-zero value into the first word of the page, so we can tell when
// the page is discarded during locking.
page_as_atomic->store(kPageMagicCookie, std::memory_order_relaxed);
}
void MadvFreeDiscardableMemoryPosix::DiscardPage(size_t page_index) {
DFAKE_SCOPED_LOCK(thread_collision_warner_);
DCHECK(!is_locked_);
DCHECK(page_index < allocated_pages_);
int retval =
madvise(static_cast<uint8_t*>(data_) + base::GetPageSize() * page_index,
base::GetPageSize(), MADV_DONTNEED);
DPCHECK(!retval);
}
bool MadvFreeDiscardableMemoryPosix::IsLockedForTesting() const {
DFAKE_SCOPED_LOCK(thread_collision_warner_);
return is_locked_;
}
void MadvFreeDiscardableMemoryPosix::DiscardForTesting() {
DFAKE_SCOPED_LOCK(thread_collision_warner_);
DCHECK(!is_locked_);
int retval =
madvise(data_, base::GetPageSize() * allocated_pages_, MADV_DONTNEED);
DPCHECK(!retval);
}
trace_event::MemoryAllocatorDump*
MadvFreeDiscardableMemoryPosix::CreateMemoryAllocatorDump(
const char* name,
trace_event::ProcessMemoryDump* pmd) const {
DFAKE_SCOPED_LOCK(thread_collision_warner_);
using base::trace_event::MemoryAllocatorDump;
std::string allocator_dump_name = base::StringPrintf(
"discardable/segment_0x%" PRIXPTR, reinterpret_cast<uintptr_t>(this));
MemoryAllocatorDump* allocator_dump =
pmd->CreateAllocatorDump(allocator_dump_name);
bool is_discarded = IsDiscarded();
MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(name);
// The effective_size is the amount of unused space as a result of being
// page-aligned.
dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes,
is_discarded ? 0U : static_cast<uint64_t>(size_in_bytes_));
allocator_dump->AddScalar(
MemoryAllocatorDump::kNameSize, MemoryAllocatorDump::kUnitsBytes,
is_discarded
? 0U
: static_cast<uint64_t>(allocated_pages_ * base::GetPageSize()));
allocator_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, 1U);
allocator_dump->AddScalar(
"wasted_size", MemoryAllocatorDump::kUnitsBytes,
static_cast<uint64_t>(allocated_pages_ * base::GetPageSize() -
size_in_bytes_));
allocator_dump->AddScalar("locked_size", MemoryAllocatorDump::kUnitsBytes,
is_locked_ ? size_in_bytes_ : 0U);
allocator_dump->AddScalar("page_count", MemoryAllocatorDump::kUnitsObjects,
static_cast<uint64_t>(allocated_pages_));
// The amount of space that is discarded, but not unmapped (i.e. the memory
// was discarded while unlocked, but the pages are still mapped in memory
// since Deallocate() has not been called yet). This instance is discarded if
// it is unlocked and not all pages are resident in memory.
allocator_dump->AddScalar(
"discarded_size", MemoryAllocatorDump::kUnitsBytes,
is_discarded ? allocated_pages_ * base::GetPageSize() : 0U);
pmd->AddSuballocation(dump->guid(), allocator_dump_name);
return dump;
}
bool MadvFreeDiscardableMemoryPosix::IsValid() const {
DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
return data_ != nullptr;
}
void MadvFreeDiscardableMemoryPosix::SetKeepMemoryForTesting(bool keep_memory) {
DFAKE_SCOPED_LOCK(thread_collision_warner_);
DCHECK(is_locked_);
keep_memory_for_testing_ = keep_memory;
}
bool MadvFreeDiscardableMemoryPosix::IsResident() const {
DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
#ifdef OS_MACOSX
std::vector<char> vec(allocated_pages_);
#else
std::vector<unsigned char> vec(allocated_pages_);
#endif
int retval =
mincore(data_, allocated_pages_ * base::GetPageSize(), vec.data());
DPCHECK(retval == 0 || errno == EAGAIN);
for (size_t i = 0; i < allocated_pages_; ++i) {
if (!(vec[i] & 1))
return false;
}
return true;
}
bool MadvFreeDiscardableMemoryPosix::IsDiscarded() const {
return !is_locked_ && !IsResident();
}
bool MadvFreeDiscardableMemoryPosix::Deallocate() {
DFAKE_SCOPED_RECURSIVE_LOCK(thread_collision_warner_);
if (data_) {
#if defined(ADDRESS_SANITIZER)
ASAN_UNPOISON_MEMORY_REGION(data_, allocated_pages_ * base::GetPageSize());
#endif // defined(ADDRESS_SANITIZER)
int retval = munmap(data_, allocated_pages_ * base::GetPageSize());
PCHECK(!retval);
data_ = nullptr;
(*allocator_byte_count_) -= size_in_bytes_;
return true;
}
return false;
}
MadvFreeSupport GetMadvFreeSupport() {
static MadvFreeSupport kMadvFreeSupport = ProbePlatformMadvFreeSupport();
return kMadvFreeSupport;
}
} // namespace base

View file

@ -0,0 +1,128 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_POSIX_H_
#define BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_POSIX_H_
#include <stddef.h>
#include <atomic>
#include <cstdint>
#include <vector>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/discardable_memory.h"
#include "base/sequence_checker.h"
#include "base/threading/thread_collision_warner.h"
#include "build/build_config.h"
namespace base {
// Discardable memory backed by the MADV_FREE advice value, available since
// Linux 4.5.
//
// When unlocked, this implementation of discardable memory will
// apply the MADV_FREE advice value to all pages within the allocated range,
// causing pages to be discarded instead of swapped upon memory pressure.
// When pages are discarded, they become zero-fill-on-demand pages.
// Attempting to unlock an already-unlocked instance is undefined behavior.
//
// When locked, all pages will be checked for eviction. If any page has
// been discarded, the entire allocated range is unmapped and the lock fails.
// After a failed lock, the instance remains unlocked but any further attempts
// to lock will fail. Additionally, the discardable memory instance is
// invalidated and access to memory obtained via data() is undefined behavior.
// Attempting to lock an already-locked instance is undefined behavior. If no
// page in the allocated range has been discarded, then lock succeeds and the
// allocated range of memory is available for use without any page fault,
// additional allocations, or memory zeroing.
//
// If DCHECK_IS_ON(), additional checks are added to ensure that the discardable
// memory instance is being used correctly. These checks are not present by
// default, as some incur a significant performance penalty or do not warrant
// crashing the process. These checks are:
// - Do not allow lock while already locked or unlock while already unlocked
// - Do not allow memory access via data() if instance is deallocated after
// Lock() (although invalid memory can still be accessed through existing
// pointers)
// - After Unlock(), disallow read or write of memory pointed to by data()
// with PROT_NONE until next Lock()
//
// Caveats:
// [1]: The smallest allocation unit is the size of a page, so it is
// unsuitable for small allocations.
//
// [2]: The size of a discardable memory instance must be greater than 0 bytes.
//
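// Example usage (a sketch only; |memory| is assumed to be an
// already-constructed, locked instance owned by the caller):
//
//   void* ptr = memory->data();
//   ...                 // Use |ptr| while the instance is locked.
//   memory->Unlock();   // Pages may now be discarded under memory pressure.
//   if (memory->Lock()) {
//     ...               // No page was discarded; data() is valid again.
//   } else {
//     ...               // A page was discarded; the instance is now invalid.
//   }
//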
class BASE_EXPORT MadvFreeDiscardableMemoryPosix : public DiscardableMemory {
public:
MadvFreeDiscardableMemoryPosix(size_t size_in_pages,
std::atomic<size_t>* allocator_byte_count);
~MadvFreeDiscardableMemoryPosix() override;
bool Lock() override;
void Unlock() override;
void* data() const override;
bool IsLockedForTesting() const;
void DiscardForTesting() override;
trace_event::MemoryAllocatorDump* CreateMemoryAllocatorDump(
const char* name,
trace_event::ProcessMemoryDump* pmd) const override;
protected:
size_t GetPageCount() const { return allocated_pages_; }
bool IsValid() const;
void SetKeepMemoryForTesting(bool keep_memory);
// Force page discard by applying MADV_DONTNEED hint on a page.
// Has the same effect as if the page was naturally discarded during
// memory pressure due to MADV_FREE (i.e. zero-fill-on-demand pages for
// anonymous private mappings).
// Note that MADV_DONTNEED takes effect immediately for non-shared mappings.
void DiscardPage(size_t page_index);
private:
bool LockPage(size_t page_index);
void UnlockPage(size_t page_index);
bool Deallocate();
// Gets whether this instance has been discarded (but not yet unmapped).
bool IsDiscarded() const;
// Get whether all pages in this discardable memory instance are resident.
bool IsResident() const;
const size_t size_in_bytes_;
const size_t allocated_pages_;
// Pointer to allocator memory usage metric for updating upon allocation and
// destruction.
std::atomic<size_t>* allocator_byte_count_;
void* data_;
bool is_locked_ = true;
// If true, MADV_FREE will not be set on Unlock().
bool keep_memory_for_testing_ = false;
// Stores the first word of a page for use during locking.
std::vector<std::atomic<intptr_t>> page_first_word_;
DFAKE_MUTEX(thread_collision_warner_);
DISALLOW_COPY_AND_ASSIGN(MadvFreeDiscardableMemoryPosix);
};
enum class MadvFreeSupport { kUnsupported, kSupported };
BASE_EXPORT MadvFreeSupport GetMadvFreeSupport();
} // namespace base
#endif // BASE_MEMORY_MADV_FREE_DISCARDABLE_MEMORY_POSIX_H_

View file

@ -0,0 +1,130 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/memory_pressure_listener.h"
#include "base/observer_list_threadsafe.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace {
// This class is thread safe and internally synchronized.
class MemoryPressureObserver {
public:
// There is at most one MemoryPressureObserver and it is never deleted.
~MemoryPressureObserver() = delete;
void AddObserver(MemoryPressureListener* listener, bool sync) {
async_observers_->AddObserver(listener);
if (sync) {
AutoLock lock(sync_observers_lock_);
sync_observers_.AddObserver(listener);
}
}
void RemoveObserver(MemoryPressureListener* listener) {
async_observers_->RemoveObserver(listener);
AutoLock lock(sync_observers_lock_);
sync_observers_.RemoveObserver(listener);
}
void Notify(
MemoryPressureListener::MemoryPressureLevel memory_pressure_level) {
async_observers_->Notify(FROM_HERE, &MemoryPressureListener::Notify,
memory_pressure_level);
AutoLock lock(sync_observers_lock_);
for (auto& observer : sync_observers_)
observer.SyncNotify(memory_pressure_level);
}
private:
const scoped_refptr<ObserverListThreadSafe<MemoryPressureListener>>
async_observers_ =
base::MakeRefCounted<ObserverListThreadSafe<MemoryPressureListener>>(
ObserverListPolicy::EXISTING_ONLY);
ObserverList<MemoryPressureListener>::Unchecked sync_observers_;
Lock sync_observers_lock_;
};
// Gets the shared MemoryPressureObserver singleton instance.
MemoryPressureObserver* GetMemoryPressureObserver() {
static auto* const observer = new MemoryPressureObserver();
return observer;
}
subtle::Atomic32 g_notifications_suppressed = 0;
} // namespace
MemoryPressureListener::MemoryPressureListener(
const MemoryPressureListener::MemoryPressureCallback& callback)
: callback_(callback) {
GetMemoryPressureObserver()->AddObserver(this, false);
}
MemoryPressureListener::MemoryPressureListener(
const MemoryPressureListener::MemoryPressureCallback& callback,
const MemoryPressureListener::SyncMemoryPressureCallback&
sync_memory_pressure_callback)
: callback_(callback),
sync_memory_pressure_callback_(sync_memory_pressure_callback) {
GetMemoryPressureObserver()->AddObserver(this, true);
}
MemoryPressureListener::~MemoryPressureListener() {
GetMemoryPressureObserver()->RemoveObserver(this);
}
void MemoryPressureListener::Notify(MemoryPressureLevel memory_pressure_level) {
callback_.Run(memory_pressure_level);
}
void MemoryPressureListener::SyncNotify(
MemoryPressureLevel memory_pressure_level) {
if (!sync_memory_pressure_callback_.is_null()) {
sync_memory_pressure_callback_.Run(memory_pressure_level);
}
}
// static
void MemoryPressureListener::NotifyMemoryPressure(
MemoryPressureLevel memory_pressure_level) {
DCHECK_NE(memory_pressure_level, MEMORY_PRESSURE_LEVEL_NONE);
TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("memory-infra"),
"MemoryPressureListener::NotifyMemoryPressure",
TRACE_EVENT_SCOPE_THREAD, "level",
memory_pressure_level);
if (AreNotificationsSuppressed())
return;
DoNotifyMemoryPressure(memory_pressure_level);
}
// static
bool MemoryPressureListener::AreNotificationsSuppressed() {
return subtle::Acquire_Load(&g_notifications_suppressed) == 1;
}
// static
void MemoryPressureListener::SetNotificationsSuppressed(bool suppress) {
subtle::Release_Store(&g_notifications_suppressed, suppress ? 1 : 0);
}
// static
void MemoryPressureListener::SimulatePressureNotification(
MemoryPressureLevel memory_pressure_level) {
// Notify all listeners even if regular pressure notifications are suppressed.
DoNotifyMemoryPressure(memory_pressure_level);
}
// static
void MemoryPressureListener::DoNotifyMemoryPressure(
MemoryPressureLevel memory_pressure_level) {
DCHECK_NE(memory_pressure_level, MEMORY_PRESSURE_LEVEL_NONE);
GetMemoryPressureObserver()->Notify(memory_pressure_level);
}
} // namespace base

View file

@ -0,0 +1,103 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// MemoryPressure provides static APIs for handling memory pressure on
// platforms that have such signals, such as Android and ChromeOS.
// The app will try to discard buffers that aren't deemed essential (individual
// modules will implement their own policy).
#ifndef BASE_MEMORY_MEMORY_PRESSURE_LISTENER_H_
#define BASE_MEMORY_MEMORY_PRESSURE_LISTENER_H_
#include "base/base_export.h"
#include "base/callback.h"
#include "base/macros.h"
namespace base {
// To start listening, create a new instance, passing a callback to a
// function that takes a MemoryPressureLevel parameter. To stop listening,
// simply delete the listener object. The implementation guarantees
// that the callback will always be called on the thread that created
// the listener.
// Note that even on the same thread, the callback is not guaranteed to be
// called synchronously within the system memory pressure broadcast.
// Please see notes in MemoryPressureLevel enum below: some levels are
// absolutely critical, and if not enough memory is returned to the system,
// it'll potentially kill the app, and then later the app will have to be
// cold-started.
//
// Example:
//
// void OnMemoryPressure(MemoryPressureLevel memory_pressure_level) {
// ...
// }
//
// // Start listening.
// auto listener = std::make_unique<MemoryPressureListener>(
// base::BindRepeating(&OnMemoryPressure));
//
// ...
//
// // Stop listening.
// listener.reset();
//
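// A second, synchronous callback may also be supplied when a listener must
// run inside the system pressure broadcast itself (a sketch;
// OnSyncMemoryPressure is a hypothetical handler):
//
//   auto listener = std::make_unique<MemoryPressureListener>(
//       base::BindRepeating(&OnMemoryPressure),
//       base::BindRepeating(&OnSyncMemoryPressure));
//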
class BASE_EXPORT MemoryPressureListener {
public:
// A Java counterpart will be generated for this enum.
// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base
enum MemoryPressureLevel {
// No problems, there is enough memory to use. This event is not sent via
// callback, but the enum is used in other places to find out the current
// state of the system.
MEMORY_PRESSURE_LEVEL_NONE,
// Modules are advised to free buffers that are cheap to re-allocate and not
// immediately needed.
MEMORY_PRESSURE_LEVEL_MODERATE,
// At this level, modules are advised to free all possible memory. The
// alternative is to be killed by the system, which means all memory will
// have to be re-created, plus the cost of a cold start.
MEMORY_PRESSURE_LEVEL_CRITICAL,
};
using MemoryPressureCallback = RepeatingCallback<void(MemoryPressureLevel)>;
using SyncMemoryPressureCallback =
RepeatingCallback<void(MemoryPressureLevel)>;
explicit MemoryPressureListener(
const MemoryPressureCallback& memory_pressure_callback);
MemoryPressureListener(
const MemoryPressureCallback& memory_pressure_callback,
const SyncMemoryPressureCallback& sync_memory_pressure_callback);
~MemoryPressureListener();
// Intended for use by the platform specific implementation.
static void NotifyMemoryPressure(MemoryPressureLevel memory_pressure_level);
// These methods should not be used anywhere else but in memory measurement
// code, where they are intended to maintain stable conditions across
// measurements.
static bool AreNotificationsSuppressed();
static void SetNotificationsSuppressed(bool suppressed);
static void SimulatePressureNotification(
MemoryPressureLevel memory_pressure_level);
void Notify(MemoryPressureLevel memory_pressure_level);
void SyncNotify(MemoryPressureLevel memory_pressure_level);
private:
static void DoNotifyMemoryPressure(MemoryPressureLevel memory_pressure_level);
MemoryPressureCallback callback_;
SyncMemoryPressureCallback sync_memory_pressure_callback_;
DISALLOW_COPY_AND_ASSIGN(MemoryPressureListener);
};
} // namespace base
#endif // BASE_MEMORY_MEMORY_PRESSURE_LISTENER_H_

View file

@ -0,0 +1,74 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/memory_pressure_monitor.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
namespace base {
namespace {
MemoryPressureMonitor* g_monitor = nullptr;
// Enumeration of UMA memory pressure levels. This needs to be kept in sync with
// histograms.xml and the memory pressure levels defined in
// MemoryPressureListener.
enum MemoryPressureLevelUMA {
UMA_MEMORY_PRESSURE_LEVEL_NONE = 0,
UMA_MEMORY_PRESSURE_LEVEL_MODERATE = 1,
UMA_MEMORY_PRESSURE_LEVEL_CRITICAL = 2,
// This must be the last value in the enum.
UMA_MEMORY_PRESSURE_LEVEL_COUNT,
};
// Converts a memory pressure level to an UMA enumeration value.
MemoryPressureLevelUMA MemoryPressureLevelToUmaEnumValue(
base::MemoryPressureListener::MemoryPressureLevel level) {
switch (level) {
case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_NONE:
return UMA_MEMORY_PRESSURE_LEVEL_NONE;
case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_MODERATE:
return UMA_MEMORY_PRESSURE_LEVEL_MODERATE;
case MemoryPressureListener::MEMORY_PRESSURE_LEVEL_CRITICAL:
return UMA_MEMORY_PRESSURE_LEVEL_CRITICAL;
}
NOTREACHED();
return UMA_MEMORY_PRESSURE_LEVEL_NONE;
}
} // namespace
const base::TimeDelta MemoryPressureMonitor::kUMAMemoryPressureLevelPeriod =
base::TimeDelta::FromSeconds(5);
MemoryPressureMonitor::MemoryPressureMonitor() {
DCHECK(!g_monitor);
g_monitor = this;
}
MemoryPressureMonitor::~MemoryPressureMonitor() {
DCHECK(g_monitor);
g_monitor = nullptr;
}
// static
MemoryPressureMonitor* MemoryPressureMonitor::Get() {
return g_monitor;
}
void MemoryPressureMonitor::RecordMemoryPressure(
base::MemoryPressureListener::MemoryPressureLevel level,
int ticks) {
// Use the more primitive STATIC_HISTOGRAM_POINTER_BLOCK macro because the
// simple UMA_HISTOGRAM macros don't expose 'AddCount' functionality.
STATIC_HISTOGRAM_POINTER_BLOCK(
"Memory.PressureLevel",
AddCount(MemoryPressureLevelToUmaEnumValue(level), ticks),
base::LinearHistogram::FactoryGet(
"Memory.PressureLevel", 1, UMA_MEMORY_PRESSURE_LEVEL_COUNT,
UMA_MEMORY_PRESSURE_LEVEL_COUNT + 1,
base::HistogramBase::kUmaTargetedHistogramFlag));
}
} // namespace base

View file

@ -0,0 +1,58 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_MEMORY_PRESSURE_MONITOR_H_
#define BASE_MEMORY_MEMORY_PRESSURE_MONITOR_H_
#include "base/base_export.h"
#include "base/callback.h"
#include "base/macros.h"
#include "base/memory/memory_pressure_listener.h"
#include "base/time/time.h"
namespace base {
// TODO(chrisha): Make this a concrete class with per-OS implementations rather
// than an abstract base class.
// Declares the interface for a MemoryPressureMonitor. There are multiple
// OS specific implementations of this class. An instance of the memory
// pressure observer is created at the process level, tracks memory usage, and
// pushes memory state change notifications to the static function
// base::MemoryPressureListener::NotifyMemoryPressure. This is turn notifies
// all MemoryPressureListener instances via a callback.
class BASE_EXPORT MemoryPressureMonitor {
public:
using MemoryPressureLevel = base::MemoryPressureListener::MemoryPressureLevel;
using DispatchCallback =
base::RepeatingCallback<void(MemoryPressureLevel level)>;
virtual ~MemoryPressureMonitor();
// Return the singleton MemoryPressureMonitor.
static MemoryPressureMonitor* Get();
// Record memory pressure UMA statistic. A tick is 5 seconds.
static void RecordMemoryPressure(MemoryPressureLevel level, int ticks);
// Defines the time between UMA events, currently 5s.
static const base::TimeDelta kUMAMemoryPressureLevelPeriod;
// Returns the currently observed memory pressure.
virtual MemoryPressureLevel GetCurrentPressureLevel() const = 0;
// Sets a notification callback. The default callback invokes
// base::MemoryPressureListener::NotifyMemoryPressure.
virtual void SetDispatchCallback(const DispatchCallback& callback) = 0;
protected:
MemoryPressureMonitor();
private:
DISALLOW_COPY_AND_ASSIGN(MemoryPressureMonitor);
};
} // namespace base
#endif // BASE_MEMORY_MEMORY_PRESSURE_MONITOR_H_

View file

@ -0,0 +1,81 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/shared_memory_security_policy.h"
#include "base/metrics/histogram_functions.h"
#include "base/numerics/checked_math.h"
namespace base {
namespace subtle {
namespace {
void RecordMappingWasBlockedHistogram(bool blocked) {
base::UmaHistogramBoolean("SharedMemory.MapBlockedForSecurity", blocked);
}
} // namespace
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateWritable(
size_t size) {
return Create(Mode::kWritable, size);
}
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::CreateUnsafe(
size_t size) {
return Create(Mode::kUnsafe, size);
}
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion() = default;
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
PlatformSharedMemoryRegion&& other) = default;
PlatformSharedMemoryRegion& PlatformSharedMemoryRegion::operator=(
PlatformSharedMemoryRegion&& other) = default;
PlatformSharedMemoryRegion::~PlatformSharedMemoryRegion() = default;
PlatformSharedMemoryRegion::ScopedPlatformHandle
PlatformSharedMemoryRegion::PassPlatformHandle() {
return std::move(handle_);
}
bool PlatformSharedMemoryRegion::MapAt(off_t offset,
size_t size,
void** memory,
size_t* mapped_size) const {
if (!IsValid())
return false;
if (size == 0)
return false;
size_t end_byte;
if (!CheckAdd(offset, size).AssignIfValid(&end_byte) || end_byte > size_) {
return false;
}
if (!SharedMemorySecurityPolicy::AcquireReservationForMapping(size)) {
RecordMappingWasBlockedHistogram(/*blocked=*/true);
return false;
} else {
RecordMappingWasBlockedHistogram(/*blocked=*/false);
}
bool success = MapAtInternal(offset, size, memory, mapped_size);
if (success) {
DCHECK_EQ(
0U, reinterpret_cast<uintptr_t>(*memory) & (kMapMinimumAlignment - 1));
} else {
SharedMemorySecurityPolicy::ReleaseReservationForMapping(size);
}
return success;
}
} // namespace subtle
} // namespace base

View file

@ -0,0 +1,301 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_
#include <utility>
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
#if defined(OS_MACOSX) && !defined(OS_IOS)
#include <mach/mach.h>
#include "base/mac/scoped_mach_port.h"
#elif defined(OS_FUCHSIA)
#include <lib/zx/vmo.h>
#elif defined(OS_WIN)
#include "base/win/scoped_handle.h"
#include "base/win/windows_types.h"
#elif defined(OS_POSIX)
#include <sys/types.h>
#include "base/file_descriptor_posix.h"
#include "base/files/scoped_file.h"
#endif
#if defined(OS_LINUX)
namespace content {
class SandboxIPCHandler;
}
#endif
namespace base {
namespace subtle {
#if defined(OS_POSIX) && (!defined(OS_MACOSX) || defined(OS_IOS)) && \
!defined(OS_ANDROID)
// Helper structs to keep two descriptors on POSIX. They are needed to support
// ConvertToReadOnly().
struct BASE_EXPORT FDPair {
// The main shared memory descriptor that is used for mapping. May be either
// writable or read-only, depending on region's mode.
int fd;
// The read-only descriptor, valid only in kWritable mode. Replaces |fd| when
// a region is converted to read-only.
int readonly_fd;
};
struct BASE_EXPORT ScopedFDPair {
ScopedFDPair();
ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd);
ScopedFDPair(ScopedFDPair&&);
ScopedFDPair& operator=(ScopedFDPair&&);
~ScopedFDPair();
FDPair get() const;
ScopedFD fd;
ScopedFD readonly_fd;
};
#endif
// Implementation class for shared memory regions.
//
// This class does the following:
//
// - Wraps and owns a shared memory region platform handle.
// - Provides a way to allocate a new region of platform shared memory of given
// size.
// - Provides a way to create mapping of the region in the current process'
// address space, under special access-control constraints (see Mode).
// - Provides methods to help transferring the handle across process boundaries.
// - Holds a 128-bit unique identifier used to uniquely identify the same
// kernel region resource across processes (used for memory tracking).
// - Has a method to retrieve the region's size in bytes.
//
// IMPORTANT NOTE: Users should never use this directly, but
// ReadOnlySharedMemoryRegion, WritableSharedMemoryRegion or
// UnsafeSharedMemoryRegion since this is an implementation class.
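//
// For illustration only, a sketch of the flow that the typed region classes
// implement on top of this class (size and error handling are simplified):
//
//   PlatformSharedMemoryRegion region =
//       PlatformSharedMemoryRegion::CreateWritable(4096);
//   void* memory = nullptr;
//   size_t mapped_size = 0;
//   if (region.IsValid() &&
//       region.MapAt(0, region.GetSize(), &memory, &mapped_size)) {
//     // Write to |memory|, then optionally demote the region to read-only.
//     bool converted = region.ConvertToReadOnly();
//   }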
class BASE_EXPORT PlatformSharedMemoryRegion {
public:
// Permission mode of the platform handle. Each mode corresponds to one of the
// typed shared memory classes:
//
// * ReadOnlySharedMemoryRegion: A region that can only create read-only
// mappings.
//
// * WritableSharedMemoryRegion: A region that can only create writable
// mappings. The region can be demoted to ReadOnlySharedMemoryRegion without
// the possibility of promoting back to writable.
//
// * UnsafeSharedMemoryRegion: A region that can only create writable
// mappings. The region cannot be demoted to ReadOnlySharedMemoryRegion.
enum class Mode {
kReadOnly, // ReadOnlySharedMemoryRegion
kWritable, // WritableSharedMemoryRegion
kUnsafe, // UnsafeSharedMemoryRegion
kMaxValue = kUnsafe
};
// Errors that can occur during Shared Memory construction.
// These match tools/metrics/histograms/enums.xml.
// This enum is append-only.
enum class CreateError {
SUCCESS = 0,
SIZE_ZERO = 1,
SIZE_TOO_LARGE = 2,
INITIALIZE_ACL_FAILURE = 3,
INITIALIZE_SECURITY_DESC_FAILURE = 4,
SET_SECURITY_DESC_FAILURE = 5,
CREATE_FILE_MAPPING_FAILURE = 6,
REDUCE_PERMISSIONS_FAILURE = 7,
ALREADY_EXISTS = 8,
ALLOCATE_FILE_REGION_FAILURE = 9,
FSTAT_FAILURE = 10,
INODES_MISMATCH = 11,
GET_SHMEM_TEMP_DIR_FAILURE = 12,
kMaxValue = GET_SHMEM_TEMP_DIR_FAILURE
};
#if defined(OS_LINUX)
// Structure to limit access to executable region creation.
struct ExecutableRegion {
private:
// Creates a new shared memory region in the unsafe mode (writable and not
// convertible to read-only), and in addition marked executable. A ScopedFD
// to this region is returned. Any mapping will have to be done manually,
// including setting executable permissions if necessary.
//
// This is only used to support sandbox_ipc_linux.cc, and should not be used
// anywhere else in chrome. This is restricted via AllowCreateExecutable.
// TODO(crbug.com/982879): remove this when NaCl is unshipped.
//
// Returns an invalid ScopedFD if the call fails.
static ScopedFD CreateFD(size_t size);
friend class content::SandboxIPCHandler;
};
#endif
// Platform-specific shared memory type used by this class.
#if defined(OS_MACOSX) && !defined(OS_IOS)
using PlatformHandle = mach_port_t;
using ScopedPlatformHandle = mac::ScopedMachSendRight;
#elif defined(OS_FUCHSIA)
using PlatformHandle = zx::unowned_vmo;
using ScopedPlatformHandle = zx::vmo;
#elif defined(OS_WIN)
using PlatformHandle = HANDLE;
using ScopedPlatformHandle = win::ScopedHandle;
#elif defined(OS_ANDROID)
using PlatformHandle = int;
using ScopedPlatformHandle = ScopedFD;
#else
using PlatformHandle = FDPair;
using ScopedPlatformHandle = ScopedFDPair;
#endif
// The minimum alignment in bytes that any mapped address produced by Map()
// and MapAt() is guaranteed to have.
enum { kMapMinimumAlignment = 32 };
// Creates a new PlatformSharedMemoryRegion with corresponding mode and size.
// Creating in kReadOnly mode isn't supported because then there will be no
// way to modify memory content.
static PlatformSharedMemoryRegion CreateWritable(size_t size);
static PlatformSharedMemoryRegion CreateUnsafe(size_t size);
// Returns a new PlatformSharedMemoryRegion that takes ownership of the
// |handle|. All parameters must be taken from another valid
// PlatformSharedMemoryRegion instance, e.g. |size| must be equal to the
// actual region size as allocated by the kernel.
// Closes the |handle| and returns an invalid instance if passed parameters
// are invalid.
static PlatformSharedMemoryRegion Take(ScopedPlatformHandle handle,
Mode mode,
size_t size,
const UnguessableToken& guid);
#if defined(OS_POSIX) && !defined(OS_ANDROID) && \
!(defined(OS_MACOSX) && !defined(OS_IOS))
// Specialized version of Take() for POSIX that takes only one file descriptor
// instead of a pair. Cannot be used with kWritable |mode|.
static PlatformSharedMemoryRegion Take(ScopedFD handle,
Mode mode,
size_t size,
const UnguessableToken& guid);
#endif
// Default constructor initializes an invalid instance, i.e. an instance that
// doesn't wrap any valid platform handle.
PlatformSharedMemoryRegion();
// Move operations are allowed.
PlatformSharedMemoryRegion(PlatformSharedMemoryRegion&&);
PlatformSharedMemoryRegion& operator=(PlatformSharedMemoryRegion&&);
// Destructor closes the platform handle. Does nothing if the handle is
// invalid.
~PlatformSharedMemoryRegion();
// Passes ownership of the platform handle to the caller. The current instance
// becomes invalid. It's the responsibility of the caller to close the
// handle. If the current instance is invalid, the returned
// ScopedPlatformHandle will also be invalid.
ScopedPlatformHandle PassPlatformHandle() WARN_UNUSED_RESULT;
// Returns the platform handle. The current instance keeps ownership of this
// handle.
PlatformHandle GetPlatformHandle() const;
// Whether the platform handle is valid.
bool IsValid() const;
// Duplicates the platform handle and creates a new PlatformSharedMemoryRegion
// with the same |mode_|, |size_| and |guid_| that owns this handle. Returns
// invalid region on failure, the current instance remains valid.
// Can be called only in kReadOnly and kUnsafe modes; CHECK-fails if called
// in kWritable mode.
PlatformSharedMemoryRegion Duplicate() const;
// Converts the region to read-only. Returns whether the operation succeeded.
// Makes the current instance invalid on failure. Can be called only in
// kWritable mode, all other modes will CHECK-fail. The object will have
// kReadOnly mode after this call on success.
bool ConvertToReadOnly();
#if defined(OS_MACOSX) && !defined(OS_IOS)
// Same as above, but |mapped_addr| is used as a hint to avoid additional
// mapping of the memory object.
// |mapped_addr| must be mapped location of |memory_object_|. If the location
// is unknown, |mapped_addr| should be |nullptr|.
bool ConvertToReadOnly(void* mapped_addr);
#endif // defined(OS_MACOSX) && !defined(OS_IOS)
// Converts the region to unsafe. Returns whether the operation succeeded.
// Makes the current instance invalid on failure. Can be called only in
// kWritable mode, all other modes will CHECK-fail. The object will have
// kUnsafe mode after this call on success.
bool ConvertToUnsafe();
// Maps |size| bytes of the shared memory region starting with the given
// |offset| into the caller's address space. |offset| must be aligned to value
// of |SysInfo::VMAllocationGranularity()|. Fails if requested bytes are out
// of the region limits.
// Returns true and sets |memory| and |mapped_size| on success, returns false
// and leaves output parameters in unspecified state otherwise. The mapped
// address is guaranteed to have an alignment of at least
// |kMapMinimumAlignment|.
bool MapAt(off_t offset,
size_t size,
void** memory,
size_t* mapped_size) const;
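// A sketch of mapping a sub-range (|region| is assumed to be a valid
// instance; |offset| must be a multiple of
// SysInfo::VMAllocationGranularity()):
//
//   const size_t granularity = SysInfo::VMAllocationGranularity();
//   void* memory = nullptr;
//   size_t mapped_size = 0;
//   bool ok = region.MapAt(granularity, granularity, &memory, &mapped_size);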
const UnguessableToken& GetGUID() const { return guid_; }
size_t GetSize() const { return size_; }
Mode GetMode() const { return mode_; }
private:
FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
CreateReadOnlyRegionDeathTest);
FRIEND_TEST_ALL_PREFIXES(PlatformSharedMemoryRegionTest,
CheckPlatformHandlePermissionsCorrespondToMode);
static PlatformSharedMemoryRegion Create(Mode mode,
size_t size
#if defined(OS_LINUX)
,
bool executable = false
#endif
);
static bool CheckPlatformHandlePermissionsCorrespondToMode(
PlatformHandle handle,
Mode mode,
size_t size);
PlatformSharedMemoryRegion(ScopedPlatformHandle handle,
Mode mode,
size_t size,
const UnguessableToken& guid);
bool MapAtInternal(off_t offset,
size_t size,
void** memory,
size_t* mapped_size) const;
ScopedPlatformHandle handle_;
Mode mode_ = Mode::kReadOnly;
size_t size_ = 0;
UnguessableToken guid_;
DISALLOW_COPY_AND_ASSIGN(PlatformSharedMemoryRegion);
};
} // namespace subtle
} // namespace base
#endif // BASE_MEMORY_PLATFORM_SHARED_MEMORY_REGION_H_

View file

@ -0,0 +1,206 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include <sys/mman.h>
#include "base/bits.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/metrics/histogram_macros.h"
#include "base/posix/eintr_wrapper.h"
#include "base/process/process_metrics.h"
#include "third_party/ashmem/ashmem.h"
namespace base {
namespace subtle {
// For Android, we use ashmem to implement SharedMemory. ashmem_create_region
// will automatically pin the region. We never explicitly call pin/unpin. When
// all the file descriptors from different processes associated with the region
// are closed, the memory buffer will go away.
namespace {
int GetAshmemRegionProtectionMask(int fd) {
int prot = ashmem_get_prot_region(fd);
if (prot < 0) {
// TODO(crbug.com/838365): convert to DLOG when bug fixed.
PLOG(ERROR) << "ashmem_get_prot_region failed";
return -1;
}
return prot;
}
} // namespace
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
ScopedFD fd,
Mode mode,
size_t size,
const UnguessableToken& guid) {
if (!fd.is_valid())
return {};
if (size == 0)
return {};
if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
return {};
CHECK(CheckPlatformHandlePermissionsCorrespondToMode(fd.get(), mode, size));
return PlatformSharedMemoryRegion(std::move(fd), mode, size, guid);
}
int PlatformSharedMemoryRegion::GetPlatformHandle() const {
return handle_.get();
}
bool PlatformSharedMemoryRegion::IsValid() const {
return handle_.is_valid();
}
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
if (!IsValid())
return {};
CHECK_NE(mode_, Mode::kWritable)
<< "Duplicating a writable shared memory region is prohibited";
ScopedFD duped_fd(HANDLE_EINTR(dup(handle_.get())));
if (!duped_fd.is_valid()) {
DPLOG(ERROR) << "dup(" << handle_.get() << ") failed";
return {};
}
return PlatformSharedMemoryRegion(std::move(duped_fd), mode_, size_, guid_);
}
bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
if (!IsValid())
return false;
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to read-only";
ScopedFD handle_copy(handle_.release());
int prot = GetAshmemRegionProtectionMask(handle_copy.get());
if (prot < 0)
return false;
prot &= ~PROT_WRITE;
int ret = ashmem_set_prot_region(handle_copy.get(), prot);
if (ret != 0) {
DPLOG(ERROR) << "ashmem_set_prot_region failed";
return false;
}
handle_ = std::move(handle_copy);
mode_ = Mode::kReadOnly;
return true;
}
bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
if (!IsValid())
return false;
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to unsafe";
mode_ = Mode::kUnsafe;
return true;
}
bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
size_t size,
void** memory,
size_t* mapped_size) const {
// IMPORTANT: Even if the mapping is readonly and the mapped data is not
// changing, the region must ALWAYS be mapped with MAP_SHARED, otherwise with
// ashmem the mapping is equivalent to a private anonymous mapping.
bool write_allowed = mode_ != Mode::kReadOnly;
*memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
MAP_SHARED, handle_.get(), offset);
bool mmap_succeeded = *memory && *memory != reinterpret_cast<void*>(-1);
if (!mmap_succeeded) {
DPLOG(ERROR) << "mmap " << handle_.get() << " failed";
return false;
}
*mapped_size = size;
return true;
}
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
size_t size) {
if (size == 0) {
return {};
}
// Align size as required by ashmem_create_region() API documentation. This
// operation may overflow so check that the result doesn't decrease.
size_t rounded_size = bits::Align(size, GetPageSize());
if (rounded_size < size ||
rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
return {};
}
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
UnguessableToken guid = UnguessableToken::Create();
int fd = ashmem_create_region(
SharedMemoryTracker::GetDumpNameForTracing(guid).c_str(), rounded_size);
if (fd < 0) {
DPLOG(ERROR) << "ashmem_create_region failed";
return {};
}
ScopedFD scoped_fd(fd);
int err = ashmem_set_prot_region(scoped_fd.get(), PROT_READ | PROT_WRITE);
if (err < 0) {
DPLOG(ERROR) << "ashmem_set_prot_region failed";
return {};
}
return PlatformSharedMemoryRegion(std::move(scoped_fd), mode, size, guid);
}
bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
PlatformHandle handle,
Mode mode,
size_t size) {
int prot = GetAshmemRegionProtectionMask(handle);
if (prot < 0)
return false;
bool is_read_only = (prot & PROT_WRITE) == 0;
bool expected_read_only = mode == Mode::kReadOnly;
if (is_read_only != expected_read_only) {
// TODO(crbug.com/838365): convert to DLOG when bug fixed.
LOG(ERROR) << "Ashmem region has a wrong protection mask: it is"
<< (is_read_only ? " " : " not ") << "read-only but it should"
<< (expected_read_only ? " " : " not ") << "be";
return false;
}
return true;
}
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
ScopedFD fd,
Mode mode,
size_t size,
const UnguessableToken& guid)
: handle_(std::move(fd)), mode_(mode), size_(size), guid_(guid) {}
} // namespace subtle
} // namespace base

View file

@ -0,0 +1,193 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include <lib/zx/vmar.h>
#include <zircon/process.h>
#include <zircon/rights.h>
#include "base/bits.h"
#include "base/fuchsia/fuchsia_logging.h"
#include "base/process/process_metrics.h"
namespace base {
namespace subtle {
static constexpr int kNoWriteOrExec =
ZX_DEFAULT_VMO_RIGHTS &
~(ZX_RIGHT_WRITE | ZX_RIGHT_EXECUTE | ZX_RIGHT_SET_PROPERTY);
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
zx::vmo handle,
Mode mode,
size_t size,
const UnguessableToken& guid) {
if (!handle.is_valid())
return {};
if (size == 0)
return {};
if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
return {};
CHECK(CheckPlatformHandlePermissionsCorrespondToMode(zx::unowned_vmo(handle),
mode, size));
return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
}
zx::unowned_vmo PlatformSharedMemoryRegion::GetPlatformHandle() const {
return zx::unowned_vmo(handle_);
}
bool PlatformSharedMemoryRegion::IsValid() const {
return handle_.is_valid();
}
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
if (!IsValid())
return {};
CHECK_NE(mode_, Mode::kWritable)
<< "Duplicating a writable shared memory region is prohibited";
zx::vmo duped_handle;
zx_status_t status = handle_.duplicate(ZX_RIGHT_SAME_RIGHTS, &duped_handle);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_handle_duplicate";
return {};
}
return PlatformSharedMemoryRegion(std::move(duped_handle), mode_, size_,
guid_);
}
bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
if (!IsValid())
return false;
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to read-only";
zx_status_t status = handle_.replace(kNoWriteOrExec, &handle_);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_handle_replace";
return false;
}
mode_ = Mode::kReadOnly;
return true;
}
bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
if (!IsValid())
return false;
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to unsafe";
mode_ = Mode::kUnsafe;
return true;
}
bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
size_t size,
void** memory,
size_t* mapped_size) const {
uintptr_t addr;
zx_vm_option_t options = ZX_VM_REQUIRE_NON_RESIZABLE | ZX_VM_PERM_READ;
if (mode_ != Mode::kReadOnly)
options |= ZX_VM_PERM_WRITE;
zx_status_t status = zx::vmar::root_self()->map(
/*vmar_offset=*/0, handle_, offset, size, options, &addr);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_vmar_map";
return false;
}
*memory = reinterpret_cast<void*>(addr);
*mapped_size = size;
return true;
}
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
size_t size) {
if (size == 0)
return {};
// Aligning may overflow so check that the result doesn't decrease.
size_t rounded_size = bits::Align(size, GetPageSize());
if (rounded_size < size ||
rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
return {};
}
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
zx::vmo vmo;
zx_status_t status = zx::vmo::create(rounded_size, 0, &vmo);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_vmo_create";
return {};
}
// TODO(crbug.com/991805): Take base::Location from the caller and use it to
// generate the name here.
constexpr char kVmoName[] = "cr-shared-memory-region";
status = vmo.set_property(ZX_PROP_NAME, kVmoName, strlen(kVmoName));
ZX_DCHECK(status == ZX_OK, status);
const int kNoExecFlags = ZX_DEFAULT_VMO_RIGHTS & ~ZX_RIGHT_EXECUTE;
status = vmo.replace(kNoExecFlags, &vmo);
if (status != ZX_OK) {
ZX_DLOG(ERROR, status) << "zx_handle_replace";
return {};
}
return PlatformSharedMemoryRegion(std::move(vmo), mode, size,
UnguessableToken::Create());
}
// static
bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
PlatformHandle handle,
Mode mode,
size_t size) {
zx_info_handle_basic_t basic = {};
zx_status_t status = handle->get_info(ZX_INFO_HANDLE_BASIC, &basic,
sizeof(basic), nullptr, nullptr);
if (status != ZX_OK) {
// TODO(crbug.com/838365): convert to DLOG when bug fixed.
ZX_LOG(ERROR, status) << "zx_object_get_info";
return false;
}
bool is_read_only = (basic.rights & kNoWriteOrExec) == basic.rights;
bool expected_read_only = mode == Mode::kReadOnly;
if (is_read_only != expected_read_only) {
// TODO(crbug.com/838365): convert to DLOG when bug fixed.
LOG(ERROR) << "VMO object has wrong access rights: it is"
<< (is_read_only ? " " : " not ") << "read-only but it should"
<< (expected_read_only ? " " : " not ") << "be";
return false;
}
return true;
}
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
zx::vmo handle,
Mode mode,
size_t size,
const UnguessableToken& guid)
: handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
} // namespace subtle
} // namespace base

View file

@ -0,0 +1,231 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include <mach/mach_vm.h>
#include "base/mac/mach_logging.h"
#include "base/mac/scoped_mach_vm.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "build/build_config.h"
#if defined(OS_IOS)
#error "MacOS only - iOS uses platform_shared_memory_region_posix.cc"
#endif
namespace base {
namespace subtle {
namespace {
} // namespace
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
mac::ScopedMachSendRight handle,
Mode mode,
size_t size,
const UnguessableToken& guid) {
if (!handle.is_valid())
return {};
if (size == 0)
return {};
if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
return {};
CHECK(
CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
}
mach_port_t PlatformSharedMemoryRegion::GetPlatformHandle() const {
return handle_.get();
}
bool PlatformSharedMemoryRegion::IsValid() const {
return handle_.is_valid();
}
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
if (!IsValid())
return {};
CHECK_NE(mode_, Mode::kWritable)
<< "Duplicating a writable shared memory region is prohibited";
// Increment the ref count.
kern_return_t kr = mach_port_mod_refs(mach_task_self(), handle_.get(),
MACH_PORT_RIGHT_SEND, 1);
if (kr != KERN_SUCCESS) {
MACH_DLOG(ERROR, kr) << "mach_port_mod_refs";
return {};
}
return PlatformSharedMemoryRegion(mac::ScopedMachSendRight(handle_.get()),
mode_, size_, guid_);
}
bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
return ConvertToReadOnly(nullptr);
}
bool PlatformSharedMemoryRegion::ConvertToReadOnly(void* mapped_addr) {
if (!IsValid())
return false;
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to read-only";
mac::ScopedMachSendRight handle_copy(handle_.release());
void* temp_addr = mapped_addr;
mac::ScopedMachVM scoped_memory;
if (!temp_addr) {
// Intentionally lower current prot and max prot to |VM_PROT_READ|.
kern_return_t kr = mach_vm_map(
mach_task_self(), reinterpret_cast<mach_vm_address_t*>(&temp_addr),
size_, 0, VM_FLAGS_ANYWHERE, handle_copy.get(), 0, FALSE, VM_PROT_READ,
VM_PROT_READ, VM_INHERIT_NONE);
if (kr != KERN_SUCCESS) {
MACH_DLOG(ERROR, kr) << "mach_vm_map";
return false;
}
scoped_memory.reset(reinterpret_cast<vm_address_t>(temp_addr),
mach_vm_round_page(size_));
}
// Make new memory object.
memory_object_size_t allocation_size = size_;
mac::ScopedMachSendRight named_right;
kern_return_t kr = mach_make_memory_entry_64(
mach_task_self(), &allocation_size,
reinterpret_cast<memory_object_offset_t>(temp_addr), VM_PROT_READ,
mac::ScopedMachSendRight::Receiver(named_right).get(), MACH_PORT_NULL);
if (kr != KERN_SUCCESS) {
MACH_DLOG(ERROR, kr) << "mach_make_memory_entry_64";
return false;
}
DCHECK_GE(allocation_size, size_);
handle_ = std::move(named_right);
mode_ = Mode::kReadOnly;
return true;
}
bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
if (!IsValid())
return false;
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to unsafe";
mode_ = Mode::kUnsafe;
return true;
}
bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
size_t size,
void** memory,
size_t* mapped_size) const {
bool write_allowed = mode_ != Mode::kReadOnly;
vm_prot_t vm_prot_write = write_allowed ? VM_PROT_WRITE : 0;
kern_return_t kr = mach_vm_map(
mach_task_self(),
reinterpret_cast<mach_vm_address_t*>(memory), // Output parameter
size,
0, // Alignment mask
VM_FLAGS_ANYWHERE, handle_.get(), offset,
FALSE, // Copy
VM_PROT_READ | vm_prot_write, // Current protection
VM_PROT_READ | vm_prot_write, // Maximum protection
VM_INHERIT_NONE);
if (kr != KERN_SUCCESS) {
MACH_DLOG(ERROR, kr) << "mach_vm_map";
return false;
}
*mapped_size = size;
return true;
}
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
size_t size) {
if (size == 0) {
return {};
}
if (size > static_cast<size_t>(std::numeric_limits<int>::max())) {
return {};
}
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
mach_vm_size_t vm_size = size;
mac::ScopedMachSendRight named_right;
kern_return_t kr = mach_make_memory_entry_64(
mach_task_self(), &vm_size,
0, // Address.
MAP_MEM_NAMED_CREATE | VM_PROT_READ | VM_PROT_WRITE,
mac::ScopedMachSendRight::Receiver(named_right).get(),
MACH_PORT_NULL); // Parent handle.
// Crash as soon as shm allocation fails to debug the issue
// https://crbug.com/872237.
MACH_CHECK(kr == KERN_SUCCESS, kr) << "mach_make_memory_entry_64";
DCHECK_GE(vm_size, size);
return PlatformSharedMemoryRegion(std::move(named_right), mode, size,
UnguessableToken::Create());
}
// static
bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
PlatformHandle handle,
Mode mode,
size_t size) {
mach_vm_address_t temp_addr = 0;
kern_return_t kr =
mach_vm_map(mach_task_self(), &temp_addr, size, 0, VM_FLAGS_ANYWHERE,
handle, 0, FALSE, VM_PROT_READ | VM_PROT_WRITE,
VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_NONE);
if (kr == KERN_SUCCESS) {
kern_return_t kr_deallocate =
mach_vm_deallocate(mach_task_self(), temp_addr, size);
// TODO(crbug.com/838365): convert to DLOG when bug fixed.
MACH_LOG_IF(ERROR, kr_deallocate != KERN_SUCCESS, kr_deallocate)
<< "mach_vm_deallocate";
} else if (kr != KERN_INVALID_RIGHT) {
MACH_LOG(ERROR, kr) << "mach_vm_map";
return false;
}
bool is_read_only = kr == KERN_INVALID_RIGHT;
bool expected_read_only = mode == Mode::kReadOnly;
if (is_read_only != expected_read_only) {
// TODO(crbug.com/838365): convert to DLOG when bug fixed.
LOG(ERROR) << "VM region has a wrong protection mask: it is"
<< (is_read_only ? " " : " not ") << "read-only but it should"
<< (expected_read_only ? " " : " not ") << "be";
return false;
}
return true;
}
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
mac::ScopedMachSendRight handle,
Mode mode,
size_t size,
const UnguessableToken& guid)
: handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
} // namespace subtle
} // namespace base

View file

@ -0,0 +1,343 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include <fcntl.h>
#include <sys/mman.h>
#include "base/files/file.h"
#include "base/files/file_util.h"
#include "base/metrics/histogram_macros.h"
#include "base/threading/thread_restrictions.h"
#include "build/build_config.h"
namespace base {
namespace subtle {
namespace {
struct ScopedPathUnlinkerTraits {
static const FilePath* InvalidValue() { return nullptr; }
static void Free(const FilePath* path) {
if (unlink(path->value().c_str()))
PLOG(WARNING) << "unlink";
}
};
// Unlinks the FilePath when the object is destroyed.
using ScopedPathUnlinker =
ScopedGeneric<const FilePath*, ScopedPathUnlinkerTraits>;
#if !defined(OS_NACL)
bool CheckFDAccessMode(int fd, int expected_mode) {
int fd_status = fcntl(fd, F_GETFL);
if (fd_status == -1) {
// TODO(crbug.com/838365): convert to DLOG when bug fixed.
PLOG(ERROR) << "fcntl(" << fd << ", F_GETFL) failed";
return false;
}
int mode = fd_status & O_ACCMODE;
if (mode != expected_mode) {
// TODO(crbug.com/838365): convert to DLOG when bug fixed.
LOG(ERROR) << "Descriptor access mode (" << mode
<< ") differs from expected (" << expected_mode << ")";
return false;
}
return true;
}
#endif // !defined(OS_NACL)
} // namespace
ScopedFDPair::ScopedFDPair() = default;
ScopedFDPair::ScopedFDPair(ScopedFDPair&&) = default;
ScopedFDPair& ScopedFDPair::operator=(ScopedFDPair&&) = default;
ScopedFDPair::~ScopedFDPair() = default;
ScopedFDPair::ScopedFDPair(ScopedFD in_fd, ScopedFD in_readonly_fd)
: fd(std::move(in_fd)), readonly_fd(std::move(in_readonly_fd)) {}
FDPair ScopedFDPair::get() const {
return {fd.get(), readonly_fd.get()};
}
#if defined(OS_LINUX)
// static
ScopedFD PlatformSharedMemoryRegion::ExecutableRegion::CreateFD(size_t size) {
PlatformSharedMemoryRegion region =
Create(Mode::kUnsafe, size, true /* executable */);
if (region.IsValid())
return region.PassPlatformHandle().fd;
return ScopedFD();
}
#endif // defined(OS_LINUX)
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
ScopedFDPair handle,
Mode mode,
size_t size,
const UnguessableToken& guid) {
if (!handle.fd.is_valid())
return {};
if (size == 0)
return {};
if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
return {};
CHECK(
CheckPlatformHandlePermissionsCorrespondToMode(handle.get(), mode, size));
switch (mode) {
case Mode::kReadOnly:
case Mode::kUnsafe:
if (handle.readonly_fd.is_valid()) {
handle.readonly_fd.reset();
DLOG(WARNING) << "Readonly handle shouldn't be valid for a "
"non-writable memory region; closing";
}
break;
case Mode::kWritable:
if (!handle.readonly_fd.is_valid()) {
DLOG(ERROR)
<< "Readonly handle must be valid for writable memory region";
return {};
}
break;
default:
DLOG(ERROR) << "Invalid permission mode: " << static_cast<int>(mode);
return {};
}
return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
}
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
ScopedFD handle,
Mode mode,
size_t size,
const UnguessableToken& guid) {
CHECK_NE(mode, Mode::kWritable);
return Take(ScopedFDPair(std::move(handle), ScopedFD()), mode, size, guid);
}
FDPair PlatformSharedMemoryRegion::GetPlatformHandle() const {
return handle_.get();
}
bool PlatformSharedMemoryRegion::IsValid() const {
return handle_.fd.is_valid() &&
(mode_ == Mode::kWritable ? handle_.readonly_fd.is_valid() : true);
}
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
if (!IsValid())
return {};
CHECK_NE(mode_, Mode::kWritable)
<< "Duplicating a writable shared memory region is prohibited";
ScopedFD duped_fd(HANDLE_EINTR(dup(handle_.fd.get())));
if (!duped_fd.is_valid()) {
DPLOG(ERROR) << "dup(" << handle_.fd.get() << ") failed";
return {};
}
return PlatformSharedMemoryRegion({std::move(duped_fd), ScopedFD()}, mode_,
size_, guid_);
}
bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
if (!IsValid())
return false;
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to read-only";
handle_.fd.reset(handle_.readonly_fd.release());
mode_ = Mode::kReadOnly;
return true;
}
bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
if (!IsValid())
return false;
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to unsafe";
handle_.readonly_fd.reset();
mode_ = Mode::kUnsafe;
return true;
}
bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
size_t size,
void** memory,
size_t* mapped_size) const {
bool write_allowed = mode_ != Mode::kReadOnly;
*memory = mmap(nullptr, size, PROT_READ | (write_allowed ? PROT_WRITE : 0),
MAP_SHARED, handle_.fd.get(), offset);
bool mmap_succeeded = *memory && *memory != MAP_FAILED;
if (!mmap_succeeded) {
DPLOG(ERROR) << "mmap " << handle_.fd.get() << " failed";
return false;
}
*mapped_size = size;
return true;
}
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
size_t size
#if defined(OS_LINUX)
,
bool executable
#endif
) {
#if defined(OS_NACL)
// Untrusted code can't create descriptors or handles.
return {};
#else
if (size == 0) {
return {};
}
if (size > static_cast<size_t>(std::numeric_limits<int>::max())) {
return {};
}
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
// This function theoretically can block on the disk, but realistically
// the temporary files we create will just go into the buffer cache
// and be deleted before they ever make it out to disk.
ThreadRestrictions::ScopedAllowIO allow_io;
// We don't use shm_open() API in order to support the --disable-dev-shm-usage
// flag.
FilePath directory;
if (!GetShmemTempDir(
#if defined(OS_LINUX)
executable,
#else
false /* executable */,
#endif
&directory)) {
return {};
}
FilePath path;
ScopedFD fd = CreateAndOpenFdForTemporaryFileInDir(directory, &path);
File shm_file(fd.release());
if (!shm_file.IsValid()) {
PLOG(ERROR) << "Creating shared memory in " << path.value() << " failed";
FilePath dir = path.DirName();
if (access(dir.value().c_str(), W_OK | X_OK) < 0) {
PLOG(ERROR) << "Unable to access(W_OK|X_OK) " << dir.value();
if (dir.value() == "/dev/shm") {
LOG(FATAL) << "This is frequently caused by incorrect permissions on "
<< "/dev/shm. Try 'sudo chmod 1777 /dev/shm' to fix.";
}
}
return {};
}
// Deleting the file prevents anyone else from mapping it in (making it
// private), and prevents the need for cleanup (once the last fd is
// closed, it is truly freed).
ScopedPathUnlinker path_unlinker(&path);
ScopedFD readonly_fd;
if (mode == Mode::kWritable) {
// Also open as readonly so that we can ConvertToReadOnly().
readonly_fd.reset(HANDLE_EINTR(open(path.value().c_str(), O_RDONLY)));
if (!readonly_fd.is_valid()) {
DPLOG(ERROR) << "open(\"" << path.value() << "\", O_RDONLY) failed";
return {};
}
}
if (!AllocateFileRegion(&shm_file, 0, size)) {
return {};
}
if (readonly_fd.is_valid()) {
stat_wrapper_t shm_stat;
if (File::Fstat(shm_file.GetPlatformFile(), &shm_stat) != 0) {
DPLOG(ERROR) << "fstat(fd) failed";
return {};
}
stat_wrapper_t readonly_stat;
if (File::Fstat(readonly_fd.get(), &readonly_stat) != 0) {
DPLOG(ERROR) << "fstat(readonly_fd) failed";
return {};
}
if (shm_stat.st_dev != readonly_stat.st_dev ||
shm_stat.st_ino != readonly_stat.st_ino) {
LOG(ERROR) << "Writable and read-only inodes don't match; bailing";
return {};
}
}
return PlatformSharedMemoryRegion(
{ScopedFD(shm_file.TakePlatformFile()), std::move(readonly_fd)}, mode,
size, UnguessableToken::Create());
#endif // !defined(OS_NACL)
}
bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
PlatformHandle handle,
Mode mode,
size_t size) {
#if !defined(OS_NACL)
if (!CheckFDAccessMode(handle.fd,
mode == Mode::kReadOnly ? O_RDONLY : O_RDWR)) {
return false;
}
if (mode == Mode::kWritable)
return CheckFDAccessMode(handle.readonly_fd, O_RDONLY);
// The second descriptor must be invalid in kReadOnly and kUnsafe modes.
if (handle.readonly_fd != -1) {
// TODO(crbug.com/838365): convert to DLOG when bug fixed.
LOG(ERROR) << "The second descriptor must be invalid";
return false;
}
return true;
#else
// fcntl(_, F_GETFL) is not implemented on NaCl.
// We also cannot try to mmap() a region as writable and look at the return
// status because the plugin process crashes if system mmap() fails.
return true;
#endif // !defined(OS_NACL)
}
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
ScopedFDPair handle,
Mode mode,
size_t size,
const UnguessableToken& guid)
: handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
} // namespace subtle
} // namespace base

View file

@ -0,0 +1,321 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/platform_shared_memory_region.h"
#include <aclapi.h>
#include <stddef.h>
#include <stdint.h>
#include "base/allocator/partition_allocator/page_allocator.h"
#include "base/bits.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/process/process_handle.h"
#include "base/rand_util.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/win/windows_version.h"
namespace base {
namespace subtle {
namespace {
typedef enum _SECTION_INFORMATION_CLASS {
SectionBasicInformation,
} SECTION_INFORMATION_CLASS;
typedef struct _SECTION_BASIC_INFORMATION {
PVOID BaseAddress;
ULONG Attributes;
LARGE_INTEGER Size;
} SECTION_BASIC_INFORMATION, *PSECTION_BASIC_INFORMATION;
typedef ULONG(__stdcall* NtQuerySectionType)(
HANDLE SectionHandle,
SECTION_INFORMATION_CLASS SectionInformationClass,
PVOID SectionInformation,
ULONG SectionInformationLength,
PULONG ResultLength);
// Returns the length of the memory section starting at the supplied address.
size_t GetMemorySectionSize(void* address) {
MEMORY_BASIC_INFORMATION memory_info;
if (!::VirtualQuery(address, &memory_info, sizeof(memory_info)))
return 0;
return memory_info.RegionSize -
(static_cast<char*>(address) -
static_cast<char*>(memory_info.AllocationBase));
}
// Checks if the section object is safe to map. At the moment this just means
// it's not an image section.
bool IsSectionSafeToMap(HANDLE handle) {
static NtQuerySectionType nt_query_section_func =
reinterpret_cast<NtQuerySectionType>(
::GetProcAddress(::GetModuleHandle(L"ntdll.dll"), "NtQuerySection"));
DCHECK(nt_query_section_func);
// The handle must have SECTION_QUERY access for this to succeed.
SECTION_BASIC_INFORMATION basic_information = {};
ULONG status =
nt_query_section_func(handle, SectionBasicInformation, &basic_information,
sizeof(basic_information), nullptr);
if (status)
return false;
return (basic_information.Attributes & SEC_IMAGE) != SEC_IMAGE;
}
// Returns a HANDLE on success and |nullptr| on failure.
// This function is similar to CreateFileMapping, but removes the permissions
// WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE.
//
// A newly created file mapping has two sets of permissions. It has access
// control permissions (WRITE_DAC, WRITE_OWNER, READ_CONTROL, and DELETE) and
// file permissions (FILE_MAP_READ, FILE_MAP_WRITE, etc.). The Chrome sandbox
// prevents HANDLEs with the WRITE_DAC permission from being duplicated into
// unprivileged processes.
//
// In order to remove the access control permissions, after being created the
// handle is duplicated with only the file access permissions.
HANDLE CreateFileMappingWithReducedPermissions(SECURITY_ATTRIBUTES* sa,
size_t rounded_size,
LPCWSTR name) {
HANDLE h = CreateFileMapping(INVALID_HANDLE_VALUE, sa, PAGE_READWRITE, 0,
static_cast<DWORD>(rounded_size), name);
if (!h) {
return nullptr;
}
HANDLE h2;
ProcessHandle process = GetCurrentProcess();
BOOL success = ::DuplicateHandle(
process, h, process, &h2, FILE_MAP_READ | FILE_MAP_WRITE | SECTION_QUERY,
FALSE, 0);
BOOL rv = ::CloseHandle(h);
DCHECK(rv);
if (!success) {
return nullptr;
}
return h2;
}
} // namespace
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Take(
win::ScopedHandle handle,
Mode mode,
size_t size,
const UnguessableToken& guid) {
if (!handle.IsValid())
return {};
if (size == 0)
return {};
if (size > static_cast<size_t>(std::numeric_limits<int>::max()))
return {};
if (!IsSectionSafeToMap(handle.Get()))
return {};
CHECK(
CheckPlatformHandlePermissionsCorrespondToMode(handle.Get(), mode, size));
return PlatformSharedMemoryRegion(std::move(handle), mode, size, guid);
}
HANDLE PlatformSharedMemoryRegion::GetPlatformHandle() const {
return handle_.Get();
}
bool PlatformSharedMemoryRegion::IsValid() const {
return handle_.IsValid();
}
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Duplicate() const {
if (!IsValid())
return {};
CHECK_NE(mode_, Mode::kWritable)
<< "Duplicating a writable shared memory region is prohibited";
HANDLE duped_handle;
ProcessHandle process = GetCurrentProcess();
BOOL success =
::DuplicateHandle(process, handle_.Get(), process, &duped_handle, 0,
FALSE, DUPLICATE_SAME_ACCESS);
if (!success)
return {};
return PlatformSharedMemoryRegion(win::ScopedHandle(duped_handle), mode_,
size_, guid_);
}
bool PlatformSharedMemoryRegion::ConvertToReadOnly() {
if (!IsValid())
return false;
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to read-only";
win::ScopedHandle handle_copy(handle_.Take());
HANDLE duped_handle;
ProcessHandle process = GetCurrentProcess();
BOOL success =
::DuplicateHandle(process, handle_copy.Get(), process, &duped_handle,
FILE_MAP_READ | SECTION_QUERY, FALSE, 0);
if (!success)
return false;
handle_.Set(duped_handle);
mode_ = Mode::kReadOnly;
return true;
}
bool PlatformSharedMemoryRegion::ConvertToUnsafe() {
if (!IsValid())
return false;
CHECK_EQ(mode_, Mode::kWritable)
<< "Only writable shared memory region can be converted to unsafe";
mode_ = Mode::kUnsafe;
return true;
}
bool PlatformSharedMemoryRegion::MapAtInternal(off_t offset,
size_t size,
void** memory,
size_t* mapped_size) const {
bool write_allowed = mode_ != Mode::kReadOnly;
// Try to map the shared memory. On the first failure, release any reserved
// address space for a single retry.
for (int i = 0; i < 2; ++i) {
*memory = MapViewOfFile(
handle_.Get(), FILE_MAP_READ | (write_allowed ? FILE_MAP_WRITE : 0),
static_cast<uint64_t>(offset) >> 32, static_cast<DWORD>(offset), size);
if (*memory)
break;
ReleaseReservation();
}
if (!*memory) {
DPLOG(ERROR) << "Failed executing MapViewOfFile";
return false;
}
*mapped_size = GetMemorySectionSize(*memory);
return true;
}
// static
PlatformSharedMemoryRegion PlatformSharedMemoryRegion::Create(Mode mode,
size_t size) {
// TODO(crbug.com/210609): NaCl forces us to round up 64k here, wasting 32k
// per mapping on average.
static const size_t kSectionSize = 65536;
if (size == 0) {
return {};
}
// Aligning may overflow so check that the result doesn't decrease.
size_t rounded_size = bits::Align(size, kSectionSize);
if (rounded_size < size ||
rounded_size > static_cast<size_t>(std::numeric_limits<int>::max())) {
return {};
}
CHECK_NE(mode, Mode::kReadOnly) << "Creating a region in read-only mode will "
"lead to this region being non-modifiable";
// Add an empty DACL to enforce anonymous read-only sections.
ACL dacl;
SECURITY_DESCRIPTOR sd;
if (!InitializeAcl(&dacl, sizeof(dacl), ACL_REVISION)) {
return {};
}
if (!InitializeSecurityDescriptor(&sd, SECURITY_DESCRIPTOR_REVISION)) {
return {};
}
if (!SetSecurityDescriptorDacl(&sd, TRUE, &dacl, FALSE)) {
return {};
}
string16 name;
if (win::GetVersion() < win::Version::WIN8_1) {
// Windows < 8.1 ignores DACLs on certain unnamed objects (like shared
// sections). So, we generate a random name when we need to enforce
// read-only.
uint64_t rand_values[4];
RandBytes(&rand_values, sizeof(rand_values));
name = ASCIIToUTF16(StringPrintf("CrSharedMem_%016llx%016llx%016llx%016llx",
rand_values[0], rand_values[1],
rand_values[2], rand_values[3]));
DCHECK(!name.empty());
}
SECURITY_ATTRIBUTES sa = {sizeof(sa), &sd, FALSE};
// Ask for the file mapping with reduced permissions to avoid passing the
// access control permissions granted by default into an unprivileged process.
HANDLE h = CreateFileMappingWithReducedPermissions(
&sa, rounded_size, name.empty() ? nullptr : as_wcstr(name));
if (h == nullptr) {
// CreateFileMappingWithReducedPermissions() returns null on failure;
// nothing further to clean up here.
return {};
}
win::ScopedHandle scoped_h(h);
// Check if the shared memory pre-exists.
if (GetLastError() == ERROR_ALREADY_EXISTS) {
return {};
}
return PlatformSharedMemoryRegion(std::move(scoped_h), mode, size,
UnguessableToken::Create());
}
// static
bool PlatformSharedMemoryRegion::CheckPlatformHandlePermissionsCorrespondToMode(
PlatformHandle handle,
Mode mode,
size_t size) {
// Call ::DuplicateHandle() with FILE_MAP_WRITE as a desired access to check
// if the |handle| has a write access.
ProcessHandle process = GetCurrentProcess();
HANDLE duped_handle;
BOOL success = ::DuplicateHandle(process, handle, process, &duped_handle,
FILE_MAP_WRITE, FALSE, 0);
if (success) {
BOOL rv = ::CloseHandle(duped_handle);
DCHECK(rv);
}
bool is_read_only = !success;
bool expected_read_only = mode == Mode::kReadOnly;
if (is_read_only != expected_read_only) {
DLOG(ERROR) << "File mapping handle has wrong access rights: it is"
<< (is_read_only ? " " : " not ") << "read-only but it should"
<< (expected_read_only ? " " : " not ") << "be";
return false;
}
return true;
}
PlatformSharedMemoryRegion::PlatformSharedMemoryRegion(
win::ScopedHandle handle,
Mode mode,
size_t size,
const UnguessableToken& guid)
: handle_(std::move(handle)), mode_(mode), size_(size), guid_(guid) {}
} // namespace subtle
} // namespace base

View file

@@ -0,0 +1,23 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_PTR_UTIL_H_
#define BASE_MEMORY_PTR_UTIL_H_
#include <memory>
#include <utility>
namespace base {
// Helper to transfer ownership of a raw pointer to a std::unique_ptr<T>.
// Note that std::unique_ptr<T> has very different semantics from
// std::unique_ptr<T[]>: do not use this helper for array allocations.
template <typename T>
std::unique_ptr<T> WrapUnique(T* ptr) {
return std::unique_ptr<T>(ptr);
}
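// A minimal usage sketch (illustrative only; |Foo| and CreateRawFoo() are
// hypothetical):
//
//   std::unique_ptr<Foo> foo = base::WrapUnique(CreateRawFoo());
//
// When the constructor is publicly accessible, prefer std::make_unique<Foo>()
// over WrapUnique(new Foo()); WrapUnique is mainly useful with raw-pointer
// factories or private constructors exposed through friend factories.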
} // namespace base
#endif // BASE_MEMORY_PTR_UTIL_H_

View file

@@ -0,0 +1,48 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
#define BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_
#include <type_traits>
#include "base/template_util.h"
// It is dangerous to post a task with a T* argument where T is a subtype of
// RefCounted(Base|ThreadSafeBase), since by the time the parameter is used, the
// object may already have been deleted because it was not held with a
// scoped_refptr. Example: http://crbug.com/27191
// The following set of traits are designed to generate a compile error
// whenever this antipattern is attempted.
namespace base {
// This is a base internal implementation file used by task.h and callback.h.
// Not for public consumption, so we wrap it in namespace internal.
namespace internal {
template <typename T, typename = void>
struct IsRefCountedType : std::false_type {};
template <typename T>
struct IsRefCountedType<T,
void_t<decltype(std::declval<T*>()->AddRef()),
decltype(std::declval<T*>()->Release())>>
: std::true_type {};
// Human-readable translation: you need to be a scoped_refptr if you are a raw
// pointer type that is convertible to a RefCounted(Base|ThreadSafeBase) type.
template <typename T>
struct NeedsScopedRefptrButGetsRawPtr
: conjunction<std::is_pointer<T>,
IsRefCountedType<std::remove_pointer_t<T>>> {
static_assert(!std::is_reference<T>::value,
"NeedsScopedRefptrButGetsRawPtr requires non-reference type.");
};
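// Illustrative sketch (assumed, not part of this header): given a ref-counted
// type
//
//   class Foo : public base::RefCounted<Foo> { ... };
//
// NeedsScopedRefptrButGetsRawPtr<Foo*>::value is true, because Foo* is a raw
// pointer to a type with AddRef()/Release(), while it is false for
// scoped_refptr<Foo> and for pointers to non-ref-counted types.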
} // namespace internal
} // namespace base
#endif // BASE_MEMORY_RAW_SCOPED_REFPTR_MISMATCH_CHECKER_H_

View file

@@ -0,0 +1,102 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/read_only_shared_memory_region.h"
#include <utility>
#include "build/build_config.h"
namespace base {
ReadOnlySharedMemoryRegion::CreateFunction*
ReadOnlySharedMemoryRegion::create_hook_ = nullptr;
// static
MappedReadOnlyRegion ReadOnlySharedMemoryRegion::Create(size_t size) {
if (create_hook_)
return create_hook_(size);
subtle::PlatformSharedMemoryRegion handle =
subtle::PlatformSharedMemoryRegion::CreateWritable(size);
if (!handle.IsValid())
return {};
void* memory_ptr = nullptr;
size_t mapped_size = 0;
if (!handle.MapAt(0, handle.GetSize(), &memory_ptr, &mapped_size))
return {};
WritableSharedMemoryMapping mapping(memory_ptr, size, mapped_size,
handle.GetGUID());
#if defined(OS_MACOSX) && !defined(OS_IOS)
handle.ConvertToReadOnly(memory_ptr);
#else
handle.ConvertToReadOnly();
#endif // defined(OS_MACOSX) && !defined(OS_IOS)
ReadOnlySharedMemoryRegion region(std::move(handle));
if (!region.IsValid() || !mapping.IsValid())
return {};
return {std::move(region), std::move(mapping)};
}
// static
ReadOnlySharedMemoryRegion ReadOnlySharedMemoryRegion::Deserialize(
subtle::PlatformSharedMemoryRegion handle) {
return ReadOnlySharedMemoryRegion(std::move(handle));
}
// static
subtle::PlatformSharedMemoryRegion
ReadOnlySharedMemoryRegion::TakeHandleForSerialization(
ReadOnlySharedMemoryRegion region) {
return std::move(region.handle_);
}
ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion() = default;
ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion(
ReadOnlySharedMemoryRegion&& region) = default;
ReadOnlySharedMemoryRegion& ReadOnlySharedMemoryRegion::operator=(
ReadOnlySharedMemoryRegion&& region) = default;
ReadOnlySharedMemoryRegion::~ReadOnlySharedMemoryRegion() = default;
ReadOnlySharedMemoryRegion ReadOnlySharedMemoryRegion::Duplicate() const {
return ReadOnlySharedMemoryRegion(handle_.Duplicate());
}
ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::Map() const {
return MapAt(0, handle_.GetSize());
}
ReadOnlySharedMemoryMapping ReadOnlySharedMemoryRegion::MapAt(
off_t offset,
size_t size) const {
if (!IsValid())
return {};
void* memory = nullptr;
size_t mapped_size = 0;
if (!handle_.MapAt(offset, size, &memory, &mapped_size))
return {};
return ReadOnlySharedMemoryMapping(memory, size, mapped_size,
handle_.GetGUID());
}
bool ReadOnlySharedMemoryRegion::IsValid() const {
return handle_.IsValid();
}
ReadOnlySharedMemoryRegion::ReadOnlySharedMemoryRegion(
subtle::PlatformSharedMemoryRegion handle)
: handle_(std::move(handle)) {
if (handle_.IsValid()) {
CHECK_EQ(handle_.GetMode(),
subtle::PlatformSharedMemoryRegion::Mode::kReadOnly);
}
}
} // namespace base

View file

@@ -0,0 +1,136 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_
#include <utility>
#include "base/macros.h"
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
namespace base {
struct MappedReadOnlyRegion;
// Scoped move-only handle to a region of platform shared memory. The instance
// owns the platform handle it wraps. Mappings created by this region are
// read-only. These mappings remain valid even after the region handle is moved
// or destroyed.
class BASE_EXPORT ReadOnlySharedMemoryRegion {
public:
using MappingType = ReadOnlySharedMemoryMapping;
// Creates a new ReadOnlySharedMemoryRegion instance of a given size along
// with the WritableSharedMemoryMapping which provides the only way to modify
// the content of the newly created region. The returned region and mapping
// are guaranteed to either be both valid or both invalid. Use
// |MappedReadOnlyRegion::IsValid()| as a shortcut for checking creation
// success.
//
// This means that the caller's process is the only process that can modify
// the region content. If you need to pass write access to another process,
// consider using WritableSharedMemoryRegion or UnsafeSharedMemoryRegion.
static MappedReadOnlyRegion Create(size_t size);
using CreateFunction = decltype(Create);
// Returns a ReadOnlySharedMemoryRegion built from a platform-specific handle
// that was taken from another ReadOnlySharedMemoryRegion instance. Returns an
// invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
// isn't read-only.
// This should be used only by the code passing handles across process
// boundaries.
static ReadOnlySharedMemoryRegion Deserialize(
subtle::PlatformSharedMemoryRegion handle);
// Extracts a platform handle from the region. Ownership is transferred to the
// returned region object.
// This should be used only for sending the handle from the current process to
// another.
static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
ReadOnlySharedMemoryRegion region);
// Default constructor initializes an invalid instance.
ReadOnlySharedMemoryRegion();
// Move operations are allowed.
ReadOnlySharedMemoryRegion(ReadOnlySharedMemoryRegion&&);
ReadOnlySharedMemoryRegion& operator=(ReadOnlySharedMemoryRegion&&);
// Destructor closes shared memory region if valid.
// All created mappings will remain valid.
~ReadOnlySharedMemoryRegion();
// Duplicates the underlying platform handle and creates a new
// ReadOnlySharedMemoryRegion instance that owns this handle. Returns a valid
// ReadOnlySharedMemoryRegion on success, invalid otherwise. The current
// region instance remains valid in any case.
ReadOnlySharedMemoryRegion Duplicate() const;
// Maps the shared memory region into the caller's address space with
// read-only access. The mapped address is guaranteed to have an alignment of
// at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
// Returns a valid ReadOnlySharedMemoryMapping instance on success, invalid
// otherwise.
ReadOnlySharedMemoryMapping Map() const;
// Same as above, but maps only |size| bytes of the shared memory region
// starting with the given |offset|. |offset| must be aligned to the value of
// |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if
// requested bytes are out of the region limits.
ReadOnlySharedMemoryMapping MapAt(off_t offset, size_t size) const;
// Whether the underlying platform handle is valid.
bool IsValid() const;
// Returns the maximum mapping size that can be created from this region.
size_t GetSize() const {
DCHECK(IsValid());
return handle_.GetSize();
}
// Returns 128-bit GUID of the region.
const UnguessableToken& GetGUID() const {
DCHECK(IsValid());
return handle_.GetGUID();
}
// Returns a platform shared memory handle. |this| remains the owner of the
// handle.
subtle::PlatformSharedMemoryRegion::PlatformHandle GetPlatformHandle() const {
DCHECK(IsValid());
return handle_.GetPlatformHandle();
}
private:
friend class SharedMemoryHooks;
explicit ReadOnlySharedMemoryRegion(
subtle::PlatformSharedMemoryRegion handle);
static void set_create_hook(CreateFunction* hook) { create_hook_ = hook; }
static CreateFunction* create_hook_;
subtle::PlatformSharedMemoryRegion handle_;
DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryRegion);
};
// Helper struct for return value of ReadOnlySharedMemoryRegion::Create().
struct MappedReadOnlyRegion {
ReadOnlySharedMemoryRegion region;
WritableSharedMemoryMapping mapping;
// Helper function to check return value of
// ReadOnlySharedMemoryRegion::Create(). |region| and |mapping| are either both
// valid or both invalid.
bool IsValid() const {
DCHECK_EQ(region.IsValid(), mapping.IsValid());
return region.IsValid() && mapping.IsValid();
}
};
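// Usage sketch (illustrative; error handling elided and the size is arbitrary):
//
//   base::MappedReadOnlyRegion mapped =
//       base::ReadOnlySharedMemoryRegion::Create(4096);
//   if (mapped.IsValid()) {
//     // Fill the region through the writable mapping owned by this process.
//     memset(mapped.mapping.memory(), 0, mapped.mapping.size());
//     // Hand read-only duplicates of |mapped.region| to other processes.
//     base::ReadOnlySharedMemoryRegion dup = mapped.region.Duplicate();
//   }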
} // namespace base
#endif // BASE_MEMORY_READ_ONLY_SHARED_MEMORY_REGION_H_

View file

@@ -0,0 +1,101 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/ref_counted.h"
#include <limits>
#include <type_traits>
#include "base/threading/thread_collision_warner.h"
namespace base {
namespace {
#if DCHECK_IS_ON()
std::atomic_int g_cross_thread_ref_count_access_allow_count(0);
#endif
} // namespace
namespace subtle {
bool RefCountedThreadSafeBase::HasOneRef() const {
return ref_count_.IsOne();
}
bool RefCountedThreadSafeBase::HasAtLeastOneRef() const {
return !ref_count_.IsZero();
}
#if DCHECK_IS_ON()
RefCountedThreadSafeBase::~RefCountedThreadSafeBase() {
DCHECK(in_dtor_) << "RefCountedThreadSafe object deleted without "
"calling Release()";
}
#endif
// For security and correctness, we check the arithmetic on ref counts.
//
// In an attempt to avoid binary bloat (from inlining the `CHECK`), we define
// these functions out-of-line. However, compilers are wily. Further testing may
// show that `NOINLINE` helps or hurts.
//
#if defined(ARCH_CPU_64_BITS)
void RefCountedBase::AddRefImpl() const {
// An attacker could induce use-after-free bugs, and potentially exploit them,
// by creating so many references to a ref-counted object that the reference
// count overflows. On 32-bit architectures, there is not enough address space
// to succeed. But on 64-bit architectures, it might indeed be possible.
// Therefore, we can elide the check for arithmetic overflow on 32-bit, but we
// must check on 64-bit.
//
// Make sure the addition didn't wrap back around to 0. This form of check
// works because we assert that `ref_count_` is an unsigned integer type.
CHECK(++ref_count_ != 0);
}
void RefCountedBase::ReleaseImpl() const {
// Make sure the subtraction didn't wrap back around from 0 to the max value.
// That could cause memory leaks, and may induce application-semantic
// correctness or safety bugs. (E.g. what if we really needed that object to
// be destroyed at the right time?)
//
// Note that unlike with overflow, underflow could also happen on 32-bit
// architectures. Arguably, we should do this check on 32-bit machines too.
CHECK(--ref_count_ != std::numeric_limits<decltype(ref_count_)>::max());
}
#endif
#if !defined(ARCH_CPU_X86_FAMILY)
bool RefCountedThreadSafeBase::Release() const {
return ReleaseImpl();
}
void RefCountedThreadSafeBase::AddRef() const {
AddRefImpl();
}
void RefCountedThreadSafeBase::AddRefWithCheck() const {
AddRefWithCheckImpl();
}
#endif
#if DCHECK_IS_ON()
bool RefCountedBase::CalledOnValidSequence() const {
return sequence_checker_.CalledOnValidSequence() ||
g_cross_thread_ref_count_access_allow_count.load() != 0;
}
#endif
} // namespace subtle
#if DCHECK_IS_ON()
ScopedAllowCrossThreadRefCountAccess::ScopedAllowCrossThreadRefCountAccess() {
++g_cross_thread_ref_count_access_allow_count;
}
ScopedAllowCrossThreadRefCountAccess::~ScopedAllowCrossThreadRefCountAccess() {
--g_cross_thread_ref_count_access_allow_count;
}
#endif
} // namespace base

View file

@@ -0,0 +1,463 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_REF_COUNTED_H_
#define BASE_MEMORY_REF_COUNTED_H_
#include <stddef.h>
#include <utility>
#include "base/atomic_ref_count.h"
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/sequence_checker.h"
#include "base/threading/thread_collision_warner.h"
#include "build/build_config.h"
namespace base {
namespace subtle {
class BASE_EXPORT RefCountedBase {
public:
bool HasOneRef() const { return ref_count_ == 1; }
bool HasAtLeastOneRef() const { return ref_count_ >= 1; }
protected:
explicit RefCountedBase(StartRefCountFromZeroTag) {
#if DCHECK_IS_ON()
sequence_checker_.DetachFromSequence();
#endif
}
explicit RefCountedBase(StartRefCountFromOneTag) : ref_count_(1) {
#if DCHECK_IS_ON()
needs_adopt_ref_ = true;
sequence_checker_.DetachFromSequence();
#endif
}
~RefCountedBase() {
#if DCHECK_IS_ON()
DCHECK(in_dtor_) << "RefCounted object deleted without calling Release()";
#endif
}
void AddRef() const {
// TODO(maruel): Add back once it doesn't assert 500 times/sec.
// Current thread books the critical section "AddRelease"
// without releasing it.
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
DCHECK(!needs_adopt_ref_)
<< "This RefCounted object is created with non-zero reference count."
<< " The first reference to such a object has to be made by AdoptRef or"
<< " MakeRefCounted.";
if (ref_count_ >= 1) {
DCHECK(CalledOnValidSequence());
}
#endif
AddRefImpl();
}
// Returns true if the object should self-delete.
bool Release() const {
ReleaseImpl();
// TODO(maruel): Add back once it doesn't assert 500 times/sec.
// Current thread books the critical section "AddRelease"
// without releasing it.
// DFAKE_SCOPED_LOCK_THREAD_LOCKED(add_release_);
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
if (ref_count_ == 0)
in_dtor_ = true;
if (ref_count_ >= 1)
DCHECK(CalledOnValidSequence());
if (ref_count_ == 1)
sequence_checker_.DetachFromSequence();
#endif
return ref_count_ == 0;
}
// Returns true if it is safe to read or write the object, from a thread
// safety standpoint. Should be DCHECK'd from the methods of RefCounted
// classes if there is a danger of objects being shared across threads.
//
// This produces fewer false positives than adding a separate SequenceChecker
// into the subclass, because it automatically detaches from the sequence when
// the reference count is 1 (and never fails if there is only one reference).
//
// This means unlike a separate SequenceChecker, it will permit a singly
// referenced object to be passed between threads (not holding a reference on
// the sending thread), but will trap if the sending thread holds onto a
// reference, or if the object is accessed from multiple threads
// simultaneously.
bool IsOnValidSequence() const {
#if DCHECK_IS_ON()
return ref_count_ <= 1 || CalledOnValidSequence();
#else
return true;
#endif
}
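// Illustrative sketch (assumed): a subclass whose instances may be handed
// across threads can guard its accessors with
//
//   void MyFoo::Method() {
//     DCHECK(IsOnValidSequence());
//     ...
//   }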
private:
template <typename U>
friend scoped_refptr<U> base::AdoptRef(U*);
FRIEND_TEST_ALL_PREFIXES(RefCountedDeathTest, TestOverflowCheck);
void Adopted() const {
#if DCHECK_IS_ON()
DCHECK(needs_adopt_ref_);
needs_adopt_ref_ = false;
#endif
}
#if defined(ARCH_CPU_64_BITS)
void AddRefImpl() const;
void ReleaseImpl() const;
#else
void AddRefImpl() const { ++ref_count_; }
void ReleaseImpl() const { --ref_count_; }
#endif
#if DCHECK_IS_ON()
bool CalledOnValidSequence() const;
#endif
mutable uint32_t ref_count_ = 0;
static_assert(std::is_unsigned<decltype(ref_count_)>::value,
"ref_count_ must be an unsigned type.");
#if DCHECK_IS_ON()
mutable bool needs_adopt_ref_ = false;
mutable bool in_dtor_ = false;
mutable SequenceChecker sequence_checker_;
#endif
DFAKE_MUTEX(add_release_);
DISALLOW_COPY_AND_ASSIGN(RefCountedBase);
};
class BASE_EXPORT RefCountedThreadSafeBase {
public:
bool HasOneRef() const;
bool HasAtLeastOneRef() const;
protected:
explicit constexpr RefCountedThreadSafeBase(StartRefCountFromZeroTag) {}
explicit constexpr RefCountedThreadSafeBase(StartRefCountFromOneTag)
: ref_count_(1) {
#if DCHECK_IS_ON()
needs_adopt_ref_ = true;
#endif
}
#if DCHECK_IS_ON()
~RefCountedThreadSafeBase();
#else
~RefCountedThreadSafeBase() = default;
#endif
// Release and AddRef are suitable for inlining on X86 because they generate
// very small code sequences. On other platforms (ARM), it causes a size
// regression and is probably not worth it.
#if defined(ARCH_CPU_X86_FAMILY)
// Returns true if the object should self-delete.
bool Release() const { return ReleaseImpl(); }
void AddRef() const { AddRefImpl(); }
void AddRefWithCheck() const { AddRefWithCheckImpl(); }
#else
// Returns true if the object should self-delete.
bool Release() const;
void AddRef() const;
void AddRefWithCheck() const;
#endif
private:
template <typename U>
friend scoped_refptr<U> base::AdoptRef(U*);
void Adopted() const {
#if DCHECK_IS_ON()
DCHECK(needs_adopt_ref_);
needs_adopt_ref_ = false;
#endif
}
ALWAYS_INLINE void AddRefImpl() const {
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
DCHECK(!needs_adopt_ref_)
<< "This RefCounted object is created with non-zero reference count."
<< " The first reference to such a object has to be made by AdoptRef or"
<< " MakeRefCounted.";
#endif
ref_count_.Increment();
}
ALWAYS_INLINE void AddRefWithCheckImpl() const {
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
DCHECK(!needs_adopt_ref_)
<< "This RefCounted object is created with non-zero reference count."
<< " The first reference to such a object has to be made by AdoptRef or"
<< " MakeRefCounted.";
#endif
CHECK(ref_count_.Increment() > 0);
}
ALWAYS_INLINE bool ReleaseImpl() const {
#if DCHECK_IS_ON()
DCHECK(!in_dtor_);
DCHECK(!ref_count_.IsZero());
#endif
if (!ref_count_.Decrement()) {
#if DCHECK_IS_ON()
in_dtor_ = true;
#endif
return true;
}
return false;
}
mutable AtomicRefCount ref_count_{0};
#if DCHECK_IS_ON()
mutable bool needs_adopt_ref_ = false;
mutable bool in_dtor_ = false;
#endif
DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafeBase);
};
} // namespace subtle
// ScopedAllowCrossThreadRefCountAccess disables the check documented on
// RefCounted below for rare pre-existing use cases where thread-safety was
// guaranteed through other means (e.g. explicit sequencing of calls across
// execution sequences when bouncing between threads in order). New callers
// should refrain from using this (callsites handling thread-safety through
// locks should use RefCountedThreadSafe, since the overhead of its atomics is
// negligible compared to locks anyway, and callsites doing explicit sequencing
// should properly std::move() the ref to avoid hitting this check).
// TODO(tzik): Cleanup existing use cases and remove
// ScopedAllowCrossThreadRefCountAccess.
class BASE_EXPORT ScopedAllowCrossThreadRefCountAccess final {
public:
#if DCHECK_IS_ON()
ScopedAllowCrossThreadRefCountAccess();
~ScopedAllowCrossThreadRefCountAccess();
#else
ScopedAllowCrossThreadRefCountAccess() {}
~ScopedAllowCrossThreadRefCountAccess() {}
#endif
};
//
// A base class for reference counted classes, otherwise known as a cheap
// knock-off of WebKit's RefCounted<T> class. To use this, just extend your
// class from it like so:
//
// class MyFoo : public base::RefCounted<MyFoo> {
// ...
// private:
// friend class base::RefCounted<MyFoo>;
// ~MyFoo();
// };
//
// You should always make your destructor non-public, to avoid any code deleting
// the object accidentally while there are references to it.
//
//
// The ref count manipulation in RefCounted is NOT thread safe and has DCHECKs
// to trap unsafe cross thread usage. A subclass instance of RefCounted can be
// passed to another execution sequence only when its ref count is 1. If the ref
// count is more than 1, the RefCounted class verifies the ref updates are made
// on the same execution sequence as the previous ones. The subclass can also
// manually call IsOnValidSequence to trap other non-thread-safe accesses; see
// the documentation for that method.
//
//
// The reference count starts from zero by default, and we intend to migrate
// to a start-from-one ref count. Add REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() to
// the ref counted class to opt in.
//
// If an object has a start-from-one ref count, the first scoped_refptr needs to
// be created by base::AdoptRef() or base::MakeRefCounted(). We can use
// base::MakeRefCounted() to create both types of ref counted objects.
//
// The motivations to use start-from-one ref count are:
// - Start-from-one ref count doesn't need the ref count increment for the
// first reference.
//   - It can detect an invalid object acquisition for an object that is being
//     deleted and has a zero ref count. That tends to happen with a custom
//     deleter that delays the deletion.
// TODO(tzik): Implement invalid acquisition detection.
//   - Behavior parity with Blink's WTF::RefCounted, whose count starts from
//     one. A start-from-one ref count is also a step toward merging
//     WTF::RefCounted into base::RefCounted.
//
#define REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() \
static constexpr ::base::subtle::StartRefCountFromOneTag \
kRefCountPreference = ::base::subtle::kStartRefCountFromOneTag
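// Example (illustrative sketch of opting in to a start-from-one ref count):
//
//   class MyBar : public base::RefCounted<MyBar> {
//    public:
//     REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE();
//     ...
//   };
//
//   // OK: MakeRefCounted() adopts the initial reference.
//   scoped_refptr<MyBar> bar = base::MakeRefCounted<MyBar>();
//   // scoped_refptr<MyBar>(new MyBar()) would trip the adoption DCHECK in
//   // AddRef().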
template <class T, typename Traits>
class RefCounted;
template <typename T>
struct DefaultRefCountedTraits {
static void Destruct(const T* x) {
RefCounted<T, DefaultRefCountedTraits>::DeleteInternal(x);
}
};
template <class T, typename Traits = DefaultRefCountedTraits<T>>
class RefCounted : public subtle::RefCountedBase {
public:
static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
subtle::kStartRefCountFromZeroTag;
RefCounted() : subtle::RefCountedBase(T::kRefCountPreference) {}
void AddRef() const {
subtle::RefCountedBase::AddRef();
}
void Release() const {
if (subtle::RefCountedBase::Release()) {
// Prune the code paths which the static analyzer may take to simulate
// object destruction. Use-after-free errors aren't possible given the
// lifetime guarantees of the refcounting system.
ANALYZER_SKIP_THIS_PATH();
Traits::Destruct(static_cast<const T*>(this));
}
}
protected:
~RefCounted() = default;
private:
friend struct DefaultRefCountedTraits<T>;
template <typename U>
static void DeleteInternal(const U* x) {
delete x;
}
DISALLOW_COPY_AND_ASSIGN(RefCounted);
};
// Forward declaration.
template <class T, typename Traits> class RefCountedThreadSafe;
// Default traits for RefCountedThreadSafe<T>. Deletes the object when its ref
// count reaches 0. Overload to delete it on a different thread etc.
template<typename T>
struct DefaultRefCountedThreadSafeTraits {
static void Destruct(const T* x) {
// Delete through RefCountedThreadSafe so that child classes only need to
// befriend RefCountedThreadSafe instead of this struct, which is an
// implementation detail.
RefCountedThreadSafe<T,
DefaultRefCountedThreadSafeTraits>::DeleteInternal(x);
}
};
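// Illustrative sketch of a custom trait (assumed, not part of this header): a
// type that must always be destroyed on a specific task runner could supply
//
//   struct DeleteOnBackendSequence {
//     static void Destruct(const MyObject* obj) {
//       GetBackendTaskRunner()->DeleteSoon(FROM_HERE, obj);  // hypothetical
//     }
//   };
//
// and derive from RefCountedThreadSafe<MyObject, DeleteOnBackendSequence>.
// For the common case of sequence-affine deletion, prefer
// RefCountedDeleteOnSequence instead.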
//
// A thread-safe variant of RefCounted<T>
//
// class MyFoo : public base::RefCountedThreadSafe<MyFoo> {
// ...
// };
//
// If you're using the default trait, then you should add compile time
// asserts that no one else is deleting your object. i.e.
// private:
// friend class base::RefCountedThreadSafe<MyFoo>;
// ~MyFoo();
//
// We can use REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE() with RefCountedThreadSafe
// too. See the comment above the RefCounted definition for details.
template <class T, typename Traits = DefaultRefCountedThreadSafeTraits<T> >
class RefCountedThreadSafe : public subtle::RefCountedThreadSafeBase {
public:
static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
subtle::kStartRefCountFromZeroTag;
explicit RefCountedThreadSafe()
: subtle::RefCountedThreadSafeBase(T::kRefCountPreference) {}
void AddRef() const { AddRefImpl(T::kRefCountPreference); }
void Release() const {
if (subtle::RefCountedThreadSafeBase::Release()) {
ANALYZER_SKIP_THIS_PATH();
Traits::Destruct(static_cast<const T*>(this));
}
}
protected:
~RefCountedThreadSafe() = default;
private:
friend struct DefaultRefCountedThreadSafeTraits<T>;
template <typename U>
static void DeleteInternal(const U* x) {
delete x;
}
void AddRefImpl(subtle::StartRefCountFromZeroTag) const {
subtle::RefCountedThreadSafeBase::AddRef();
}
void AddRefImpl(subtle::StartRefCountFromOneTag) const {
subtle::RefCountedThreadSafeBase::AddRefWithCheck();
}
DISALLOW_COPY_AND_ASSIGN(RefCountedThreadSafe);
};
//
// A thread-safe wrapper for some piece of data so we can place other
// things in scoped_refptrs<>.
//
template<typename T>
class RefCountedData
: public base::RefCountedThreadSafe< base::RefCountedData<T> > {
public:
RefCountedData() : data() {}
RefCountedData(const T& in_value) : data(in_value) {}
RefCountedData(T&& in_value) : data(std::move(in_value)) {}
T data;
private:
friend class base::RefCountedThreadSafe<base::RefCountedData<T> >;
~RefCountedData() = default;
};
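// Example (illustrative): sharing a mutable value between callbacks without
// defining a dedicated ref-counted class:
//
//   auto counter = base::MakeRefCounted<base::RefCountedData<int>>(0);
//   counter->data++;  // Every holder of the scoped_refptr sees the update.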
template <typename T>
bool operator==(const RefCountedData<T>& lhs, const RefCountedData<T>& rhs) {
return lhs.data == rhs.data;
}
template <typename T>
bool operator!=(const RefCountedData<T>& lhs, const RefCountedData<T>& rhs) {
return !(lhs == rhs);
}
} // namespace base
#endif // BASE_MEMORY_REF_COUNTED_H_

View file

@@ -0,0 +1,90 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_
#define BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_
#include <utility>
#include "base/location.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/sequenced_task_runner.h"
namespace base {
// RefCountedDeleteOnSequence is similar to RefCountedThreadSafe, and ensures
// that the object will be deleted on a specified sequence.
//
// Sample usage:
// class Foo : public RefCountedDeleteOnSequence<Foo> {
//
// Foo(scoped_refptr<SequencedTaskRunner> task_runner)
// : RefCountedDeleteOnSequence<Foo>(std::move(task_runner)) {}
// ...
// private:
// friend class RefCountedDeleteOnSequence<Foo>;
// friend class DeleteHelper<Foo>;
//
// ~Foo();
// };
template <class T>
class RefCountedDeleteOnSequence : public subtle::RefCountedThreadSafeBase {
public:
static constexpr subtle::StartRefCountFromZeroTag kRefCountPreference =
subtle::kStartRefCountFromZeroTag;
// A SequencedTaskRunner for the current sequence can be acquired by calling
// SequencedTaskRunnerHandle::Get().
RefCountedDeleteOnSequence(
scoped_refptr<SequencedTaskRunner> owning_task_runner)
: subtle::RefCountedThreadSafeBase(T::kRefCountPreference),
owning_task_runner_(std::move(owning_task_runner)) {
DCHECK(owning_task_runner_);
}
void AddRef() const { AddRefImpl(T::kRefCountPreference); }
void Release() const {
if (subtle::RefCountedThreadSafeBase::Release())
DestructOnSequence();
}
protected:
friend class DeleteHelper<RefCountedDeleteOnSequence>;
~RefCountedDeleteOnSequence() = default;
SequencedTaskRunner* owning_task_runner() {
return owning_task_runner_.get();
}
const SequencedTaskRunner* owning_task_runner() const {
return owning_task_runner_.get();
}
private:
void DestructOnSequence() const {
const T* t = static_cast<const T*>(this);
if (owning_task_runner_->RunsTasksInCurrentSequence())
delete t;
else
owning_task_runner_->DeleteSoon(FROM_HERE, t);
}
void AddRefImpl(subtle::StartRefCountFromZeroTag) const {
subtle::RefCountedThreadSafeBase::AddRef();
}
void AddRefImpl(subtle::StartRefCountFromOneTag) const {
subtle::RefCountedThreadSafeBase::AddRefWithCheck();
}
const scoped_refptr<SequencedTaskRunner> owning_task_runner_;
DISALLOW_COPY_AND_ASSIGN(RefCountedDeleteOnSequence);
};
} // namespace base
#endif // BASE_MEMORY_REF_COUNTED_DELETE_ON_SEQUENCE_H_

View file

@@ -0,0 +1,112 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/ref_counted_memory.h"
#include <utility>
#include "base/logging.h"
#include "base/memory/read_only_shared_memory_region.h"
namespace base {
bool RefCountedMemory::Equals(
const scoped_refptr<RefCountedMemory>& other) const {
return other.get() &&
size() == other->size() &&
(memcmp(front(), other->front(), size()) == 0);
}
RefCountedMemory::RefCountedMemory() = default;
RefCountedMemory::~RefCountedMemory() = default;
const unsigned char* RefCountedStaticMemory::front() const {
return data_;
}
size_t RefCountedStaticMemory::size() const {
return length_;
}
RefCountedStaticMemory::~RefCountedStaticMemory() = default;
RefCountedBytes::RefCountedBytes() = default;
RefCountedBytes::RefCountedBytes(const std::vector<unsigned char>& initializer)
: data_(initializer) {
}
RefCountedBytes::RefCountedBytes(const unsigned char* p, size_t size)
: data_(p, p + size) {}
RefCountedBytes::RefCountedBytes(size_t size) : data_(size, 0) {}
scoped_refptr<RefCountedBytes> RefCountedBytes::TakeVector(
std::vector<unsigned char>* to_destroy) {
auto bytes = MakeRefCounted<RefCountedBytes>();
bytes->data_.swap(*to_destroy);
return bytes;
}
const unsigned char* RefCountedBytes::front() const {
// STL will assert if we do front() on an empty vector, but calling code
// expects a NULL.
return size() ? &data_.front() : nullptr;
}
size_t RefCountedBytes::size() const {
return data_.size();
}
RefCountedBytes::~RefCountedBytes() = default;
RefCountedString::RefCountedString() = default;
RefCountedString::~RefCountedString() = default;
// static
scoped_refptr<RefCountedString> RefCountedString::TakeString(
std::string* to_destroy) {
auto self = MakeRefCounted<RefCountedString>();
to_destroy->swap(self->data_);
return self;
}
const unsigned char* RefCountedString::front() const {
return data_.empty() ? nullptr
: reinterpret_cast<const unsigned char*>(data_.data());
}
size_t RefCountedString::size() const {
return data_.size();
}
RefCountedSharedMemoryMapping::RefCountedSharedMemoryMapping(
ReadOnlySharedMemoryMapping mapping)
: mapping_(std::move(mapping)), size_(mapping_.size()) {
DCHECK_GT(size_, 0U);
}
RefCountedSharedMemoryMapping::~RefCountedSharedMemoryMapping() = default;
const unsigned char* RefCountedSharedMemoryMapping::front() const {
return static_cast<const unsigned char*>(mapping_.memory());
}
size_t RefCountedSharedMemoryMapping::size() const {
return size_;
}
// static
scoped_refptr<RefCountedSharedMemoryMapping>
RefCountedSharedMemoryMapping::CreateFromWholeRegion(
const ReadOnlySharedMemoryRegion& region) {
ReadOnlySharedMemoryMapping mapping = region.Map();
if (!mapping.IsValid())
return nullptr;
return MakeRefCounted<RefCountedSharedMemoryMapping>(std::move(mapping));
}
} // namespace base

View file

@@ -0,0 +1,175 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_REF_COUNTED_MEMORY_H_
#define BASE_MEMORY_REF_COUNTED_MEMORY_H_
#include <stddef.h>
#include <memory>
#include <string>
#include <vector>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/shared_memory_mapping.h"
namespace base {
class ReadOnlySharedMemoryRegion;
// A generic interface to memory. This object is reference counted because most
// of its subclasses own the data they carry, and this interface needs to
// support heterogeneous containers of these different types of memory.
class BASE_EXPORT RefCountedMemory
: public RefCountedThreadSafe<RefCountedMemory> {
public:
// Retrieves a pointer to the beginning of the data we point to. If the data
// is empty, this will return NULL.
virtual const unsigned char* front() const = 0;
// Size of the memory pointed to.
virtual size_t size() const = 0;
// Returns true if |other| is byte for byte equal.
bool Equals(const scoped_refptr<RefCountedMemory>& other) const;
// Handy method to simplify calling front() with a reinterpret_cast.
template<typename T> const T* front_as() const {
return reinterpret_cast<const T*>(front());
}
// Alias for front() to make it possible for RefCountedMemory to implicitly
// convert to span.
const unsigned char* data() { return front(); }
protected:
friend class RefCountedThreadSafe<RefCountedMemory>;
RefCountedMemory();
virtual ~RefCountedMemory();
};
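// Usage sketch (illustrative; EncodeSomething() is hypothetical): handing bytes
// to a consumer that only needs the RefCountedMemory interface, without copying
// the underlying buffer:
//
//   std::vector<unsigned char> png_data = EncodeSomething();
//   scoped_refptr<base::RefCountedMemory> memory =
//       base::RefCountedBytes::TakeVector(&png_data);
//   const uint8_t* bytes = memory->front_as<uint8_t>();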
// An implementation of RefCountedMemory, where the ref counting does not
// matter.
class BASE_EXPORT RefCountedStaticMemory : public RefCountedMemory {
public:
RefCountedStaticMemory() : data_(nullptr), length_(0) {}
RefCountedStaticMemory(const void* data, size_t length)
: data_(static_cast<const unsigned char*>(length ? data : nullptr)),
length_(length) {}
// RefCountedMemory:
const unsigned char* front() const override;
size_t size() const override;
private:
~RefCountedStaticMemory() override;
const unsigned char* data_;
size_t length_;
DISALLOW_COPY_AND_ASSIGN(RefCountedStaticMemory);
};
// An implementation of RefCountedMemory, where the data is stored in an STL
// vector.
class BASE_EXPORT RefCountedBytes : public RefCountedMemory {
public:
RefCountedBytes();
// Constructs a RefCountedBytes object by copying from |initializer|.
explicit RefCountedBytes(const std::vector<unsigned char>& initializer);
// Constructs a RefCountedBytes object by copying |size| bytes from |p|.
RefCountedBytes(const unsigned char* p, size_t size);
// Constructs a RefCountedBytes object by zero-initializing a new vector of
// |size| bytes.
explicit RefCountedBytes(size_t size);
// Constructs a RefCountedBytes object by performing a swap. (To
// non-destructively build a RefCountedBytes, use the constructor that takes a
// vector.)
static scoped_refptr<RefCountedBytes> TakeVector(
std::vector<unsigned char>* to_destroy);
// RefCountedMemory:
const unsigned char* front() const override;
size_t size() const override;
const std::vector<unsigned char>& data() const { return data_; }
std::vector<unsigned char>& data() { return data_; }
// Non-const versions of front() and front_as() that are simply shorthand for
// data().data().
unsigned char* front() { return data_.data(); }
template <typename T>
T* front_as() {
return reinterpret_cast<T*>(front());
}
private:
~RefCountedBytes() override;
std::vector<unsigned char> data_;
DISALLOW_COPY_AND_ASSIGN(RefCountedBytes);
};
// An implementation of RefCountedMemory, where the bytes are stored in an STL
// string. Use this if your data naturally arrives in that format.
class BASE_EXPORT RefCountedString : public RefCountedMemory {
public:
RefCountedString();
// Constructs a RefCountedString object by performing a swap. (To
// non-destructively build a RefCountedString, use the default constructor and
// copy into object->data()).
static scoped_refptr<RefCountedString> TakeString(std::string* to_destroy);
// RefCountedMemory:
const unsigned char* front() const override;
size_t size() const override;
const std::string& data() const { return data_; }
std::string& data() { return data_; }
private:
~RefCountedString() override;
std::string data_;
DISALLOW_COPY_AND_ASSIGN(RefCountedString);
};
// An implementation of RefCountedMemory, where the bytes are stored in
// ReadOnlySharedMemoryMapping.
class BASE_EXPORT RefCountedSharedMemoryMapping : public RefCountedMemory {
public:
// Constructs a RefCountedMemory object by taking ownership of an already
// mapped ReadOnlySharedMemoryMapping object.
explicit RefCountedSharedMemoryMapping(ReadOnlySharedMemoryMapping mapping);
// Convenience method to map all of |region| and take ownership of the
// mapping. Returns an empty scoped_refptr if the map operation fails.
static scoped_refptr<RefCountedSharedMemoryMapping> CreateFromWholeRegion(
const ReadOnlySharedMemoryRegion& region);
// RefCountedMemory:
const unsigned char* front() const override;
size_t size() const override;
private:
~RefCountedSharedMemoryMapping() override;
const ReadOnlySharedMemoryMapping mapping_;
const size_t size_;
DISALLOW_COPY_AND_ASSIGN(RefCountedSharedMemoryMapping);
};
} // namespace base
#endif // BASE_MEMORY_REF_COUNTED_MEMORY_H_

View file

@@ -0,0 +1,25 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_SCOPED_POLICY_H_
#define BASE_MEMORY_SCOPED_POLICY_H_
namespace base {
namespace scoped_policy {
// Defines the ownership policy for a scoped object.
enum OwnershipPolicy {
// The scoped object takes ownership of an object by taking over an existing
// ownership claim.
ASSUME,
// The scoped object will retain the object and any initial ownership is
// not changed.
RETAIN
};
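// Illustrative sketch (assumed, not part of this header): scoped wrappers such
// as base::ScopedCFTypeRef take the policy as a constructor argument, e.g.
//
//   base::ScopedCFTypeRef<CFStringRef> str(
//       CFStringCreateCopy(nullptr, other), base::scoped_policy::ASSUME);
//
// ASSUME takes over the +1 reference returned by the Create function, while
// RETAIN would add an additional reference of its own.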
} // namespace scoped_policy
} // namespace base
#endif // BASE_MEMORY_SCOPED_POLICY_H_

View file

@@ -0,0 +1,375 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_SCOPED_REFPTR_H_
#define BASE_MEMORY_SCOPED_REFPTR_H_
#include <stddef.h>
#include <iosfwd>
#include <type_traits>
#include <utility>
#include "base/compiler_specific.h"
#include "base/logging.h"
#include "base/macros.h"
template <class T>
class scoped_refptr;
namespace base {
template <class, typename>
class RefCounted;
template <class, typename>
class RefCountedThreadSafe;
class SequencedTaskRunner;
class WrappedPromise;
template <typename T>
scoped_refptr<T> AdoptRef(T* t);
namespace internal {
class BasePromise;
} // namespace internal
namespace subtle {
enum AdoptRefTag { kAdoptRefTag };
enum StartRefCountFromZeroTag { kStartRefCountFromZeroTag };
enum StartRefCountFromOneTag { kStartRefCountFromOneTag };
template <typename T, typename U, typename V>
constexpr bool IsRefCountPreferenceOverridden(const T*,
const RefCounted<U, V>*) {
return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
std::decay_t<decltype(U::kRefCountPreference)>>::value;
}
template <typename T, typename U, typename V>
constexpr bool IsRefCountPreferenceOverridden(
const T*,
const RefCountedThreadSafe<U, V>*) {
return !std::is_same<std::decay_t<decltype(T::kRefCountPreference)>,
std::decay_t<decltype(U::kRefCountPreference)>>::value;
}
constexpr bool IsRefCountPreferenceOverridden(...) {
return false;
}
} // namespace subtle
// Creates a scoped_refptr from a raw pointer without incrementing the reference
// count. Use this only for a newly created object whose reference count starts
// from 1 instead of 0.
template <typename T>
scoped_refptr<T> AdoptRef(T* obj) {
using Tag = std::decay_t<decltype(T::kRefCountPreference)>;
static_assert(std::is_same<subtle::StartRefCountFromOneTag, Tag>::value,
"Use AdoptRef only if the reference count starts from one.");
DCHECK(obj);
DCHECK(obj->HasOneRef());
obj->Adopted();
return scoped_refptr<T>(obj, subtle::kAdoptRefTag);
}
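// Example (illustrative): for a start-from-one type (see
// REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE in ref_counted.h),
//
//   scoped_refptr<MyBar> bar = base::AdoptRef(new MyBar());
//
// takes over the initial reference without incrementing the count. Prefer
// base::MakeRefCounted<MyBar>() where possible; it adopts automatically when
// needed.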
namespace subtle {
template <typename T>
scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromZeroTag) {
return scoped_refptr<T>(obj);
}
template <typename T>
scoped_refptr<T> AdoptRefIfNeeded(T* obj, StartRefCountFromOneTag) {
return AdoptRef(obj);
}
} // namespace subtle
// Constructs an instance of T, which is a ref counted type, and wraps the
// object into a scoped_refptr<T>.
template <typename T, typename... Args>
scoped_refptr<T> MakeRefCounted(Args&&... args) {
T* obj = new T(std::forward<Args>(args)...);
return subtle::AdoptRefIfNeeded(obj, T::kRefCountPreference);
}
// Takes an instance of T, which is a ref counted type, and wraps the object
// into a scoped_refptr<T>.
template <typename T>
scoped_refptr<T> WrapRefCounted(T* t) {
return scoped_refptr<T>(t);
}
} // namespace base
//
// A smart pointer class for reference counted objects. Use this class instead
// of calling AddRef and Release manually on a reference counted object to
// avoid common memory leaks caused by forgetting to Release an object
// reference. Sample usage:
//
// class MyFoo : public RefCounted<MyFoo> {
// ...
// private:
// friend class RefCounted<MyFoo>; // Allow destruction by RefCounted<>.
// ~MyFoo(); // Destructor must be private/protected.
// };
//
// void some_function() {
// scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
// foo->Method(param);
// // |foo| is released when this function returns
// }
//
// void some_other_function() {
// scoped_refptr<MyFoo> foo = MakeRefCounted<MyFoo>();
// ...
// foo.reset(); // explicitly releases |foo|
// ...
// if (foo)
// foo->Method(param);
// }
//
// The above examples show how scoped_refptr<T> acts like a pointer to T.
// Given two scoped_refptr<T> classes, it is also possible to exchange
// references between the two objects, like so:
//
// {
// scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
// scoped_refptr<MyFoo> b;
//
// b.swap(a);
// // now, |b| references the MyFoo object, and |a| references nullptr.
// }
//
// To make both |a| and |b| in the above example reference the same MyFoo
// object, simply use the assignment operator:
//
// {
// scoped_refptr<MyFoo> a = MakeRefCounted<MyFoo>();
// scoped_refptr<MyFoo> b;
//
// b = a;
// // now, |a| and |b| each own a reference to the same MyFoo object.
// }
//
// Also see Chromium's ownership and calling conventions:
// https://chromium.googlesource.com/chromium/src/+/lkgr/styleguide/c++/c++.md#object-ownership-and-calling-conventions
// Specifically:
// If the function (at least sometimes) takes a ref on a refcounted object,
// declare the param as scoped_refptr<T>. The caller can decide whether it
// wishes to transfer ownership (by calling std::move(t) when passing t) or
// retain its ref (by simply passing t directly).
// In other words, use scoped_refptr like you would a std::unique_ptr except
// in the odd case where it's required to hold on to a ref while handing one
// to another component (if a component merely needs to use t on the stack
// without keeping a ref: pass t as a raw T*).
template <class T>
class scoped_refptr {
public:
typedef T element_type;
constexpr scoped_refptr() = default;
// Allow implicit construction from nullptr.
constexpr scoped_refptr(std::nullptr_t) {}
// Constructs from a raw pointer. Note that this constructor allows implicit
// conversion from T* to scoped_refptr<T> which is strongly discouraged. If
// you are creating a new ref-counted object please use
// base::MakeRefCounted<T>() or base::WrapRefCounted<T>(). Otherwise you
// should move or copy construct from an existing scoped_refptr<T> to the
// ref-counted object.
scoped_refptr(T* p) : ptr_(p) {
if (ptr_)
AddRef(ptr_);
}
// Copy constructor. This is required in addition to the copy conversion
// constructor below.
scoped_refptr(const scoped_refptr& r) : scoped_refptr(r.ptr_) {}
// Copy conversion constructor.
template <typename U,
typename = typename std::enable_if<
std::is_convertible<U*, T*>::value>::type>
scoped_refptr(const scoped_refptr<U>& r) : scoped_refptr(r.ptr_) {}
// Move constructor. This is required in addition to the move conversion
// constructor below.
scoped_refptr(scoped_refptr&& r) noexcept : ptr_(r.ptr_) { r.ptr_ = nullptr; }
// Move conversion constructor.
template <typename U,
typename = typename std::enable_if<
std::is_convertible<U*, T*>::value>::type>
scoped_refptr(scoped_refptr<U>&& r) noexcept : ptr_(r.ptr_) {
r.ptr_ = nullptr;
}
~scoped_refptr() {
static_assert(!base::subtle::IsRefCountPreferenceOverridden(
static_cast<T*>(nullptr), static_cast<T*>(nullptr)),
"It's unsafe to override the ref count preference."
" Please remove REQUIRE_ADOPTION_FOR_REFCOUNTED_TYPE"
" from subclasses.");
if (ptr_)
Release(ptr_);
}
T* get() const { return ptr_; }
T& operator*() const {
DCHECK(ptr_);
return *ptr_;
}
T* operator->() const {
DCHECK(ptr_);
return ptr_;
}
scoped_refptr& operator=(std::nullptr_t) {
reset();
return *this;
}
scoped_refptr& operator=(T* p) { return *this = scoped_refptr(p); }
// Unified assignment operator.
scoped_refptr& operator=(scoped_refptr r) noexcept {
swap(r);
return *this;
}
// Sets managed object to null and releases reference to the previous managed
// object, if it existed.
void reset() { scoped_refptr().swap(*this); }
void swap(scoped_refptr& r) noexcept { std::swap(ptr_, r.ptr_); }
explicit operator bool() const { return ptr_ != nullptr; }
template <typename U>
bool operator==(const scoped_refptr<U>& rhs) const {
return ptr_ == rhs.get();
}
template <typename U>
bool operator!=(const scoped_refptr<U>& rhs) const {
return !operator==(rhs);
}
template <typename U>
bool operator<(const scoped_refptr<U>& rhs) const {
return ptr_ < rhs.get();
}
protected:
T* ptr_ = nullptr;
private:
template <typename U>
friend scoped_refptr<U> base::AdoptRef(U*);
friend class ::base::SequencedTaskRunner;
// Friend access so these classes can use the constructor below as part of a
// binary size optimization.
friend class ::base::internal::BasePromise;
friend class ::base::WrappedPromise;
// Returns the owned pointer (if any), releasing ownership to the caller. The
// caller is responsible for managing the lifetime of the reference.
T* release();
scoped_refptr(T* p, base::subtle::AdoptRefTag) : ptr_(p) {}
// Friend required for move constructors that set r.ptr_ to null.
template <typename U>
friend class scoped_refptr;
// Non-inline helpers to allow:
// class Opaque;
// extern template class scoped_refptr<Opaque>;
// Otherwise the compiler will complain that Opaque is an incomplete type.
static void AddRef(T* ptr);
static void Release(T* ptr);
};
template <typename T>
T* scoped_refptr<T>::release() {
T* ptr = ptr_;
ptr_ = nullptr;
return ptr;
}
// static
template <typename T>
void scoped_refptr<T>::AddRef(T* ptr) {
ptr->AddRef();
}
// static
template <typename T>
void scoped_refptr<T>::Release(T* ptr) {
ptr->Release();
}
template <typename T, typename U>
bool operator==(const scoped_refptr<T>& lhs, const U* rhs) {
return lhs.get() == rhs;
}
template <typename T, typename U>
bool operator==(const T* lhs, const scoped_refptr<U>& rhs) {
return lhs == rhs.get();
}
template <typename T>
bool operator==(const scoped_refptr<T>& lhs, std::nullptr_t null) {
return !static_cast<bool>(lhs);
}
template <typename T>
bool operator==(std::nullptr_t null, const scoped_refptr<T>& rhs) {
return !static_cast<bool>(rhs);
}
template <typename T, typename U>
bool operator!=(const scoped_refptr<T>& lhs, const U* rhs) {
return !operator==(lhs, rhs);
}
template <typename T, typename U>
bool operator!=(const T* lhs, const scoped_refptr<U>& rhs) {
return !operator==(lhs, rhs);
}
template <typename T>
bool operator!=(const scoped_refptr<T>& lhs, std::nullptr_t null) {
return !operator==(lhs, null);
}
template <typename T>
bool operator!=(std::nullptr_t null, const scoped_refptr<T>& rhs) {
return !operator==(null, rhs);
}
template <typename T>
std::ostream& operator<<(std::ostream& out, const scoped_refptr<T>& p) {
return out << p.get();
}
template <typename T>
void swap(scoped_refptr<T>& lhs, scoped_refptr<T>& rhs) noexcept {
lhs.swap(rhs);
}
#endif // BASE_MEMORY_SCOPED_REFPTR_H_

View file

@@ -0,0 +1,52 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_SHARED_MEMORY_HOOKS_H_
#define BASE_MEMORY_SHARED_MEMORY_HOOKS_H_
#include "base/memory/read_only_shared_memory_region.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "base/memory/writable_shared_memory_region.h"
// TODO(https://crbug.com/1062136): This can be removed when Cloud Print support
// is dropped.
namespace content {
struct MainFunctionParams;
} // namespace content
int CloudPrintServiceProcessMain(const content::MainFunctionParams& parameters);
namespace mojo {
class SharedMemoryUtils;
} // namespace mojo
namespace base {
class SharedMemoryHooks {
public:
SharedMemoryHooks() = delete;
private:
friend class SharedMemoryHooksTest;
friend int ::CloudPrintServiceProcessMain(
const content::MainFunctionParams& parameters);
friend mojo::SharedMemoryUtils;
// Allows shared memory region creation to be hooked. Useful for sandboxed
// processes that are restricted from invoking the platform APIs directly.
// Intentionally private so callers need to be explicitly friended.
static void SetCreateHooks(
ReadOnlySharedMemoryRegion::CreateFunction* read_only_hook,
UnsafeSharedMemoryRegion::CreateFunction* unsafe_hook,
WritableSharedMemoryRegion::CreateFunction* writable_hook) {
ReadOnlySharedMemoryRegion::set_create_hook(read_only_hook);
UnsafeSharedMemoryRegion::set_create_hook(unsafe_hook);
WritableSharedMemoryRegion::set_create_hook(writable_hook);
}
};
} // namespace base
#endif // BASE_MEMORY_SHARED_MEMORY_HOOKS_H_

View file

@ -0,0 +1,114 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/shared_memory_mapping.h"
#include <utility>
#include "base/logging.h"
#include "base/memory/shared_memory_security_policy.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
#if defined(OS_POSIX)
#include <sys/mman.h>
#endif
#if defined(OS_WIN)
#include <aclapi.h>
#endif
#if defined(OS_MACOSX) && !defined(OS_IOS)
#include <mach/mach_vm.h>
#include "base/mac/mach_logging.h"
#endif
#if defined(OS_FUCHSIA)
#include <lib/zx/vmar.h>
#include "base/fuchsia/fuchsia_logging.h"
#endif
namespace base {
SharedMemoryMapping::SharedMemoryMapping() = default;
SharedMemoryMapping::SharedMemoryMapping(SharedMemoryMapping&& mapping) noexcept
: memory_(std::exchange(mapping.memory_, nullptr)),
size_(mapping.size_),
mapped_size_(mapping.mapped_size_),
guid_(mapping.guid_) {}
SharedMemoryMapping& SharedMemoryMapping::operator=(
SharedMemoryMapping&& mapping) noexcept {
Unmap();
memory_ = std::exchange(mapping.memory_, nullptr);
size_ = mapping.size_;
mapped_size_ = mapping.mapped_size_;
guid_ = mapping.guid_;
return *this;
}
SharedMemoryMapping::~SharedMemoryMapping() {
Unmap();
}
SharedMemoryMapping::SharedMemoryMapping(void* memory,
size_t size,
size_t mapped_size,
const UnguessableToken& guid)
: memory_(memory), size_(size), mapped_size_(mapped_size), guid_(guid) {
SharedMemoryTracker::GetInstance()->IncrementMemoryUsage(*this);
}
void SharedMemoryMapping::Unmap() {
if (!IsValid())
return;
SharedMemorySecurityPolicy::ReleaseReservationForMapping(size_);
SharedMemoryTracker::GetInstance()->DecrementMemoryUsage(*this);
#if defined(OS_WIN)
if (!UnmapViewOfFile(memory_))
DPLOG(ERROR) << "UnmapViewOfFile";
#elif defined(OS_FUCHSIA)
uintptr_t addr = reinterpret_cast<uintptr_t>(memory_);
zx_status_t status = zx::vmar::root_self()->unmap(addr, mapped_size_);
if (status != ZX_OK)
ZX_DLOG(ERROR, status) << "zx_vmar_unmap";
#elif defined(OS_MACOSX) && !defined(OS_IOS)
kern_return_t kr = mach_vm_deallocate(
mach_task_self(), reinterpret_cast<mach_vm_address_t>(memory_),
mapped_size_);
MACH_DLOG_IF(ERROR, kr != KERN_SUCCESS, kr) << "mach_vm_deallocate";
#else
if (munmap(memory_, mapped_size_) < 0)
DPLOG(ERROR) << "munmap";
#endif
}
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping() = default;
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
ReadOnlySharedMemoryMapping&&) noexcept = default;
ReadOnlySharedMemoryMapping& ReadOnlySharedMemoryMapping::operator=(
ReadOnlySharedMemoryMapping&&) noexcept = default;
ReadOnlySharedMemoryMapping::ReadOnlySharedMemoryMapping(
void* address,
size_t size,
size_t mapped_size,
const UnguessableToken& guid)
: SharedMemoryMapping(address, size, mapped_size, guid) {}
WritableSharedMemoryMapping::WritableSharedMemoryMapping() = default;
WritableSharedMemoryMapping::WritableSharedMemoryMapping(
WritableSharedMemoryMapping&&) noexcept = default;
WritableSharedMemoryMapping& WritableSharedMemoryMapping::operator=(
WritableSharedMemoryMapping&&) noexcept = default;
WritableSharedMemoryMapping::WritableSharedMemoryMapping(
void* address,
size_t size,
size_t mapped_size,
const UnguessableToken& guid)
: SharedMemoryMapping(address, size, mapped_size, guid) {}
} // namespace base

View file

@ -0,0 +1,252 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
#define BASE_MEMORY_SHARED_MEMORY_MAPPING_H_
#include <cstddef>
#include <type_traits>
#include "base/containers/buffer_iterator.h"
#include "base/containers/span.h"
#include "base/macros.h"
#include "base/unguessable_token.h"
namespace base {
namespace subtle {
class PlatformSharedMemoryRegion;
} // namespace subtle
// Base class for scoped handles to a shared memory mapping created from a
// shared memory region. Created shared memory mappings remain valid even if the
// creator region is transferred or destroyed.
//
// Each mapping has an UnguessableToken that identifies the shared memory region
// it was created from. This is used for memory metrics, to avoid overcounting
// shared memory.
class BASE_EXPORT SharedMemoryMapping {
public:
// Default constructor initializes an invalid instance.
SharedMemoryMapping();
// Move operations are allowed.
SharedMemoryMapping(SharedMemoryMapping&& mapping) noexcept;
SharedMemoryMapping& operator=(SharedMemoryMapping&& mapping) noexcept;
// Unmaps the region if the mapping is valid.
virtual ~SharedMemoryMapping();
// Returns true iff the mapping is valid. False means there is no
// corresponding area of memory.
bool IsValid() const { return memory_ != nullptr; }
// Returns the logical size of the mapping in bytes. This is precisely the
// size requested by whoever created the mapping, and it is always less than
// or equal to |mapped_size()|. This is undefined for invalid instances.
size_t size() const {
DCHECK(IsValid());
return size_;
}
// Returns the actual size of the mapping in bytes. This is always at least
// as large as |size()| but may be larger due to platform mapping alignment
// constraints. This is undefined for invalid instances.
size_t mapped_size() const {
DCHECK(IsValid());
return mapped_size_;
}
// Returns 128-bit GUID of the region this mapping belongs to.
const UnguessableToken& guid() const {
DCHECK(IsValid());
return guid_;
}
protected:
SharedMemoryMapping(void* address,
size_t size,
size_t mapped_size,
const UnguessableToken& guid);
void* raw_memory_ptr() const { return memory_; }
private:
friend class SharedMemoryTracker;
void Unmap();
void* memory_ = nullptr;
size_t size_ = 0;
size_t mapped_size_ = 0;
UnguessableToken guid_;
DISALLOW_COPY_AND_ASSIGN(SharedMemoryMapping);
};
// Class modeling a read-only mapping of a shared memory region into the
// current process' address space. This is created by ReadOnlySharedMemoryRegion
// instances.
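//
// Example (illustrative sketch; |region| is a hypothetical valid
// ReadOnlySharedMemoryRegion received from another process, and |FrameHeader|
// is a hypothetical trivially-copyable struct):
//
//   base::ReadOnlySharedMemoryMapping mapping = region.Map();
//   if (!mapping.IsValid())
//     return;
//   const FrameHeader* header = mapping.GetMemoryAs<FrameHeader>();
//   base::span<const uint8_t> bytes = mapping.GetMemoryAsSpan<uint8_t>();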
class BASE_EXPORT ReadOnlySharedMemoryMapping : public SharedMemoryMapping {
public:
// Default constructor initializes an invalid instance.
ReadOnlySharedMemoryMapping();
// Move operations are allowed.
ReadOnlySharedMemoryMapping(ReadOnlySharedMemoryMapping&&) noexcept;
ReadOnlySharedMemoryMapping& operator=(
ReadOnlySharedMemoryMapping&&) noexcept;
// Returns the base address of the mapping. This is read-only memory. This is
// page-aligned. This is nullptr for invalid instances.
const void* memory() const { return raw_memory_ptr(); }
// Returns a pointer to a page-aligned const T if the mapping is valid and
// large enough to contain a T, or nullptr otherwise.
template <typename T>
const T* GetMemoryAs() const {
static_assert(std::is_trivially_copyable<T>::value,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
return nullptr;
if (sizeof(T) > size())
return nullptr;
return static_cast<const T*>(raw_memory_ptr());
}
// Returns a span of const T. The number of elements is autodeduced from the
// size of the shared memory mapping. The number of elements may be
// autodeduced as zero, i.e. the mapping is invalid or the size of the mapping
// isn't large enough to contain even one T: in that case, an empty span
// will be returned. The first element, if any, is guaranteed to be
// page-aligned.
template <typename T>
span<const T> GetMemoryAsSpan() const {
static_assert(std::is_trivially_copyable<T>::value,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
return span<const T>();
size_t count = size() / sizeof(T);
return GetMemoryAsSpan<T>(count);
}
// Returns a span of const T with |count| elements if the mapping is valid and
// large enough to contain |count| elements, or an empty span otherwise. The
// first element, if any, is guaranteed to be page-aligned.
template <typename T>
span<const T> GetMemoryAsSpan(size_t count) const {
static_assert(std::is_trivially_copyable<T>::value,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
return span<const T>();
if (size() / sizeof(T) < count)
return span<const T>();
return span<const T>(static_cast<const T*>(raw_memory_ptr()), count);
}
// Returns a BufferIterator of const T.
template <typename T>
BufferIterator<const T> GetMemoryAsBufferIterator() const {
return BufferIterator<const T>(GetMemoryAsSpan<T>());
}
private:
friend class ReadOnlySharedMemoryRegion;
ReadOnlySharedMemoryMapping(void* address,
size_t size,
size_t mapped_size,
const UnguessableToken& guid);
DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedMemoryMapping);
};
// Class modeling a writable mapping of a shared memory region into the
// current process' address space. This is created by *SharedMemoryRegion
// instances.
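//
// Example (illustrative sketch; |region| is a hypothetical valid
// UnsafeSharedMemoryRegion owned by the caller):
//
//   base::WritableSharedMemoryMapping mapping = region.Map();
//   base::span<uint32_t> counters = mapping.GetMemoryAsSpan<uint32_t>();
//   if (!counters.empty())
//     counters[0] = 1;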
class BASE_EXPORT WritableSharedMemoryMapping : public SharedMemoryMapping {
public:
// Default constructor initializes an invalid instance.
WritableSharedMemoryMapping();
// Move operations are allowed.
WritableSharedMemoryMapping(WritableSharedMemoryMapping&&) noexcept;
WritableSharedMemoryMapping& operator=(
WritableSharedMemoryMapping&&) noexcept;
// Returns the base address of the mapping. This is writable memory. This is
// page-aligned. This is nullptr for invalid instances.
void* memory() const { return raw_memory_ptr(); }
// Returns a pointer to a page-aligned T if the mapping is valid and large
// enough to contain a T, or nullptr otherwise.
template <typename T>
T* GetMemoryAs() const {
static_assert(std::is_trivially_copyable<T>::value,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
return nullptr;
if (sizeof(T) > size())
return nullptr;
return static_cast<T*>(raw_memory_ptr());
}
// Returns a span of T. The number of elements is autodeduced from the size of
// the shared memory mapping. The number of elements may be autodeduced as
// zero, i.e. the mapping is invalid or the size of the mapping isn't large
// enough to contain even one T: in that case, an empty span will be returned.
// The first element, if any, is guaranteed to be page-aligned.
template <typename T>
span<T> GetMemoryAsSpan() const {
static_assert(std::is_trivially_copyable<T>::value,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
return span<T>();
size_t count = size() / sizeof(T);
return GetMemoryAsSpan<T>(count);
}
// Returns a span of T with |count| elements if the mapping is valid and large
// enough to contain |count| elements, or an empty span otherwise. The first
// element, if any, is guaranteed to be page-aligned.
template <typename T>
span<T> GetMemoryAsSpan(size_t count) const {
static_assert(std::is_trivially_copyable<T>::value,
"Copying non-trivially-copyable object across memory spaces "
"is dangerous");
if (!IsValid())
return span<T>();
if (size() / sizeof(T) < count)
return span<T>();
return span<T>(static_cast<T*>(raw_memory_ptr()), count);
}
// Returns a BufferIterator of T.
template <typename T>
BufferIterator<T> GetMemoryAsBufferIterator() {
return BufferIterator<T>(GetMemoryAsSpan<T>());
}
private:
friend WritableSharedMemoryMapping MapAtForTesting(
subtle::PlatformSharedMemoryRegion* region,
off_t offset,
size_t size);
friend class ReadOnlySharedMemoryRegion;
friend class WritableSharedMemoryRegion;
friend class UnsafeSharedMemoryRegion;
WritableSharedMemoryMapping(void* address,
size_t size,
size_t mapped_size,
const UnguessableToken& guid);
DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryMapping);
};
} // namespace base
#endif // BASE_MEMORY_SHARED_MEMORY_MAPPING_H_

View file

@ -0,0 +1,92 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/shared_memory_security_policy.h"
#include <algorithm>
#include <atomic>
#include "base/bits.h"
#include "base/numerics/checked_math.h"
#include "base/optional.h"
#include "base/process/process_metrics.h"
#include "build/build_config.h"
namespace base {
namespace {
// Note: pointers are 32 bits on all architectures in NaCl. See
// https://bugs.chromium.org/p/nativeclient/issues/detail?id=1162
#if defined(ARCH_CPU_32_BITS) || defined(OS_NACL)
// No effective limit on 32-bit, since there simply isn't enough address space
// for ASLR to be particularly effective.
constexpr size_t kTotalMappedSizeLimit = -1;
#elif defined(ARCH_CPU_64_BITS)
// 32 GB of mappings ought to be enough for anybody.
constexpr size_t kTotalMappedSizeLimit = 32ULL * 1024 * 1024 * 1024;
#endif
static std::atomic_size_t total_mapped_size_;
base::Optional<size_t> AlignWithPageSize(size_t size) {
#if defined(OS_WIN)
// TODO(crbug.com/210609): Matches alignment requirements defined in
// platform_shared_memory_region_win.cc:PlatformSharedMemoryRegion::Create.
// Remove this when NaCl is gone.
static const size_t kSectionSize = 65536;
const size_t page_size = std::max(kSectionSize, GetPageSize());
#else
const size_t page_size = GetPageSize();
#endif // defined(OS_WIN)
size_t rounded_size = bits::Align(size, page_size);
// Fail on overflow.
if (rounded_size < size)
return base::nullopt;
return rounded_size;
}
} // namespace
// static
bool SharedMemorySecurityPolicy::AcquireReservationForMapping(size_t size) {
size_t previous_mapped_size =
total_mapped_size_.load(std::memory_order_relaxed);
size_t total_mapped_size;
base::Optional<size_t> page_aligned_size = AlignWithPageSize(size);
if (!page_aligned_size)
return false;
// Relaxed memory ordering is all that's needed here, since atomicity is all
// that's required. If the value is stale, compare_exchange_weak() will fail
// and the loop will retry the operation with an updated total mapped size.
do {
if (!CheckAdd(previous_mapped_size, *page_aligned_size)
.AssignIfValid(&total_mapped_size)) {
return false;
}
if (total_mapped_size >= kTotalMappedSizeLimit)
return false;
} while (!total_mapped_size_.compare_exchange_weak(
previous_mapped_size, total_mapped_size, std::memory_order_relaxed,
std::memory_order_relaxed));
return true;
}
// static
void SharedMemorySecurityPolicy::ReleaseReservationForMapping(size_t size) {
// Note #1: relaxed memory ordering is sufficient since atomicity is all
// that's required.
// Note #2: |size| should never overflow when aligned to page size, since
// this should only be called if |AcquireReservationForMapping()| returned
// true.
total_mapped_size_.fetch_sub(size, std::memory_order_relaxed);
}
} // namespace base

View file

@ -0,0 +1,37 @@
// Copyright 2020 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_SHARED_MEMORY_SECURITY_POLICY_H_
#define BASE_MEMORY_SHARED_MEMORY_SECURITY_POLICY_H_
#include <stddef.h>
#include "base/compiler_specific.h"
namespace base {
namespace subtle {
class PlatformSharedMemoryRegion;
} // namespace subtle
// Helper to enforce a limit for the total amount of shared memory that can be
// mapped. This can help prevent an attacker from spraying the address space of
// a process with shared memory mappings to bypass ASLR. For more details, see
// https://googleprojectzero.blogspot.com/2019/04/virtually-unlimited-memory-escaping.html
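//
// Intended call pattern (illustrative sketch; real callers must be friended
// by the class below, since the methods are private):
//
//   if (!SharedMemorySecurityPolicy::AcquireReservationForMapping(size))
//     return false;  // Mapping would exceed the global limit.
//   // ... create the platform mapping ...
//   // Later, when the mapping is unmapped:
//   SharedMemorySecurityPolicy::ReleaseReservationForMapping(size);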
class SharedMemorySecurityPolicy {
private:
friend class subtle::PlatformSharedMemoryRegion;
friend class SharedMemoryMapping;
// Checks that a mapping with |size| can be created. Returns false if there is
// an overflow in internal calculations, or the max limit has been reached.
static bool AcquireReservationForMapping(size_t size) WARN_UNUSED_RESULT;
// Releases a reservation that was previously acquired.
static void ReleaseReservationForMapping(size_t size);
};
} // namespace base
#endif // BASE_MEMORY_SHARED_MEMORY_SECURITY_POLICY_H_

View file

@ -0,0 +1,121 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/shared_memory_tracker.h"
#include "base/strings/string_number_conversions.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/process_memory_dump.h"
namespace base {
const char SharedMemoryTracker::kDumpRootName[] = "shared_memory";
// static
SharedMemoryTracker* SharedMemoryTracker::GetInstance() {
static SharedMemoryTracker* instance = new SharedMemoryTracker;
return instance;
}
// static
std::string SharedMemoryTracker::GetDumpNameForTracing(
const UnguessableToken& id) {
DCHECK(!id.is_empty());
return std::string(kDumpRootName) + "/" + id.ToString();
}
// static
trace_event::MemoryAllocatorDumpGuid
SharedMemoryTracker::GetGlobalDumpIdForTracing(const UnguessableToken& id) {
std::string dump_name = GetDumpNameForTracing(id);
return trace_event::MemoryAllocatorDumpGuid(dump_name);
}
const trace_event::MemoryAllocatorDump*
SharedMemoryTracker::GetOrCreateSharedMemoryDump(
const SharedMemoryMapping& shared_memory,
trace_event::ProcessMemoryDump* pmd) {
return GetOrCreateSharedMemoryDumpInternal(shared_memory.raw_memory_ptr(),
shared_memory.mapped_size(),
shared_memory.guid(), pmd);
}
void SharedMemoryTracker::IncrementMemoryUsage(
const SharedMemoryMapping& mapping) {
AutoLock hold(usages_lock_);
DCHECK(usages_.find(mapping.raw_memory_ptr()) == usages_.end());
usages_.emplace(mapping.raw_memory_ptr(),
UsageInfo(mapping.mapped_size(), mapping.guid()));
}
void SharedMemoryTracker::DecrementMemoryUsage(
const SharedMemoryMapping& mapping) {
AutoLock hold(usages_lock_);
DCHECK(usages_.find(mapping.raw_memory_ptr()) != usages_.end());
usages_.erase(mapping.raw_memory_ptr());
}
SharedMemoryTracker::SharedMemoryTracker() {
trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
this, "SharedMemoryTracker", nullptr);
}
SharedMemoryTracker::~SharedMemoryTracker() = default;
bool SharedMemoryTracker::OnMemoryDump(const trace_event::MemoryDumpArgs& args,
trace_event::ProcessMemoryDump* pmd) {
AutoLock hold(usages_lock_);
for (const auto& usage : usages_) {
const trace_event::MemoryAllocatorDump* dump =
GetOrCreateSharedMemoryDumpInternal(
usage.first, usage.second.mapped_size, usage.second.mapped_id, pmd);
DCHECK(dump);
}
return true;
}
// static
const trace_event::MemoryAllocatorDump*
SharedMemoryTracker::GetOrCreateSharedMemoryDumpInternal(
void* mapped_memory,
size_t mapped_size,
const UnguessableToken& mapped_id,
trace_event::ProcessMemoryDump* pmd) {
const std::string dump_name = GetDumpNameForTracing(mapped_id);
trace_event::MemoryAllocatorDump* local_dump =
pmd->GetAllocatorDump(dump_name);
if (local_dump)
return local_dump;
size_t virtual_size = mapped_size;
// If resident size is not available, the virtual size is used as a fallback.
size_t size = virtual_size;
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
base::Optional<size_t> resident_size =
trace_event::ProcessMemoryDump::CountResidentBytesInSharedMemory(
mapped_memory, mapped_size);
if (resident_size.has_value())
size = resident_size.value();
#endif
local_dump = pmd->CreateAllocatorDump(dump_name);
local_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
trace_event::MemoryAllocatorDump::kUnitsBytes, size);
local_dump->AddScalar("virtual_size",
trace_event::MemoryAllocatorDump::kUnitsBytes,
virtual_size);
auto global_dump_guid = GetGlobalDumpIdForTracing(mapped_id);
trace_event::MemoryAllocatorDump* global_dump =
pmd->CreateSharedGlobalAllocatorDump(global_dump_guid);
global_dump->AddScalar(trace_event::MemoryAllocatorDump::kNameSize,
trace_event::MemoryAllocatorDump::kUnitsBytes, size);
// The edges will be overridden by the clients with the correct importance.
pmd->AddOverridableOwnershipEdge(local_dump->guid(), global_dump->guid(),
0 /* importance */);
return local_dump;
}
} // namespace base

View file

@ -0,0 +1,82 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
#define BASE_MEMORY_SHARED_MEMORY_TRACKER_H_
#include <map>
#include <string>
#include "base/memory/shared_memory_mapping.h"
#include "base/synchronization/lock.h"
#include "base/trace_event/memory_dump_provider.h"
namespace base {
namespace trace_event {
class MemoryAllocatorDump;
class MemoryAllocatorDumpGuid;
class ProcessMemoryDump;
}
// SharedMemoryTracker tracks shared memory usage.
class BASE_EXPORT SharedMemoryTracker : public trace_event::MemoryDumpProvider {
public:
// Returns a singleton instance.
static SharedMemoryTracker* GetInstance();
static std::string GetDumpNameForTracing(const UnguessableToken& id);
static trace_event::MemoryAllocatorDumpGuid GetGlobalDumpIdForTracing(
const UnguessableToken& id);
// Gets, or creates if non-existent, a memory dump for the |shared_memory|
// inside the given |pmd|. Also adds the necessary edges for the dump when
// creating the dump.
static const trace_event::MemoryAllocatorDump* GetOrCreateSharedMemoryDump(
const SharedMemoryMapping& shared_memory,
trace_event::ProcessMemoryDump* pmd);
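// Example (illustrative sketch; typically called from another
// trace_event::MemoryDumpProvider::OnMemoryDump() implementation that owns a
// mapping |mapping_| and a local dump |local_dump|):
//
//   const auto* shm_dump =
//       base::SharedMemoryTracker::GetOrCreateSharedMemoryDump(mapping_, pmd);
//   if (shm_dump)
//     pmd->AddOwnershipEdge(local_dump->guid(), shm_dump->guid());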
// Records shared memory usage on valid mapping.
void IncrementMemoryUsage(const SharedMemoryMapping& mapping);
// Records shared memory usage on unmapping.
void DecrementMemoryUsage(const SharedMemoryMapping& mapping);
// Root dump name for all shared memory dumps.
static const char kDumpRootName[];
private:
SharedMemoryTracker();
~SharedMemoryTracker() override;
// trace_event::MemoryDumpProvider implementation.
bool OnMemoryDump(const trace_event::MemoryDumpArgs& args,
trace_event::ProcessMemoryDump* pmd) override;
static const trace_event::MemoryAllocatorDump*
GetOrCreateSharedMemoryDumpInternal(void* mapped_memory,
size_t mapped_size,
const UnguessableToken& mapped_id,
trace_event::ProcessMemoryDump* pmd);
// Information associated with each mapped address.
struct UsageInfo {
UsageInfo(size_t size, const UnguessableToken& id)
: mapped_size(size), mapped_id(id) {}
size_t mapped_size;
UnguessableToken mapped_id;
};
// Used to lock when |usages_| is modified or read.
Lock usages_lock_;
std::map<void*, UsageInfo> usages_;
DISALLOW_COPY_AND_ASSIGN(SharedMemoryTracker);
};
} // namespace base
#endif // BASE_MEMORY_SHARED_MEMORY_TRACKER_H_

View file

@ -0,0 +1,279 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
// PLEASE READ: Do you really need a singleton? If possible, use a
// function-local static of type base::NoDestructor<T> instead:
//
// Factory& Factory::GetInstance() {
// static base::NoDestructor<Factory> instance;
// return *instance;
// }
// !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
//
// Singletons make it hard to determine the lifetime of an object, which can
// lead to buggy code and spurious crashes.
//
// Instead of adding another singleton into the mix, try to identify either:
// a) An existing singleton that can manage your object's lifetime
// b) Locations where you can deterministically create the object and pass
// into other objects
//
// If you absolutely need a singleton, please keep them as trivial as possible
// and ideally a leaf dependency. Singletons get problematic when they attempt
// to do too much in their destructor or have circular dependencies.
#ifndef BASE_MEMORY_SINGLETON_H_
#define BASE_MEMORY_SINGLETON_H_
#include "base/at_exit.h"
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/lazy_instance_helpers.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/threading/thread_restrictions.h"
namespace base {
// Default traits for Singleton<Type>. Calls operator new and operator delete on
// the object. Registers automatic deletion at process exit.
// Overload if you need arguments or another memory allocation function.
template<typename Type>
struct DefaultSingletonTraits {
// Allocates the object.
static Type* New() {
// The parenthesis is very important here; it forces POD type
// initialization.
return new Type();
}
// Destroys the object.
static void Delete(Type* x) {
delete x;
}
// Set to true to automatically register deletion of the object on process
// exit. See below for the required call that makes this happen.
static const bool kRegisterAtExit = true;
#if DCHECK_IS_ON()
// Set to false to disallow access on a non-joinable thread. This is
// different from kRegisterAtExit because StaticMemorySingletonTraits allows
// access on non-joinable threads, and gracefully handles this.
static const bool kAllowedToAccessOnNonjoinableThread = false;
#endif
};
// Alternate traits for use with the Singleton<Type>. Identical to
// DefaultSingletonTraits except that the Singleton will not be cleaned up
// at exit.
template<typename Type>
struct LeakySingletonTraits : public DefaultSingletonTraits<Type> {
static const bool kRegisterAtExit = false;
#if DCHECK_IS_ON()
static const bool kAllowedToAccessOnNonjoinableThread = true;
#endif
};
// Alternate traits for use with the Singleton<Type>. Allocates memory
// for the singleton instance from a static buffer. The singleton will
// be cleaned up at exit, but can't be revived after destruction unless
// the ResurrectForTesting() method is called.
//
// This is useful for a certain category of things, notably logging and
// tracing, where the singleton instance is of a type carefully constructed to
// be safe to access post-destruction.
// In logging and tracing you'll typically get stray calls at odd times, like
// during static destruction, thread teardown and the like, and there's a
// termination race on the heap-based singleton - e.g. if one thread calls
// get(), but then another thread initiates AtExit processing, the first thread
// may call into an object residing in unallocated memory. If the instance is
// allocated from the data segment, then this is survivable.
//
// The destructor is to deallocate system resources, in this case to unregister
// a callback the system will invoke when logging levels change. Note that
// this is also used in e.g. Chrome Frame, where you have to allow for the
// possibility of loading briefly into someone else's process space, and
// so leaking is not an option, as that would sabotage the state of your host
// process once you've unloaded.
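//
// Example (illustrative sketch; |TraceLog| is a hypothetical class):
//
//   TraceLog* TraceLog::GetInstance() {
//     return base::Singleton<
//         TraceLog, base::StaticMemorySingletonTraits<TraceLog>>::get();
//   }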
template <typename Type>
struct StaticMemorySingletonTraits {
// WARNING: Users of this trait must handle New() returning null.
static Type* New() {
// Only constructs once and returns pointer; otherwise returns null.
if (subtle::NoBarrier_AtomicExchange(&dead_, 1))
return nullptr;
return new (buffer_) Type();
}
static void Delete(Type* p) {
if (p)
p->Type::~Type();
}
static const bool kRegisterAtExit = true;
#if DCHECK_IS_ON()
static const bool kAllowedToAccessOnNonjoinableThread = true;
#endif
static void ResurrectForTesting() { subtle::NoBarrier_Store(&dead_, 0); }
private:
alignas(Type) static char buffer_[sizeof(Type)];
// Signal the object was already deleted, so it is not revived.
static subtle::Atomic32 dead_;
};
template <typename Type>
alignas(Type) char StaticMemorySingletonTraits<Type>::buffer_[sizeof(Type)];
template <typename Type>
subtle::Atomic32 StaticMemorySingletonTraits<Type>::dead_ = 0;
// The Singleton<Type, Traits, DifferentiatingType> class manages a single
// instance of Type which will be created on first use and will be destroyed at
// normal process exit. The Traits::Delete function will not be called on
// abnormal process exit.
//
// DifferentiatingType is used as a key to differentiate two different
// singletons having the same memory allocation functions but serving a
// different purpose. This is mainly used for Locks serving different purposes.
//
// Example usage:
//
// In your header:
// namespace base {
// template <typename T>
// struct DefaultSingletonTraits;
// }
// class FooClass {
// public:
// static FooClass* GetInstance(); <-- See comment below on this.
// void Bar() { ... }
// private:
// FooClass() { ... }
// friend struct base::DefaultSingletonTraits<FooClass>;
//
// DISALLOW_COPY_AND_ASSIGN(FooClass);
// };
//
// In your source file:
// #include "base/memory/singleton.h"
// FooClass* FooClass::GetInstance() {
// return base::Singleton<FooClass>::get();
// }
//
// Or for leaky singletons:
// #include "base/memory/singleton.h"
// FooClass* FooClass::GetInstance() {
// return base::Singleton<
// FooClass, base::LeakySingletonTraits<FooClass>>::get();
// }
//
// And to call methods on FooClass:
// FooClass::GetInstance()->Bar();
//
// NOTE: The method accessing Singleton<T>::get() has to be named as GetInstance
// and it is important that FooClass::GetInstance() is not inlined in the
// header. This makes sure that when source files from multiple targets include
// this header they don't end up with different copies of the inlined code
// creating multiple copies of the singleton.
//
// Singleton<> has no non-static members and doesn't need to actually be
// instantiated.
//
// This class is itself thread-safe. The underlying Type must of course be
// thread-safe if you want to use it concurrently. Two parameters may be tuned
// depending on the user's requirements.
//
// Glossary:
// RAE = kRegisterAtExit
//
// On every platform, if Traits::RAE is true, the singleton will be destroyed at
// process exit. More precisely it uses AtExitManager which requires an
// object of this type to be instantiated. AtExitManager mimics the semantics
// of atexit() such as LIFO order but under Windows is safer to call. For more
// information see at_exit.h.
//
// If Traits::RAE is false, the singleton will not be freed at process exit,
// thus the singleton will be leaked if it is ever accessed. Traits::RAE
// shouldn't be false unless absolutely necessary. Remember that the heap where
// the object is allocated may be destroyed by the CRT anyway.
//
// Caveats:
// (a) Every call to get(), operator->() and operator*() incurs some overhead
// (16ns on my P4/2.8GHz) to check whether the object has already been
// initialized. You may wish to cache the result of get(); it will not
// change.
//
// (b) Your factory function must never throw an exception. This class is not
// exception-safe.
//
template <typename Type,
typename Traits = DefaultSingletonTraits<Type>,
typename DifferentiatingType = Type>
class Singleton {
private:
// A class T using the Singleton<T> pattern should declare a GetInstance()
// method and call Singleton::get() from within that. T may also declare a
// GetInstanceIfExists() method to invoke Singleton::GetIfExists().
friend Type;
// This class is safe to be constructed and copy-constructed since it has no
// members.
// Returns a pointer to the one true instance of the class.
static Type* get() {
#if DCHECK_IS_ON()
if (!Traits::kAllowedToAccessOnNonjoinableThread)
ThreadRestrictions::AssertSingletonAllowed();
#endif
return subtle::GetOrCreateLazyPointer(
&instance_, &CreatorFunc, nullptr,
Traits::kRegisterAtExit ? OnExit : nullptr, nullptr);
}
// Returns the same result as get() if the instance exists but doesn't
// construct it (and returns null) if it doesn't.
static Type* GetIfExists() {
#if DCHECK_IS_ON()
if (!Traits::kAllowedToAccessOnNonjoinableThread)
ThreadRestrictions::AssertSingletonAllowed();
#endif
if (!subtle::NoBarrier_Load(&instance_))
return nullptr;
// Need to invoke get() nonetheless as some Traits return null after
// destruction (even though |instance_| still holds garbage).
return get();
}
// Internal method used as an adaptor for GetOrCreateLazyPointer(). Do not use
// outside of that use case.
static Type* CreatorFunc(void* /* creator_arg*/) { return Traits::New(); }
// Adapter function for use with AtExit(). This should only be called on a
// single thread, so atomic operations are not needed.
// Calling OnExit while the singleton is in use by other threads is a mistake.
static void OnExit(void* /*unused*/) {
// AtExit should only ever be registered after the singleton instance was
// created. We should only ever get here with a valid instance_ pointer.
Traits::Delete(reinterpret_cast<Type*>(subtle::NoBarrier_Load(&instance_)));
instance_ = 0;
}
static subtle::AtomicWord instance_;
};
template <typename Type, typename Traits, typename DifferentiatingType>
subtle::AtomicWord Singleton<Type, Traits, DifferentiatingType>::instance_ = 0;
} // namespace base
#endif // BASE_MEMORY_SINGLETON_H_

View file

@ -0,0 +1,80 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/unsafe_shared_memory_region.h"
#include <utility>
namespace base {
UnsafeSharedMemoryRegion::CreateFunction*
UnsafeSharedMemoryRegion::create_hook_ = nullptr;
// static
UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Create(size_t size) {
if (create_hook_)
return create_hook_(size);
subtle::PlatformSharedMemoryRegion handle =
subtle::PlatformSharedMemoryRegion::CreateUnsafe(size);
return UnsafeSharedMemoryRegion(std::move(handle));
}
// static
UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Deserialize(
subtle::PlatformSharedMemoryRegion handle) {
return UnsafeSharedMemoryRegion(std::move(handle));
}
// static
subtle::PlatformSharedMemoryRegion
UnsafeSharedMemoryRegion::TakeHandleForSerialization(
UnsafeSharedMemoryRegion region) {
return std::move(region.handle_);
}
UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion() = default;
UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
UnsafeSharedMemoryRegion&& region) = default;
UnsafeSharedMemoryRegion& UnsafeSharedMemoryRegion::operator=(
UnsafeSharedMemoryRegion&& region) = default;
UnsafeSharedMemoryRegion::~UnsafeSharedMemoryRegion() = default;
UnsafeSharedMemoryRegion UnsafeSharedMemoryRegion::Duplicate() const {
return UnsafeSharedMemoryRegion(handle_.Duplicate());
}
WritableSharedMemoryMapping UnsafeSharedMemoryRegion::Map() const {
return MapAt(0, handle_.GetSize());
}
WritableSharedMemoryMapping UnsafeSharedMemoryRegion::MapAt(off_t offset,
size_t size) const {
if (!IsValid())
return {};
void* memory = nullptr;
size_t mapped_size = 0;
if (!handle_.MapAt(offset, size, &memory, &mapped_size))
return {};
return WritableSharedMemoryMapping(memory, size, mapped_size,
handle_.GetGUID());
}
bool UnsafeSharedMemoryRegion::IsValid() const {
return handle_.IsValid();
}
UnsafeSharedMemoryRegion::UnsafeSharedMemoryRegion(
subtle::PlatformSharedMemoryRegion handle)
: handle_(std::move(handle)) {
if (handle_.IsValid()) {
CHECK_EQ(handle_.GetMode(),
subtle::PlatformSharedMemoryRegion::Mode::kUnsafe);
}
}
} // namespace base

View file

@ -0,0 +1,121 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
namespace base {
// Scoped move-only handle to a region of platform shared memory. The instance
// owns the platform handle it wraps. Mappings created by this region are
// writable. These mappings remain valid even after the region handle is moved
// or destroyed.
//
// NOTE: UnsafeSharedMemoryRegion cannot be converted to a read-only region. Use
// with caution as the region will be writable to any process with a handle to
// the region.
//
// Use this if and only if the following is true:
// - You do not need to share the region as read-only, and,
// - You need to have several instances of the region simultaneously, possibly
// in different processes, that can produce writable mappings.
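//
// Example (illustrative sketch):
//
//   base::UnsafeSharedMemoryRegion region =
//       base::UnsafeSharedMemoryRegion::Create(4096);
//   base::WritableSharedMemoryMapping mapping = region.Map();
//   base::UnsafeSharedMemoryRegion duplicate = region.Duplicate();
//   // |duplicate| can be passed to another process; both sides may map the
//   // same memory and write to it.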
class BASE_EXPORT UnsafeSharedMemoryRegion {
public:
using MappingType = WritableSharedMemoryMapping;
// Creates a new UnsafeSharedMemoryRegion instance of a given size that can be
// used for mapping writable shared memory into the virtual address space.
static UnsafeSharedMemoryRegion Create(size_t size);
using CreateFunction = decltype(Create);
// Returns an UnsafeSharedMemoryRegion built from a platform-specific handle
// that was taken from another UnsafeSharedMemoryRegion instance. Returns an
// invalid region iff the |handle| is invalid. CHECK-fails if the |handle|
// isn't unsafe.
// This should be used only by the code passing a handle across
// process boundaries.
static UnsafeSharedMemoryRegion Deserialize(
subtle::PlatformSharedMemoryRegion handle);
// Extracts a platform handle from the region. Ownership is transferred to the
// returned region object.
// This should be used only for sending the handle from the current
// process to another.
static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
UnsafeSharedMemoryRegion region);
// Default constructor initializes an invalid instance.
UnsafeSharedMemoryRegion();
// Move operations are allowed.
UnsafeSharedMemoryRegion(UnsafeSharedMemoryRegion&&);
UnsafeSharedMemoryRegion& operator=(UnsafeSharedMemoryRegion&&);
// Destructor closes shared memory region if valid.
// All created mappings will remain valid.
~UnsafeSharedMemoryRegion();
// Duplicates the underlying platform handle and creates a new
// UnsafeSharedMemoryRegion instance that owns the newly created handle.
// Returns a valid UnsafeSharedMemoryRegion on success, invalid otherwise.
// The current region instance remains valid in any case.
UnsafeSharedMemoryRegion Duplicate() const;
// Maps the shared memory region into the caller's address space with write
// access. The mapped address is guaranteed to have an alignment of
// at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
// Returns a valid WritableSharedMemoryMapping instance on success, invalid
// otherwise.
WritableSharedMemoryMapping Map() const;
// Same as above, but maps only |size| bytes of the shared memory region
// starting with the given |offset|. |offset| must be aligned to the value of
// |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if the
// requested bytes are out of the region limits.
WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
// Whether the underlying platform handle is valid.
bool IsValid() const;
// Returns the maximum mapping size that can be created from this region.
size_t GetSize() const {
DCHECK(IsValid());
return handle_.GetSize();
}
// Returns 128-bit GUID of the region.
const UnguessableToken& GetGUID() const {
DCHECK(IsValid());
return handle_.GetGUID();
}
// Returns a platform shared memory handle. |this| remains the owner of the
// handle.
subtle::PlatformSharedMemoryRegion::PlatformHandle GetPlatformHandle() const {
DCHECK(IsValid());
return handle_.GetPlatformHandle();
}
private:
friend class SharedMemoryHooks;
explicit UnsafeSharedMemoryRegion(subtle::PlatformSharedMemoryRegion handle);
static void set_create_hook(CreateFunction* hook) { create_hook_ = hook; }
static CreateFunction* create_hook_;
subtle::PlatformSharedMemoryRegion handle_;
DISALLOW_COPY_AND_ASSIGN(UnsafeSharedMemoryRegion);
};
} // namespace base
#endif // BASE_MEMORY_UNSAFE_SHARED_MEMORY_REGION_H_

View file

@ -0,0 +1,99 @@
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/weak_ptr.h"
namespace base {
namespace internal {
WeakReference::Flag::Flag() {
// Flags only become bound when checked for validity, or invalidated,
// so that we can check that later validity/invalidation operations on
// the same Flag take place on the same sequenced thread.
DETACH_FROM_SEQUENCE(sequence_checker_);
}
void WeakReference::Flag::Invalidate() {
// The flag being invalidated with a single ref implies that there are no
// weak pointers in existence. Allow deletion on another thread in this case.
#if DCHECK_IS_ON()
DCHECK(sequence_checker_.CalledOnValidSequence() || HasOneRef())
<< "WeakPtrs must be invalidated on the same sequenced thread.";
#endif
invalidated_.Set();
}
bool WeakReference::Flag::IsValid() const {
// WeakPtrs must be checked on the same sequenced thread.
DCHECK_CALLED_ON_VALID_SEQUENCE(sequence_checker_);
return !invalidated_.IsSet();
}
bool WeakReference::Flag::MaybeValid() const {
return !invalidated_.IsSet();
}
void WeakReference::Flag::DetachFromSequence() {
DETACH_FROM_SEQUENCE(sequence_checker_);
}
WeakReference::Flag::~Flag() = default;
WeakReference::WeakReference() = default;
WeakReference::WeakReference(const scoped_refptr<Flag>& flag) : flag_(flag) {}
WeakReference::~WeakReference() = default;
WeakReference::WeakReference(WeakReference&& other) noexcept = default;
WeakReference::WeakReference(const WeakReference& other) = default;
bool WeakReference::IsValid() const {
return flag_ && flag_->IsValid();
}
bool WeakReference::MaybeValid() const {
return flag_ && flag_->MaybeValid();
}
WeakReferenceOwner::WeakReferenceOwner()
: flag_(MakeRefCounted<WeakReference::Flag>()) {}
WeakReferenceOwner::~WeakReferenceOwner() {
flag_->Invalidate();
}
WeakReference WeakReferenceOwner::GetRef() const {
// If we hold the last reference to the Flag then detach the SequenceChecker.
if (!HasRefs())
flag_->DetachFromSequence();
return WeakReference(flag_);
}
void WeakReferenceOwner::Invalidate() {
flag_->Invalidate();
flag_ = MakeRefCounted<WeakReference::Flag>();
}
WeakPtrBase::WeakPtrBase() : ptr_(0) {}
WeakPtrBase::~WeakPtrBase() = default;
WeakPtrBase::WeakPtrBase(const WeakReference& ref, uintptr_t ptr)
: ref_(ref), ptr_(ptr) {
DCHECK(ptr_);
}
WeakPtrFactoryBase::WeakPtrFactoryBase(uintptr_t ptr) : ptr_(ptr) {
DCHECK(ptr_);
}
WeakPtrFactoryBase::~WeakPtrFactoryBase() {
ptr_ = 0;
}
} // namespace internal
} // namespace base

View file

@ -0,0 +1,395 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Weak pointers are pointers to an object that do not affect its lifetime,
// and which may be invalidated (i.e. reset to nullptr) by the object, or its
// owner, at any time, most commonly when the object is about to be deleted.
// Weak pointers are useful when an object needs to be accessed safely by one
// or more objects other than its owner, and those callers can cope with the
// object vanishing and e.g. tasks posted to it being silently dropped.
// Reference-counting such an object would complicate the ownership graph and
// make it harder to reason about the object's lifetime.
// EXAMPLE:
//
// class Controller {
// public:
// void SpawnWorker() { Worker::StartNew(weak_factory_.GetWeakPtr()); }
// void WorkComplete(const Result& result) { ... }
// private:
// // Member variables should appear before the WeakPtrFactory, to ensure
// // that any WeakPtrs to Controller are invalidated before its member
// // variables' destructors are executed, rendering them invalid.
// WeakPtrFactory<Controller> weak_factory_{this};
// };
//
// class Worker {
// public:
// static void StartNew(const WeakPtr<Controller>& controller) {
// Worker* worker = new Worker(controller);
// // Kick off asynchronous processing...
// }
// private:
// Worker(const WeakPtr<Controller>& controller)
// : controller_(controller) {}
// void DidCompleteAsynchronousProcessing(const Result& result) {
// if (controller_)
// controller_->WorkComplete(result);
// }
// WeakPtr<Controller> controller_;
// };
//
// With this implementation a caller may use SpawnWorker() to dispatch multiple
// Workers and subsequently delete the Controller, without waiting for all
// Workers to have completed.
// ------------------------- IMPORTANT: Thread-safety -------------------------
// Weak pointers may be passed safely between sequences, but must always be
// dereferenced and invalidated on the same SequencedTaskRunner, otherwise
// checking the pointer would be racy.
//
// To ensure correct use, the first time a WeakPtr issued by a WeakPtrFactory
// is dereferenced, the factory and its WeakPtrs become bound to the calling
// sequence or current SequencedWorkerPool token, and cannot be dereferenced or
// invalidated on any other task runner. Bound WeakPtrs can still be handed
// off to other task runners, e.g. to use to post tasks back to object on the
// bound sequence.
//
// If all WeakPtr objects are destroyed or invalidated then the factory is
// unbound from the SequencedTaskRunner/Thread. The WeakPtrFactory may then be
// destroyed, or new WeakPtr objects may be used, from a different sequence.
//
// Thus, at least one WeakPtr object must exist and have been dereferenced on
// the correct sequence in order for the other WeakPtr objects to enforce that
// they are used on the desired sequence.
#ifndef BASE_MEMORY_WEAK_PTR_H_
#define BASE_MEMORY_WEAK_PTR_H_
#include <cstddef>
#include <type_traits>
#include "base/base_export.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/sequence_checker.h"
#include "base/synchronization/atomic_flag.h"
namespace base {
template <typename T> class SupportsWeakPtr;
template <typename T> class WeakPtr;
namespace internal {
// These classes are part of the WeakPtr implementation.
// DO NOT USE THESE CLASSES DIRECTLY YOURSELF.
class BASE_EXPORT WeakReference {
public:
// Although Flag is bound to a specific SequencedTaskRunner, it may be
// deleted from another via base::WeakPtr::~WeakPtr().
class BASE_EXPORT Flag : public RefCountedThreadSafe<Flag> {
public:
Flag();
void Invalidate();
bool IsValid() const;
bool MaybeValid() const;
void DetachFromSequence();
private:
friend class base::RefCountedThreadSafe<Flag>;
~Flag();
SEQUENCE_CHECKER(sequence_checker_);
AtomicFlag invalidated_;
};
WeakReference();
explicit WeakReference(const scoped_refptr<Flag>& flag);
~WeakReference();
WeakReference(WeakReference&& other) noexcept;
WeakReference(const WeakReference& other);
WeakReference& operator=(WeakReference&& other) noexcept = default;
WeakReference& operator=(const WeakReference& other) = default;
bool IsValid() const;
bool MaybeValid() const;
private:
scoped_refptr<const Flag> flag_;
};
class BASE_EXPORT WeakReferenceOwner {
public:
WeakReferenceOwner();
~WeakReferenceOwner();
WeakReference GetRef() const;
bool HasRefs() const { return !flag_->HasOneRef(); }
void Invalidate();
private:
scoped_refptr<WeakReference::Flag> flag_;
};
// This class simplifies the implementation of WeakPtr's type conversion
// constructor by avoiding the need for a public accessor for ref_. A
// WeakPtr<T> cannot access the private members of WeakPtr<U>, so this
// base class gives us a way to access ref_ in a protected fashion.
class BASE_EXPORT WeakPtrBase {
public:
WeakPtrBase();
~WeakPtrBase();
WeakPtrBase(const WeakPtrBase& other) = default;
WeakPtrBase(WeakPtrBase&& other) noexcept = default;
WeakPtrBase& operator=(const WeakPtrBase& other) = default;
WeakPtrBase& operator=(WeakPtrBase&& other) noexcept = default;
void reset() {
ref_ = internal::WeakReference();
ptr_ = 0;
}
protected:
WeakPtrBase(const WeakReference& ref, uintptr_t ptr);
WeakReference ref_;
// This pointer is only valid when ref_.is_valid() is true. Otherwise, its
// value is undefined (as opposed to nullptr).
uintptr_t ptr_;
};
// This class provides a common implementation of common functions that would
// otherwise get instantiated separately for each distinct instantiation of
// SupportsWeakPtr<>.
class SupportsWeakPtrBase {
public:
// A safe static downcast of a WeakPtr<Base> to WeakPtr<Derived>. This
// conversion will only compile if there exists a Base which inherits
// from SupportsWeakPtr<Base>. See base::AsWeakPtr() below for a helper
// function that makes calling this easier.
//
// Precondition: t != nullptr
template<typename Derived>
static WeakPtr<Derived> StaticAsWeakPtr(Derived* t) {
static_assert(
std::is_base_of<internal::SupportsWeakPtrBase, Derived>::value,
"AsWeakPtr argument must inherit from SupportsWeakPtr");
return AsWeakPtrImpl<Derived>(t);
}
private:
// This template function uses type inference to find a Base of Derived
// which is an instance of SupportsWeakPtr<Base>. We can then safely
// static_cast the Base* to a Derived*.
template <typename Derived, typename Base>
static WeakPtr<Derived> AsWeakPtrImpl(SupportsWeakPtr<Base>* t) {
WeakPtr<Base> ptr = t->AsWeakPtr();
return WeakPtr<Derived>(
ptr.ref_, static_cast<Derived*>(reinterpret_cast<Base*>(ptr.ptr_)));
}
};
} // namespace internal
template <typename T> class WeakPtrFactory;
// The WeakPtr class holds a weak reference to |T*|.
//
// This class is designed to be used like a normal pointer. You should always
// null-test an object of this class before using it or invoking a method that
// may result in the underlying object being destroyed.
//
// EXAMPLE:
//
// class Foo { ... };
// WeakPtr<Foo> foo;
// if (foo)
// foo->method();
//
template <typename T>
class WeakPtr : public internal::WeakPtrBase {
public:
WeakPtr() = default;
WeakPtr(std::nullptr_t) {}
// Allow conversion from U to T provided U "is a" T. Note that this
// is separate from the (implicit) copy and move constructors.
template <typename U>
WeakPtr(const WeakPtr<U>& other) : WeakPtrBase(other) {
// Need to cast from U* to T* to do pointer adjustment in case of multiple
// inheritance. This also enforces the "U is a T" rule.
T* t = reinterpret_cast<U*>(other.ptr_);
ptr_ = reinterpret_cast<uintptr_t>(t);
}
template <typename U>
WeakPtr(WeakPtr<U>&& other) noexcept : WeakPtrBase(std::move(other)) {
// Need to cast from U* to T* to do pointer adjustment in case of multiple
// inheritance. This also enforces the "U is a T" rule.
T* t = reinterpret_cast<U*>(other.ptr_);
ptr_ = reinterpret_cast<uintptr_t>(t);
}
T* get() const {
return ref_.IsValid() ? reinterpret_cast<T*>(ptr_) : nullptr;
}
T& operator*() const {
DCHECK(get() != nullptr);
return *get();
}
T* operator->() const {
DCHECK(get() != nullptr);
return get();
}
// Allow conditionals to test validity, e.g. if (weak_ptr) {...};
explicit operator bool() const { return get() != nullptr; }
// Returns false if the WeakPtr is confirmed to be invalid. This call is safe
// to make from any thread, e.g. to optimize away unnecessary work, but
// operator bool() must always be called, on the correct sequence, before
// actually using the pointer.
//
// Warning: as with any object, this call is only thread-safe if the WeakPtr
// instance isn't being re-assigned or reset() racily with this call.
bool MaybeValid() const { return ref_.MaybeValid(); }
// Returns whether the object |this| points to has been invalidated. This can
// be used to distinguish a WeakPtr to a destroyed object from one that has
// been explicitly set to null.
bool WasInvalidated() const { return ptr_ && !ref_.IsValid(); }
private:
friend class internal::SupportsWeakPtrBase;
template <typename U> friend class WeakPtr;
friend class SupportsWeakPtr<T>;
friend class WeakPtrFactory<T>;
WeakPtr(const internal::WeakReference& ref, T* ptr)
: WeakPtrBase(ref, reinterpret_cast<uintptr_t>(ptr)) {}
};
// Allow callers to compare WeakPtrs against nullptr to test validity.
template <class T>
bool operator!=(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
return !(weak_ptr == nullptr);
}
template <class T>
bool operator!=(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
return weak_ptr != nullptr;
}
template <class T>
bool operator==(const WeakPtr<T>& weak_ptr, std::nullptr_t) {
return weak_ptr.get() == nullptr;
}
template <class T>
bool operator==(std::nullptr_t, const WeakPtr<T>& weak_ptr) {
return weak_ptr == nullptr;
}
namespace internal {
class BASE_EXPORT WeakPtrFactoryBase {
protected:
WeakPtrFactoryBase(uintptr_t ptr);
~WeakPtrFactoryBase();
internal::WeakReferenceOwner weak_reference_owner_;
uintptr_t ptr_;
};
} // namespace internal
// A class may be composed of a WeakPtrFactory and thereby
// control how it exposes weak pointers to itself. This is helpful if you only
// need weak pointers within the implementation of a class. This class is also
// useful when working with primitive types. For example, you could have a
// WeakPtrFactory<bool> that is used to pass around a weak reference to a bool.
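//
// Example (illustrative sketch; |Controller| is a hypothetical class):
//
//   class Controller {
//    public:
//     base::WeakPtr<Controller> AsWeakPtr() {
//       return weak_factory_.GetWeakPtr();
//     }
//    private:
//     // Keep this the last member so it is destroyed first, invalidating all
//     // outstanding WeakPtrs before the other members are torn down.
//     base::WeakPtrFactory<Controller> weak_factory_{this};
//   };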
template <class T>
class WeakPtrFactory : public internal::WeakPtrFactoryBase {
public:
explicit WeakPtrFactory(T* ptr)
: WeakPtrFactoryBase(reinterpret_cast<uintptr_t>(ptr)) {}
~WeakPtrFactory() = default;
WeakPtr<T> GetWeakPtr() {
return WeakPtr<T>(weak_reference_owner_.GetRef(),
reinterpret_cast<T*>(ptr_));
}
// Call this method to invalidate all existing weak pointers.
void InvalidateWeakPtrs() {
DCHECK(ptr_);
weak_reference_owner_.Invalidate();
}
// Call this method to determine if any weak pointers exist.
bool HasWeakPtrs() const {
DCHECK(ptr_);
return weak_reference_owner_.HasRefs();
}
private:
DISALLOW_IMPLICIT_CONSTRUCTORS(WeakPtrFactory);
};
// A class may extend from SupportsWeakPtr to let others take weak pointers to
// it. This avoids the class itself implementing boilerplate to dispense weak
// pointers. However, since SupportsWeakPtr's destructor won't invalidate
// weak pointers to the class until after the derived class' members have been
// destroyed, its use can lead to subtle use-after-destroy issues.
template <class T>
class SupportsWeakPtr : public internal::SupportsWeakPtrBase {
public:
SupportsWeakPtr() = default;
WeakPtr<T> AsWeakPtr() {
return WeakPtr<T>(weak_reference_owner_.GetRef(), static_cast<T*>(this));
}
protected:
~SupportsWeakPtr() = default;
private:
internal::WeakReferenceOwner weak_reference_owner_;
DISALLOW_COPY_AND_ASSIGN(SupportsWeakPtr);
};
// Helper function that uses type deduction to safely return a WeakPtr<Derived>
// when Derived doesn't directly extend SupportsWeakPtr<Derived> but instead
// extends a Base that extends SupportsWeakPtr<Base>.
//
// EXAMPLE:
// class Base : public base::SupportsWeakPtr<Base> {};
// class Derived : public Base {};
//
// Derived derived;
// base::WeakPtr<Derived> ptr = base::AsWeakPtr(&derived);
//
// Note that the following doesn't work (invalid type conversion) since
// Derived::AsWeakPtr() is WeakPtr<Base> SupportsWeakPtr<Base>::AsWeakPtr(),
// and there's no way to safely cast WeakPtr<Base> to WeakPtr<Derived> at
// the caller.
//
// base::WeakPtr<Derived> ptr = derived.AsWeakPtr(); // Fails.
template <typename Derived>
WeakPtr<Derived> AsWeakPtr(Derived* t) {
return internal::SupportsWeakPtrBase::StaticAsWeakPtr<Derived>(t);
}
} // namespace base
#endif // BASE_MEMORY_WEAK_PTR_H_

View file

@ -0,0 +1,98 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/memory/writable_shared_memory_region.h"
#include <utility>
#include "build/build_config.h"
namespace base {
WritableSharedMemoryRegion::CreateFunction*
WritableSharedMemoryRegion::create_hook_ = nullptr;
// static
WritableSharedMemoryRegion WritableSharedMemoryRegion::Create(size_t size) {
if (create_hook_)
return create_hook_(size);
subtle::PlatformSharedMemoryRegion handle =
subtle::PlatformSharedMemoryRegion::CreateWritable(size);
return WritableSharedMemoryRegion(std::move(handle));
}
// static
WritableSharedMemoryRegion WritableSharedMemoryRegion::Deserialize(
subtle::PlatformSharedMemoryRegion handle) {
return WritableSharedMemoryRegion(std::move(handle));
}
// static
subtle::PlatformSharedMemoryRegion
WritableSharedMemoryRegion::TakeHandleForSerialization(
WritableSharedMemoryRegion region) {
return std::move(region.handle_);
}
// static
ReadOnlySharedMemoryRegion WritableSharedMemoryRegion::ConvertToReadOnly(
WritableSharedMemoryRegion region) {
subtle::PlatformSharedMemoryRegion handle = std::move(region.handle_);
if (!handle.ConvertToReadOnly())
return {};
return ReadOnlySharedMemoryRegion::Deserialize(std::move(handle));
}
// static
UnsafeSharedMemoryRegion WritableSharedMemoryRegion::ConvertToUnsafe(
WritableSharedMemoryRegion region) {
subtle::PlatformSharedMemoryRegion handle = std::move(region.handle_);
if (!handle.ConvertToUnsafe())
return {};
return UnsafeSharedMemoryRegion::Deserialize(std::move(handle));
}
WritableSharedMemoryRegion::WritableSharedMemoryRegion() = default;
WritableSharedMemoryRegion::WritableSharedMemoryRegion(
WritableSharedMemoryRegion&& region) = default;
WritableSharedMemoryRegion& WritableSharedMemoryRegion::operator=(
WritableSharedMemoryRegion&& region) = default;
WritableSharedMemoryRegion::~WritableSharedMemoryRegion() = default;
WritableSharedMemoryMapping WritableSharedMemoryRegion::Map() const {
return MapAt(0, handle_.GetSize());
}
WritableSharedMemoryMapping WritableSharedMemoryRegion::MapAt(
off_t offset,
size_t size) const {
if (!IsValid())
return {};
void* memory = nullptr;
size_t mapped_size = 0;
if (!handle_.MapAt(offset, size, &memory, &mapped_size))
return {};
return WritableSharedMemoryMapping(memory, size, mapped_size,
handle_.GetGUID());
}
bool WritableSharedMemoryRegion::IsValid() const {
return handle_.IsValid();
}
WritableSharedMemoryRegion::WritableSharedMemoryRegion(
subtle::PlatformSharedMemoryRegion handle)
: handle_(std::move(handle)) {
if (handle_.IsValid()) {
CHECK_EQ(handle_.GetMode(),
subtle::PlatformSharedMemoryRegion::Mode::kWritable);
}
}
} // namespace base

View file

@ -0,0 +1,130 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
#define BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_
#include "base/macros.h"
#include "base/memory/platform_shared_memory_region.h"
#include "base/memory/read_only_shared_memory_region.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/memory/unsafe_shared_memory_region.h"
#include "build/build_config.h"
namespace base {
// Scoped move-only handle to a region of platform shared memory. The instance
// owns the platform handle it wraps. Mappings created by this region are
// writable. These mappings remain valid even after the region handle is moved
// or destroyed.
//
// This region can be locked to read-only access by converting it to a
// ReadOnlySharedMemoryRegion. However, unlike ReadOnlySharedMemoryRegion and
// UnsafeSharedMemoryRegion, ownership of this region (while writable) is unique
// and may only be transferred, not duplicated.
//
// Unlike ReadOnlySharedMemoryRegion and UnsafeSharedMemoryRegion,
// WritableSharedMemoryRegion doesn't provide a GetPlatformHandle() method, to
// ensure that the region is never duplicated while writable.
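//
// A minimal usage sketch (the 4096-byte size is arbitrary and error handling
// is abbreviated):
//
//   WritableSharedMemoryRegion region =
//       WritableSharedMemoryRegion::Create(4096);
//   WritableSharedMemoryMapping mapping = region.Map();
//   if (mapping.IsValid())
//     memset(mapping.memory(), 0, mapping.size());
//   // Hand out read-only access; |region| is consumed by the conversion.
//   ReadOnlySharedMemoryRegion read_only =
//       WritableSharedMemoryRegion::ConvertToReadOnly(std::move(region));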
class BASE_EXPORT WritableSharedMemoryRegion {
public:
using MappingType = WritableSharedMemoryMapping;
// Creates a new WritableSharedMemoryRegion instance of a given
// size that can be used for mapping writable shared memory into the virtual
// address space.
static WritableSharedMemoryRegion Create(size_t size);
using CreateFunction = decltype(Create);
// Returns a WritableSharedMemoryRegion built from a platform handle that was
// taken from another WritableSharedMemoryRegion instance. Returns an invalid
// region iff the |handle| is invalid. CHECK-fails if the |handle| isn't
// writable.
// This should be used only by the code passing handles across process
// boundaries.
static WritableSharedMemoryRegion Deserialize(
subtle::PlatformSharedMemoryRegion handle);
// Extracts the platform handle from the region. Ownership is transferred to
// the returned PlatformSharedMemoryRegion.
// This should be used only for sending the handle from the current
// process to another.
static subtle::PlatformSharedMemoryRegion TakeHandleForSerialization(
WritableSharedMemoryRegion region);
// Makes the region read-only. No new writable mappings of the region can be
// created after this call. Returns an invalid region on failure.
static ReadOnlySharedMemoryRegion ConvertToReadOnly(
WritableSharedMemoryRegion region);
// Makes the region unsafe. The region cannot be converted to read-only after
// this call. Returns an invalid region on failure.
static UnsafeSharedMemoryRegion ConvertToUnsafe(
WritableSharedMemoryRegion region);
// Default constructor initializes an invalid instance.
WritableSharedMemoryRegion();
// Move operations are allowed.
WritableSharedMemoryRegion(WritableSharedMemoryRegion&&);
WritableSharedMemoryRegion& operator=(WritableSharedMemoryRegion&&);
// Destructor closes shared memory region if valid.
// All created mappings will remain valid.
~WritableSharedMemoryRegion();
// Maps the shared memory region into the caller's address space with write
// access. The mapped address is guaranteed to have an alignment of
// at least |subtle::PlatformSharedMemoryRegion::kMapMinimumAlignment|.
// Returns a valid WritableSharedMemoryMapping instance on success, invalid
// otherwise.
WritableSharedMemoryMapping Map() const;
// Same as above, but maps only |size| bytes of the shared memory block
// starting at the given |offset|. |offset| must be aligned to the value of
// |SysInfo::VMAllocationGranularity()|. Returns an invalid mapping if the
// requested bytes are out of the region's limits.
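// For example (illustrative only; |region| is assumed to be a valid region at
// least two allocation-granularity units in size):
//
//   const size_t granularity = SysInfo::VMAllocationGranularity();
//   WritableSharedMemoryMapping mapping =
//       region.MapAt(granularity, granularity);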
WritableSharedMemoryMapping MapAt(off_t offset, size_t size) const;
// Whether underlying platform handles are valid.
bool IsValid() const;
// Returns the maximum mapping size that can be created from this region.
size_t GetSize() const {
DCHECK(IsValid());
return handle_.GetSize();
}
// Returns 128-bit GUID of the region.
const UnguessableToken& GetGUID() const {
DCHECK(IsValid());
return handle_.GetGUID();
}
#if defined(OS_WIN)
// On Windows it is necessary in rare cases to take a writable handle from a
// region that will be converted to read-only. On this platform it is a safe
// operation, as the handle returned from this method will remain writable
// after the region is converted to read-only. However, it breaks Chromium's
// WritableSharedMemoryRegion semantics and so should be used with care.
HANDLE UnsafeGetPlatformHandle() const { return handle_.GetPlatformHandle(); }
#endif
private:
friend class SharedMemoryHooks;
explicit WritableSharedMemoryRegion(
subtle::PlatformSharedMemoryRegion handle);
static void set_create_hook(CreateFunction* hook) { create_hook_ = hook; }
static CreateFunction* create_hook_;
subtle::PlatformSharedMemoryRegion handle_;
DISALLOW_COPY_AND_ASSIGN(WritableSharedMemoryRegion);
};
} // namespace base
#endif // BASE_MEMORY_WRITABLE_SHARED_MEMORY_REGION_H_