Repo created

This commit is contained in:
Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions


@@ -0,0 +1,16 @@
eseckler@chromium.org
nuskos@chromium.org
oysteine@chromium.org
primiano@chromium.org
skyostil@chromium.org
# For memory-infra related changes
ssid@chromium.org
# Emeritus:
dsinclair@chromium.org
nduca@chromium.org
simonhatch@chromium.org
# TEAM: tracing@chromium.org
# COMPONENT: Speed>Tracing


@@ -0,0 +1,81 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_AUTO_OPEN_CLOSE_EVENT_H_
#define BASE_TRACE_EVENT_AUTO_OPEN_CLOSE_EVENT_H_
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace trace_event {
// Class for tracing events that support "auto-opening" and "auto-closing".
// "auto-opening" = if the trace event is started (call Begin() before
// tracing is started,the trace event will be opened, with the start time
// being the time that the trace event was actually started.
// "auto-closing" = if the trace event is started but not ended by the time
// tracing ends, then the trace event will be automatically closed at the
// end of tracing.
// |category| must be known at compile-time in order to be used in trace macros.
// Hence, it's passed as a class template argument.
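// Example usage (illustrative sketch; |kCategory| and the event name are
// hypothetical, and the category string must have static storage duration):
//   constexpr const char kCategory[] = "renderer";
//   AutoOpenCloseEvent<kCategory> event(
//       AutoOpenCloseEvent<kCategory>::ASYNC, "LongRunningPhase");
//   event.Begin();  // May be called before tracing is enabled.
//   ...             // Traced work.
//   event.End();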
template <const char* category>
class AutoOpenCloseEvent : public TraceLog::AsyncEnabledStateObserver {
public:
enum Type {
ASYNC
};
// As in the rest of the tracing macros, the const char* arguments here
// must be pointers to indefinitely lived strings (e.g. hard-coded string
// literals are okay, but not strings created by c_str())
AutoOpenCloseEvent(Type type, const char* event_name)
: event_name_(event_name) {
base::trace_event::TraceLog::GetInstance()->AddAsyncEnabledStateObserver(
weak_factory_.GetWeakPtr());
}
~AutoOpenCloseEvent() override {
DCHECK(thread_checker_.CalledOnValidThread());
base::trace_event::TraceLog::GetInstance()->RemoveAsyncEnabledStateObserver(
this);
}
void Begin() {
DCHECK(thread_checker_.CalledOnValidThread());
start_time_ = TRACE_TIME_TICKS_NOW();
TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(
category, event_name_, static_cast<void*>(this), start_time_);
}
void End() {
DCHECK(thread_checker_.CalledOnValidThread());
TRACE_EVENT_ASYNC_END0(category, event_name_, static_cast<void*>(this));
start_time_ = base::TimeTicks();
}
// AsyncEnabledStateObserver implementation
void OnTraceLogEnabled() override {
DCHECK(thread_checker_.CalledOnValidThread());
if (!start_time_.is_null()) {
TRACE_EVENT_ASYNC_BEGIN_WITH_TIMESTAMP0(
category, event_name_, static_cast<void*>(this), start_time_);
}
}
void OnTraceLogDisabled() override {}
private:
const char* const event_name_;
base::TimeTicks start_time_;
base::ThreadChecker thread_checker_;
WeakPtrFactory<AutoOpenCloseEvent> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(AutoOpenCloseEvent);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_AUTO_OPEN_CLOSE_EVENT_H_


@@ -0,0 +1,107 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/blame_context.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
namespace base {
namespace trace_event {
BlameContext::BlameContext(const char* category,
const char* name,
const char* type,
const char* scope,
int64_t id,
const BlameContext* parent_context)
: category_(category),
name_(name),
type_(type),
scope_(scope),
id_(id),
parent_scope_(parent_context ? parent_context->scope() : nullptr),
parent_id_(parent_context ? parent_context->id() : 0),
category_group_enabled_(nullptr) {
DCHECK(!parent_context || !std::strcmp(name_, parent_context->name()))
<< "Parent blame context must have the same name";
}
BlameContext::~BlameContext() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(WasInitialized());
TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_DELETE_OBJECT,
category_group_enabled_, type_, scope_, id_,
nullptr, TRACE_EVENT_FLAG_HAS_ID);
trace_event::TraceLog::GetInstance()->RemoveAsyncEnabledStateObserver(this);
}
void BlameContext::Enter() {
DCHECK(WasInitialized());
if (LIKELY(!*category_group_enabled_))
return;
TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_ENTER_CONTEXT,
category_group_enabled_, name_, scope_, id_,
nullptr, TRACE_EVENT_FLAG_HAS_ID);
}
void BlameContext::Leave() {
DCHECK(WasInitialized());
if (LIKELY(!*category_group_enabled_))
return;
TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_LEAVE_CONTEXT,
category_group_enabled_, name_, scope_, id_,
nullptr, TRACE_EVENT_FLAG_HAS_ID);
}
void BlameContext::TakeSnapshot() {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK(WasInitialized());
if (LIKELY(!*category_group_enabled_))
return;
std::unique_ptr<trace_event::TracedValue> snapshot(
new trace_event::TracedValue);
AsValueInto(snapshot.get());
TraceArguments args("snapshot", std::move(snapshot));
TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SNAPSHOT_OBJECT,
category_group_enabled_, type_, scope_, id_,
&args, TRACE_EVENT_FLAG_HAS_ID);
}
void BlameContext::OnTraceLogEnabled() {
DCHECK(WasInitialized());
TakeSnapshot();
}
void BlameContext::OnTraceLogDisabled() {}
void BlameContext::AsValueInto(trace_event::TracedValue* state) {
DCHECK(WasInitialized());
if (!parent_id_)
return;
state->BeginDictionary("parent");
state->SetString("id_ref", StringPrintf("0x%" PRIx64, parent_id_));
state->SetString("scope", parent_scope_);
state->EndDictionary();
}
void BlameContext::Initialize() {
DCHECK(thread_checker_.CalledOnValidThread());
category_group_enabled_ =
TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_);
TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_CREATE_OBJECT,
category_group_enabled_, type_, scope_, id_,
nullptr, TRACE_EVENT_FLAG_HAS_ID);
trace_event::TraceLog::GetInstance()->AddAsyncEnabledStateObserver(
weak_factory_.GetWeakPtr());
TakeSnapshot();
}
bool BlameContext::WasInitialized() const {
return category_group_enabled_ != nullptr;
}
} // namespace trace_event
} // namespace base


@@ -0,0 +1,138 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_BLAME_CONTEXT_H_
#define BASE_TRACE_EVENT_BLAME_CONTEXT_H_
#include <inttypes.h>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/threading/thread_checker.h"
#include "base/trace_event/trace_log.h"
namespace base {
namespace trace_event {
class TracedValue;
}
namespace trace_event {
// A blame context represents a logical unit to which we want to attribute
// different costs (e.g., CPU, network, or memory usage). An example of a blame
// context is an <iframe> element on a web page. Different subsystems can
// "enter" and "leave" blame contexts to indicate that they are doing work which
// should be accounted against this blame context.
//
// A blame context can optionally have a parent context, forming a blame context
// tree. When work is attributed to a particular blame context, it is considered
// to count against all of that context's children too. This is useful when work
// cannot be exactly attributed into a more specific context. For example,
// Javascript garbage collection generally needs to inspect all objects on a
// page instead of looking at each <iframe> individually. In this case the work
// should be attributed to a blame context which is the parent of all <iframe>
// blame contexts.
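// Example usage (illustrative sketch; the identifiers and values below are
// hypothetical):
//   BlameContext frame_context("blink", "FrameTree", "IFrame",
//                              "IFrameIdentifier", /*id=*/1234,
//                              /*parent_context=*/nullptr);
//   frame_context.Initialize();
//   frame_context.Enter();  // Attribute subsequent work to this frame.
//   DoFrameWork();          // Hypothetical work to be accounted.
//   frame_context.Leave();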
class BASE_EXPORT BlameContext
: public trace_event::TraceLog::AsyncEnabledStateObserver {
public:
// Construct a blame context belonging to the blame context tree |name|, using
// the tracing category |category|, identified by |id| from the |scope|
// namespace. |type| identifies the type of this object snapshot in the blame
// context tree. |parent_context| is the parent of this blame context or
// null. Note that all strings must have application lifetime.
//
// For example, a blame context which represents a specific <iframe> in a
// browser frame tree could be specified with:
//
// category="blink",
// name="FrameTree",
// type="IFrame",
// scope="IFrameIdentifier",
// id=1234.
//
// Each <iframe> blame context could have another <iframe> context as a
// parent, or a top-level context which represents the entire browser:
//
// category="blink",
// name="FrameTree",
// type="Browser",
// scope="BrowserIdentifier",
// id=1.
//
// Note that the |name| property is identical, signifying that both context
// types are part of the same tree.
//
BlameContext(const char* category,
const char* name,
const char* type,
const char* scope,
int64_t id,
const BlameContext* parent_context);
~BlameContext() override;
// Initialize the blame context, automatically taking a snapshot if tracing is
// enabled. Must be called before any other methods on this class.
void Initialize();
// Indicate that the current thread is now doing work which should count
// against this blame context. This function may be called on a thread
// different from the one where the blame context was created; however, any
// client doing so is fully responsible for ensuring thread safety.
void Enter();
// Leave and stop doing work for a previously entered blame context. If
// another blame context belonging to the same tree was entered prior to this
// one, it becomes the active blame context for this thread again. Similar
// to Enter(), this function can be called in a thread different from where
// the blame context was created, and the same requirement on thread safety
// must be satisfied.
void Leave();
// Record a snapshot of the blame context. This is normally only needed if a
// blame context subclass defines custom properties (see AsValueInto) and one
// or more of those properties have changed.
void TakeSnapshot();
const char* category() const { return category_; }
const char* name() const { return name_; }
const char* type() const { return type_; }
const char* scope() const { return scope_; }
int64_t id() const { return id_; }
// trace_event::TraceLog::AsyncEnabledStateObserver implementation:
void OnTraceLogEnabled() override;
void OnTraceLogDisabled() override;
protected:
// Serialize the properties of this blame context into |state|. Subclasses can
// override this method to record additional properties (e.g., the URL for an
// <iframe> blame context). Note that an overridden implementation must still
// call this base method.
virtual void AsValueInto(trace_event::TracedValue* state);
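// For example, a hypothetical subclass that records an extra URL property
// could override it as:
//   void AsValueInto(trace_event::TracedValue* state) override {
//     BlameContext::AsValueInto(state);  // Must still call the base method.
//     state->SetString("url", url_);
//   }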
private:
bool WasInitialized() const;
// The following string pointers have application lifetime.
const char* category_;
const char* name_;
const char* type_;
const char* scope_;
const int64_t id_;
const char* parent_scope_;
const int64_t parent_id_;
const unsigned char* category_group_enabled_;
ThreadChecker thread_checker_;
WeakPtrFactory<BlameContext> weak_factory_{this};
DISALLOW_COPY_AND_ASSIGN(BlameContext);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_BLAME_CONTEXT_H_


@@ -0,0 +1,14 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/builtin_categories.h"
namespace base {
namespace trace_event {
constexpr const char* BuiltinCategories::kBuiltinCategories[];
constexpr const char* BuiltinCategories::kCategoriesForTesting[];
} // namespace trace_event
} // namespace base


@@ -0,0 +1,396 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_BUILTIN_CATEGORIES_H_
#define BASE_TRACE_EVENT_BUILTIN_CATEGORIES_H_
#include "base/base_export.h"
#include "base/macros.h"
#include "base/stl_util.h"
#include "base/trace_event/common/trace_event_common.h"
#include "build/build_config.h"
// List of builtin category names. If you want to use a new category name in
// your code and you get a static assert, this is the right place to register
// the name. If the name is going to be used only for testing, please add it to
// |kCategoriesForTesting| instead.
//
// Prefer to use '_' to separate words in a category name, like content_capture.
//
// Parameter |X| must be a *macro* that takes a single |name| string argument,
// denoting a category name.
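// For example (illustrative sketch), the list can be expanded into an array of
// category names with a hypothetical macro:
//   #define MY_CATEGORY_NAME(name) name,
//   const char* kNames[] = {
//       INTERNAL_TRACE_LIST_BUILTIN_CATEGORIES(MY_CATEGORY_NAME)};
//   #undef MY_CATEGORY_NAME
// This is how INTERNAL_TRACE_INIT_CATEGORY_NAME below is used to build
// BuiltinCategories::kBuiltinCategories.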
#define INTERNAL_TRACE_LIST_BUILTIN_CATEGORIES(X) \
/* These entries must go first to be consistent with the \
* CategoryRegistry::kCategory* consts.*/ \
X("tracing categories exhausted; must increase kMaxCategories") \
X("tracing already shutdown") \
X("__metadata") \
/* The rest of the list is in alphabetical order */ \
X("accessibility") \
X("AccountFetcherService") \
X("android_webview") \
X("audio") \
X("base") \
X("benchmark") \
X("blink") \
X("blink.bindings") \
X("blink.animations") \
X("blink.console") \
X("blink_gc") \
X("blink.net") \
X("blink_style") \
X("blink.user_timing") \
X("blink.worker") \
X("Blob") \
X("browser") \
X("browsing_data") \
X("CacheStorage") \
X("camera") \
X("cast_perf_test") \
X("cast.stream") \
X("cc") \
X("cc.debug") \
X("cdp.perf") \
X("chromeos") \
X("cma") \
X("compositor") \
X("content") \
X("content_capture") \
X("devtools") \
X("devtools.timeline") \
X("devtools.timeline.async") \
X("disk_cache") \
X("download") \
X("download_service") \
X("drm") \
X("drmcursor") \
X("dwrite") \
X("DXVA Decoding") \
X("EarlyJava") \
X("evdev") \
X("event") \
X("exo") \
X("explore_sites") \
X("FileSystem") \
X("file_system_provider") \
X("fonts") \
X("GAMEPAD") \
X("gpu") \
X("gpu.capture") \
X("headless") \
X("hwoverlays") \
X("identity") \
X("IndexedDB") \
X("input") \
X("io") \
X("ipc") \
X("Java") \
X("jni") \
X("jpeg") \
X("latency") \
X("latencyInfo") \
X("leveldb") \
X("loading") \
X("log") \
X("login") \
X("media") \
X("media_router") \
X("memory") \
X("midi") \
X("mojom") \
X("mus") \
X("native") \
X("navigation") \
X("net") \
X("netlog") \
X("offline_pages") \
X("omnibox") \
X("oobe") \
X("ozone") \
X("passwords") \
X("p2p") \
X("page-serialization") \
X("pepper") \
X("ppapi") \
X("ppapi proxy") \
X("rail") \
X("renderer") \
X("renderer_host") \
X("renderer.scheduler") \
X("RLZ") \
X("safe_browsing") \
X("screenlock_monitor") \
X("sequence_manager") \
X("service_manager") \
X("ServiceWorker") \
X("sharing") \
X("shell") \
X("shortcut_viewer") \
X("shutdown") \
X("SiteEngagement") \
X("skia") \
X("sql") \
X("startup") \
X("sync") \
X("sync_lock_contention") \
X("thread_pool") \
X("test_gpu") \
X("test_tracing") \
X("toplevel") \
X("ui") \
X("v8") \
X("v8.execute") \
X("ValueStoreFrontend::Backend") \
X("views") \
X("views.frame") \
X("viz") \
X("vk") \
X("wayland") \
X("webaudio") \
X("weblayer") \
X("WebCore") \
X("webrtc") \
X("xr") \
X(TRACE_DISABLED_BY_DEFAULT("animation-worklet")) \
X(TRACE_DISABLED_BY_DEFAULT("audio-worklet")) \
X(TRACE_DISABLED_BY_DEFAULT("blink.debug")) \
X(TRACE_DISABLED_BY_DEFAULT("blink.debug.display_lock")) \
X(TRACE_DISABLED_BY_DEFAULT("blink.debug.layout")) \
X(TRACE_DISABLED_BY_DEFAULT("blink.debug.layout.trees")) \
X(TRACE_DISABLED_BY_DEFAULT("blink.feature_usage")) \
X(TRACE_DISABLED_BY_DEFAULT("blink_gc")) \
X(TRACE_DISABLED_BY_DEFAULT("blink.image_decoding")) \
X(TRACE_DISABLED_BY_DEFAULT("blink.invalidation")) \
X(TRACE_DISABLED_BY_DEFAULT("cc")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug.cdp-perf")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug.display_items")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug.picture")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug.scheduler")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug.scheduler.frames")) \
X(TRACE_DISABLED_BY_DEFAULT("cc.debug.scheduler.now")) \
X(TRACE_DISABLED_BY_DEFAULT("cpu_profiler")) \
X(TRACE_DISABLED_BY_DEFAULT("cpu_profiler.debug")) \
X(TRACE_DISABLED_BY_DEFAULT("devtools.screenshot")) \
X(TRACE_DISABLED_BY_DEFAULT("devtools.timeline")) \
X(TRACE_DISABLED_BY_DEFAULT("devtools.timeline.frame")) \
X(TRACE_DISABLED_BY_DEFAULT("devtools.timeline.inputs")) \
X(TRACE_DISABLED_BY_DEFAULT("devtools.timeline.invalidationTracking")) \
X(TRACE_DISABLED_BY_DEFAULT("devtools.timeline.layers")) \
X(TRACE_DISABLED_BY_DEFAULT("devtools.timeline.picture")) \
X(TRACE_DISABLED_BY_DEFAULT("file")) \
X(TRACE_DISABLED_BY_DEFAULT("fonts")) \
X(TRACE_DISABLED_BY_DEFAULT("gpu_cmd_queue")) \
X(TRACE_DISABLED_BY_DEFAULT("gpu.dawn")) \
X(TRACE_DISABLED_BY_DEFAULT("gpu.debug")) \
X(TRACE_DISABLED_BY_DEFAULT("gpu.decoder")) \
X(TRACE_DISABLED_BY_DEFAULT("gpu.device")) \
X(TRACE_DISABLED_BY_DEFAULT("gpu.service")) \
X(TRACE_DISABLED_BY_DEFAULT("histogram_samples")) \
X(TRACE_DISABLED_BY_DEFAULT("java-heap-profiler")) \
X(TRACE_DISABLED_BY_DEFAULT("layer-element")) \
X(TRACE_DISABLED_BY_DEFAULT("layout_shift.debug")) \
X(TRACE_DISABLED_BY_DEFAULT("lifecycles")) \
X(TRACE_DISABLED_BY_DEFAULT("loading")) \
X(TRACE_DISABLED_BY_DEFAULT("memory-infra")) \
X(TRACE_DISABLED_BY_DEFAULT("memory-infra.v8.code_stats")) \
X(TRACE_DISABLED_BY_DEFAULT("net")) \
X(TRACE_DISABLED_BY_DEFAULT("network")) \
X(TRACE_DISABLED_BY_DEFAULT("paint-worklet")) \
X(TRACE_DISABLED_BY_DEFAULT("power")) \
X(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler")) \
X(TRACE_DISABLED_BY_DEFAULT("renderer.scheduler.debug")) \
X(TRACE_DISABLED_BY_DEFAULT("sequence_manager")) \
X(TRACE_DISABLED_BY_DEFAULT("sequence_manager.debug")) \
X(TRACE_DISABLED_BY_DEFAULT("sequence_manager.verbose_snapshots")) \
X(TRACE_DISABLED_BY_DEFAULT("skia")) \
X(TRACE_DISABLED_BY_DEFAULT("skia.gpu")) \
X(TRACE_DISABLED_BY_DEFAULT("skia.gpu.cache")) \
X(TRACE_DISABLED_BY_DEFAULT("SyncFileSystem")) \
X(TRACE_DISABLED_BY_DEFAULT("system_stats")) \
X(TRACE_DISABLED_BY_DEFAULT("thread_pool_diagnostics")) \
X(TRACE_DISABLED_BY_DEFAULT("toplevel.flow")) \
X(TRACE_DISABLED_BY_DEFAULT("toplevel.ipc")) \
X(TRACE_DISABLED_BY_DEFAULT("user_action_samples")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.compile")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.cpu_profiler.hires")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.gc")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.gc_stats")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.ic_stats")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.runtime")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats_sampling")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.turbofan")) \
X(TRACE_DISABLED_BY_DEFAULT("v8.wasm")) \
X(TRACE_DISABLED_BY_DEFAULT("video_and_image_capture")) \
X(TRACE_DISABLED_BY_DEFAULT("viz.debug.overlay_planes")) \
X(TRACE_DISABLED_BY_DEFAULT("viz.hit_testing_flow")) \
X(TRACE_DISABLED_BY_DEFAULT("viz.overdraw")) \
X(TRACE_DISABLED_BY_DEFAULT("viz.quads")) \
X(TRACE_DISABLED_BY_DEFAULT("viz.surface_id_flow")) \
X(TRACE_DISABLED_BY_DEFAULT("viz.surface_lifetime")) \
X(TRACE_DISABLED_BY_DEFAULT("viz.triangles")) \
X(TRACE_DISABLED_BY_DEFAULT("webaudio.audionode")) \
X(TRACE_DISABLED_BY_DEFAULT("worker.scheduler"))
#define INTERNAL_TRACE_INIT_CATEGORY_NAME(name) name,
#define INTERNAL_TRACE_INIT_CATEGORY(name) {0, 0, name},
namespace base {
namespace trace_event {
// Constexpr version of string comparison operator. |a| and |b| must be valid
// C-style strings known at compile-time.
constexpr bool StrEqConstexpr(const char* a, const char* b) {
for (; *a != '\0' && *b != '\0'; ++a, ++b) {
if (*a != *b)
return false;
}
return *a == *b;
}
// Tests for |StrEqConstexpr()|.
static_assert(StrEqConstexpr("foo", "foo"), "strings should be equal");
static_assert(!StrEqConstexpr("foo", "Foo"), "strings should not be equal");
static_assert(!StrEqConstexpr("foo", "foo1"), "strings should not be equal");
static_assert(!StrEqConstexpr("foo2", "foo"), "strings should not be equal");
static_assert(StrEqConstexpr("", ""), "strings should be equal");
static_assert(!StrEqConstexpr("foo", ""), "strings should not be equal");
static_assert(!StrEqConstexpr("", "foo"), "strings should not be equal");
static_assert(!StrEqConstexpr("ab", "abc"), "strings should not be equal");
static_assert(!StrEqConstexpr("abc", "ab"), "strings should not be equal");
// Static-only class providing access to the compile-time registry of trace
// categories.
class BASE_EXPORT BuiltinCategories {
public:
// Returns a built-in category name at |index| in the registry.
static constexpr const char* At(size_t index) {
return kBuiltinCategories[index];
}
// Returns the number of built-in categories in the registry.
static constexpr size_t Size() { return base::size(kBuiltinCategories); }
// Where in the builtin category list to start when populating the
// about://tracing UI.
static constexpr size_t kVisibleCategoryStart = 3;
// Returns whether the category is either:
// - Properly registered in the builtin list.
// - Consists of several categories separated by commas.
// - Used only in tests.
// All trace categories are checked against this. A static_assert is triggered
// if at least one category fails this check.
static constexpr bool IsAllowedCategory(const char* category) {
#if defined(OS_WIN) && defined(COMPONENT_BUILD)
return true;
#else
return IsBuiltinCategory(category) ||
IsCommaSeparatedCategoryGroup(category) ||
IsCategoryForTesting(category);
#endif
}
private:
// The array of built-in category names used for compile-time lookup.
static constexpr const char* kBuiltinCategories[] = {
INTERNAL_TRACE_LIST_BUILTIN_CATEGORIES(
INTERNAL_TRACE_INIT_CATEGORY_NAME)};
// The array of category names used only for testing. It's kept separately
// from the main list to avoid allocating the space for them in the binary.
static constexpr const char* kCategoriesForTesting[] = {
"\001\002\003\n\r",
"a",
"all",
"b",
"b1",
"c",
"c0",
"c1",
"c2",
"c3",
"c4",
"cat",
"cat1",
"cat2",
"cat3",
"cat4",
"cat5",
"cat6",
"category",
"drink",
"excluded_cat",
"filtered_cat",
"foo",
"inc",
"inc2",
"included",
"inc_wildcard_",
"inc_wildcard_abc",
"inc_wildchar_bla_end",
"inc_wildchar_x_end",
"kTestCategory",
"log",
"noise",
"other_included",
"test",
"test_category",
"Testing",
"TraceEventAgentTestCategory",
"unfiltered_cat",
"whitewashed",
"x",
TRACE_DISABLED_BY_DEFAULT("c9"),
TRACE_DISABLED_BY_DEFAULT("cat"),
TRACE_DISABLED_BY_DEFAULT("filtered_cat"),
TRACE_DISABLED_BY_DEFAULT("NotTesting"),
TRACE_DISABLED_BY_DEFAULT("Testing"),
TRACE_DISABLED_BY_DEFAULT("unfiltered_cat")};
// Returns whether |str| is in |array| of |array_len|.
static constexpr bool IsStringInArray(const char* str,
const char* const array[],
size_t array_len) {
for (size_t i = 0; i < array_len; ++i) {
if (StrEqConstexpr(str, array[i]))
return true;
}
return false;
}
// Returns whether |category_group| contains a ',' symbol, denoting that an
// event belongs to several categories. We don't add such strings in the
// builtin list but allow them to pass the static assert.
static constexpr bool IsCommaSeparatedCategoryGroup(
const char* category_group) {
for (; *category_group != '\0'; ++category_group) {
if (*category_group == ',')
return true;
}
return false;
}
// Returns whether |category| is used only for testing.
static constexpr bool IsCategoryForTesting(const char* category) {
return IsStringInArray(category, kCategoriesForTesting,
base::size(kCategoriesForTesting));
}
// Returns whether |category| is registered in the builtin list.
static constexpr bool IsBuiltinCategory(const char* category) {
return IsStringInArray(category, kBuiltinCategories,
base::size(kBuiltinCategories));
}
DISALLOW_IMPLICIT_CONSTRUCTORS(BuiltinCategories);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_BUILTIN_CATEGORIES_H_


@@ -0,0 +1,149 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/category_registry.h"
#include <string.h>
#include <type_traits>
#include "base/debug/leak_annotations.h"
#include "base/logging.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
namespace base {
namespace trace_event {
namespace {
// |categories_| might end up creating dynamic initializers if not POD.
static_assert(std::is_pod<TraceCategory>::value, "TraceCategory must be POD");
} // namespace
// static
TraceCategory CategoryRegistry::categories_[kMaxCategories] = {
INTERNAL_TRACE_LIST_BUILTIN_CATEGORIES(INTERNAL_TRACE_INIT_CATEGORY)};
// static
base::subtle::AtomicWord CategoryRegistry::category_index_ =
BuiltinCategories::Size();
// static
TraceCategory* const CategoryRegistry::kCategoryExhausted = &categories_[0];
TraceCategory* const CategoryRegistry::kCategoryAlreadyShutdown =
&categories_[1];
TraceCategory* const CategoryRegistry::kCategoryMetadata = &categories_[2];
// static
void CategoryRegistry::Initialize() {
// Trace is enabled or disabled on one thread while other threads are
// accessing the enabled flag. We don't care whether edge-case events are
// traced or not, so we allow races on the enabled flag to keep the trace
// macros fast.
for (size_t i = 0; i < kMaxCategories; ++i) {
ANNOTATE_BENIGN_RACE(categories_[i].state_ptr(),
"trace_event category enabled");
// If this DCHECK is hit in a test it means that ResetForTesting() is not
// called and the categories state leaks between test fixtures.
DCHECK(!categories_[i].is_enabled());
}
}
// static
void CategoryRegistry::ResetForTesting() {
// reset_for_testing clears up only the enabled state and filters. The
// categories themselves cannot be cleared up because the static pointers
// injected by the macros still point to them and cannot be reset.
for (size_t i = 0; i < kMaxCategories; ++i)
categories_[i].reset_for_testing();
}
// static
TraceCategory* CategoryRegistry::GetCategoryByName(const char* category_name) {
DCHECK(!strchr(category_name, '"'))
<< "Category names may not contain double quote";
// The |categories_| array is append-only, so avoid using a lock for the fast path.
size_t category_index = base::subtle::Acquire_Load(&category_index_);
// Search for pre-existing category group.
for (size_t i = 0; i < category_index; ++i) {
if (strcmp(categories_[i].name(), category_name) == 0) {
return &categories_[i];
}
}
return nullptr;
}
bool CategoryRegistry::GetOrCreateCategoryLocked(
const char* category_name,
CategoryInitializerFn category_initializer_fn,
TraceCategory** category) {
// This is the slow path: the lock is not held in the fast path
// (GetCategoryByName), so more than one thread could have reached here trying
// to add the same category.
*category = GetCategoryByName(category_name);
if (*category)
return false;
// Create a new category.
size_t category_index = base::subtle::Acquire_Load(&category_index_);
if (category_index >= kMaxCategories) {
NOTREACHED() << "must increase kMaxCategories";
*category = kCategoryExhausted;
return false;
}
// TODO(primiano): this strdup should be removed. The only documented reason
// for it was TraceWatchEvent, which is gone. However, something might have
// ended up relying on this. Needs some auditing before removal.
const char* category_name_copy = strdup(category_name);
ANNOTATE_LEAKING_OBJECT_PTR(category_name_copy);
*category = &categories_[category_index];
DCHECK(!(*category)->is_valid());
DCHECK(!(*category)->is_enabled());
(*category)->set_name(category_name_copy);
category_initializer_fn(*category);
// Update the max index now.
base::subtle::Release_Store(&category_index_, category_index + 1);
return true;
}
// static
const TraceCategory* CategoryRegistry::GetCategoryByStatePtr(
const uint8_t* category_state) {
const TraceCategory* category = TraceCategory::FromStatePtr(category_state);
DCHECK(IsValidCategoryPtr(category));
return category;
}
// static
bool CategoryRegistry::IsMetaCategory(const TraceCategory* category) {
DCHECK(IsValidCategoryPtr(category));
return category <= kCategoryMetadata;
}
// static
CategoryRegistry::Range CategoryRegistry::GetAllCategories() {
// The |categories_| array is append-only. We only have to guarantee that we
// do not return an entry for a category which is still being initialized by
// GetOrCreateCategoryLocked().
size_t category_index = base::subtle::Acquire_Load(&category_index_);
return CategoryRegistry::Range(&categories_[0], &categories_[category_index]);
}
// static
bool CategoryRegistry::IsValidCategoryPtr(const TraceCategory* category) {
// If any of these are hit, something has cached a corrupt category pointer.
uintptr_t ptr = reinterpret_cast<uintptr_t>(category);
return ptr % sizeof(void*) == 0 &&
ptr >= reinterpret_cast<uintptr_t>(&categories_[0]) &&
ptr <= reinterpret_cast<uintptr_t>(&categories_[kMaxCategories - 1]);
}
} // namespace trace_event
} // namespace base


@@ -0,0 +1,134 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
#define BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_
#include <stddef.h>
#include <stdint.h>
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/trace_event/builtin_categories.h"
#include "base/trace_event/common/trace_event_common.h"
#include "base/trace_event/trace_category.h"
#include "build/build_config.h"
namespace base {
namespace trace_event {
class TraceCategoryTest;
class TraceLog;
// Allows fast and thread-safe access to the state of all tracing categories.
// All the methods in this class can be concurrently called on multiple threads,
// unless otherwise noted (e.g., GetOrCreateCategoryLocked).
// The reason why this is a fully static class with global state is to allow
// statically defining known categories as global linker-initialized structs,
// without requiring static initializers.
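// For example (illustrative sketch), a typical lookup done on behalf of the
// trace macros is:
//   TraceCategory* category = CategoryRegistry::GetCategoryByName("toplevel");
//   if (category && category->is_enabled()) {
//     // Emit a trace event for this category.
//   }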
class BASE_EXPORT CategoryRegistry {
public:
// Allows for-each iterations over a slice of the categories array.
class Range {
public:
Range(TraceCategory* begin, TraceCategory* end) : begin_(begin), end_(end) {
DCHECK_LE(begin, end);
}
TraceCategory* begin() const { return begin_; }
TraceCategory* end() const { return end_; }
private:
TraceCategory* const begin_;
TraceCategory* const end_;
};
// Known categories.
static TraceCategory* const kCategoryExhausted;
static TraceCategory* const kCategoryMetadata;
static TraceCategory* const kCategoryAlreadyShutdown;
// Returns a category entry from the Category.state_ptr() pointer.
// TODO(primiano): trace macros should just keep a pointer to the entire
// TraceCategory, not just the enabled state pointer. That would remove the
// need for this function and make everything cleaner at no extra cost (as
// long as the |state_| is the first field of the struct, which can be
// guaranteed via static_assert, see TraceCategory ctor).
static const TraceCategory* GetCategoryByStatePtr(
const uint8_t* category_state);
// Returns a category from its name or nullptr if not found.
// The returned pointer is an indefinitely lived pointer to the
// TraceCategory owned by the registry. TRACE_EVENTx macros will cache this
// pointer and use it for checks in their fast-paths.
static TraceCategory* GetCategoryByName(const char* category_name);
// Returns a built-in category from its name or nullptr if not found at
// compile-time. The return value is an indefinitely lived pointer to the
// TraceCategory owned by the registry.
static constexpr TraceCategory* GetBuiltinCategoryByName(
const char* category_group) {
#if defined(OS_WIN) && defined(COMPONENT_BUILD)
// The address cannot be evaluated at compile-time in Windows component
// builds.
return nullptr;
#else
for (size_t i = 0; i < BuiltinCategories::Size(); ++i) {
if (StrEqConstexpr(category_group, BuiltinCategories::At(i)))
return &categories_[i];
}
return nullptr;
#endif
}
// Returns whether |category| points at one of the meta categories that
// shouldn't be displayed in the tracing UI.
static bool IsMetaCategory(const TraceCategory* category);
private:
friend class TraceCategoryTest;
friend class TraceLog;
using CategoryInitializerFn = void (*)(TraceCategory*);
// The max number of trace categories that can be recorded.
static constexpr size_t kMaxCategories = 300;
// Checks that there is enough space for all builtin categories.
static_assert(BuiltinCategories::Size() <= kMaxCategories,
"kMaxCategories must be greater than kNumBuiltinCategories");
// Only for debugging/testing purposes, is a no-op on release builds.
static void Initialize();
// Resets the state of all categories, to clear up the state between tests.
static void ResetForTesting();
// Used to get/create a category in the slow-path. If the category exists
// already, this has the same effect as GetCategoryByName() and returns false.
// If not, a new category is created and the CategoryInitializerFn is invoked
// before returning true. The caller must guarantee serialization: either call
// this method from a single thread or hold a lock when calling this.
static bool GetOrCreateCategoryLocked(const char* category_name,
CategoryInitializerFn,
TraceCategory**);
// Allows iterating over the valid categories in a for-each loop.
// This includes builtin categories such as __metadata.
static Range GetAllCategories();
// Returns whether |category| correctly points at |categories_| array entry.
static bool IsValidCategoryPtr(const TraceCategory* category);
// The static array of trace categories.
static TraceCategory categories_[kMaxCategories];
// Contains the number of created categories.
static base::subtle::AtomicWord category_index_;
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_CATEGORY_REGISTRY_H_


@@ -0,0 +1,350 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/cfi_backtrace_android.h"
#include <sys/mman.h>
#include <sys/types.h>
#include "base/android/apk_assets.h"
#include "base/android/library_loader/anchor_functions.h"
#if !defined(ARCH_CPU_ARMEL)
#error This file should not be built for this architecture.
#endif
/*
Basics of unwinding:
For each instruction in a function we need to know the offset of SP
(Stack Pointer) required to reach the previous function's stack frame. To know
which function is being invoked, we need the return address of the next
function. The CFI information for an instruction is made up of 2 offsets, the
CFA (Call Frame Address) offset and the RA (Return Address) offset. The CFA
offset is the change in SP made by the function up to the current instruction.
It depends on the amount of memory the function allocates on the stack plus any
registers the function saves and must restore at the end of the function. So,
at each instruction the CFA offset gives the offset from the original SP before
the function call. The RA offset gives the offset from the previous SP into the
current function where the return address is stored.
The unwind table file has 2 tables, UNW_INDEX and UNW_DATA, inspired by the ARM
EHABI format. The first table contains function addresses and an index into the
UNW_DATA table. The second table contains one or more rows of unwind
information for each function.
UNW_INDEX contains two columns of N rows each, where N is the number of
functions.
1. The first column contains 4-byte rows holding each function's start address
as an offset from the start of the binary, in sorted order.
2. For each function address, the second column contains 2-byte indices in the
same order. The indices are offsets (in counts of 2 bytes) of the CFI data
from the start of UNW_DATA.
The last entry in the table always contains CANT_UNWIND index to specify the
end address of the last function.
UNW_DATA contains the data for all functions. Each function's data contains N rows.
The data found at the address pointed from UNW_INDEX will be:
2 bytes: N - number of rows that belong to current function.
N * 4 bytes: N rows of data. 16 bits : Address offset from function start.
14 bits : CFA offset / 4.
2 bits : RA offset / 4.
If the RA offset of a row is 0, then use the offset of the previous rows in the
same function.
TODO(ssid): Make sure RA offset is always present.
See extract_unwind_tables.py for details about how this data is extracted from
breakpad symbol files.
*/
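// Worked example (illustrative numbers, not taken from a real table): if the
// CFI row found for the current PC decodes to cfa_offset = 16 and
// ra_offset = 4, the caller's frame is recovered as
//   SP_prev = SP_cur + 16;
//   PC_prev = *reinterpret_cast<uintptr_t*>(SP_prev - 4);
// which is what the unwind loop in CFIBacktraceAndroid::Unwind() does below.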
extern "C" {
// The address of |__executable_start| gives the start address of the
// executable or shared library. This value is used to find the offset address
// of the instruction in binary from PC.
extern char __executable_start;
}
namespace base {
namespace trace_event {
namespace {
// The value of index when the function does not have unwind information.
constexpr uint32_t kCantUnwind = 0xFFFF;
// The mask on the CFI row data that is used to get the high 14 bits and
// multiply it by 4 to get CFA offset. Since the last 2 bits are masked out, a
// shift is not necessary.
constexpr uint16_t kCFAMask = 0xfffc;
// The mask on the CFI row data that is used to get the low 2 bits and multiply
// it by 4 to get the RA offset.
constexpr uint16_t kRAMask = 0x3;
constexpr uint16_t kRAShift = 2;
// The code in this file assumes we are running in 32-bit builds since all the
// addresses in the unwind table are specified in 32 bits.
static_assert(sizeof(uintptr_t) == 4,
"The unwind table format is only valid for 32 bit builds.");
// The CFI data in UNW_DATA table starts with number of rows (N) and then
// followed by N rows of 4 bytes long. The CFIUnwindDataRow represents a single
// row of CFI data of a function in the table. Since we cast the memory at the
// address after the address of number of rows, into an array of
// CFIUnwindDataRow, the size of the struct should be 4 bytes and the order of
// the members is fixed according to the given format. The first 2 bytes tell
// the address of function and last 2 bytes give the CFI data for the offset.
struct CFIUnwindDataRow {
// The address of the instruction in terms of offset from the start of the
// function.
uint16_t addr_offset;
// Represents the CFA and RA offsets to get information about next stack
// frame. This is the CFI data at the point before executing the instruction
// at |addr_offset| from the start of the function.
uint16_t cfi_data;
// Return the RA offset for the current unwind row.
size_t ra_offset() const { return (cfi_data & kRAMask) << kRAShift; }
// Returns the CFA offset for the current unwind row.
size_t cfa_offset() const { return cfi_data & kCFAMask; }
};
static_assert(
sizeof(CFIUnwindDataRow) == 4,
"The CFIUnwindDataRow struct must be exactly 4 bytes for searching.");
} // namespace
// static
CFIBacktraceAndroid* CFIBacktraceAndroid::GetInitializedInstance() {
static CFIBacktraceAndroid* instance = new CFIBacktraceAndroid();
return instance;
}
// static
bool CFIBacktraceAndroid::is_chrome_address(uintptr_t pc) {
return pc >= base::android::kStartOfText && pc < executable_end_addr();
}
// static
uintptr_t CFIBacktraceAndroid::executable_start_addr() {
return reinterpret_cast<uintptr_t>(&__executable_start);
}
// static
uintptr_t CFIBacktraceAndroid::executable_end_addr() {
return base::android::kEndOfText;
}
CFIBacktraceAndroid::CFIBacktraceAndroid()
: thread_local_cfi_cache_(
[](void* ptr) { delete static_cast<CFICache*>(ptr); }) {
Initialize();
}
CFIBacktraceAndroid::~CFIBacktraceAndroid() {}
void CFIBacktraceAndroid::Initialize() {
// This file name is defined by extract_unwind_tables.gni.
static constexpr char kCfiFileName[] = "assets/unwind_cfi_32";
MemoryMappedFile::Region cfi_region;
int fd = base::android::OpenApkAsset(kCfiFileName, &cfi_region);
if (fd < 0)
return;
cfi_mmap_ = std::make_unique<MemoryMappedFile>();
// The CFI region starts at |cfi_region.offset|.
if (!cfi_mmap_->Initialize(base::File(fd), cfi_region))
return;
ParseCFITables();
can_unwind_stack_frames_ = true;
}
void CFIBacktraceAndroid::ParseCFITables() {
// The first 4 bytes in the file is the number of entries in UNW_INDEX table.
size_t unw_index_size = 0;
memcpy(&unw_index_size, cfi_mmap_->data(), sizeof(unw_index_size));
// UNW_INDEX table starts after 4 bytes.
unw_index_function_col_ =
reinterpret_cast<const uintptr_t*>(cfi_mmap_->data()) + 1;
unw_index_row_count_ = unw_index_size;
unw_index_indices_col_ = reinterpret_cast<const uint16_t*>(
unw_index_function_col_ + unw_index_row_count_);
// The UNW_DATA table data is right after the end of UNW_INDEX table.
// Interpret the UNW_DATA table as an array of 2 byte numbers since the
// indexes we have from the UNW_INDEX table are in terms of 2 bytes.
unw_data_start_addr_ = unw_index_indices_col_ + unw_index_row_count_;
}
size_t CFIBacktraceAndroid::Unwind(const void** out_trace, size_t max_depth) {
// This function walks the stack using the call frame information to find the
// return addresses of all the functions in the call stack that belong to the
// current binary. For each function, the CFI table defines the offset of the
// previous call frame and the offset where the return address is stored.
if (!can_unwind_stack_frames())
return 0;
// Get the current register state. This register state can be taken at any
// point in the function and the unwind information would be for this point.
// Define local variables before trying to get the current PC and SP to make
// sure the register state obtained is consistent with each other.
uintptr_t pc = 0, sp = 0;
asm volatile("mov %0, pc" : "=r"(pc));
asm volatile("mov %0, sp" : "=r"(sp));
return Unwind(pc, sp, /*lr=*/0, out_trace, max_depth);
}
size_t CFIBacktraceAndroid::Unwind(uintptr_t pc,
uintptr_t sp,
uintptr_t lr,
const void** out_trace,
size_t max_depth) {
if (!can_unwind_stack_frames())
return 0;
// We can only unwind as long as the pc is within the chrome.so.
size_t depth = 0;
while (is_chrome_address(pc) && depth < max_depth) {
out_trace[depth++] = reinterpret_cast<void*>(pc);
// The offset of function from the start of the chrome.so binary:
uintptr_t func_addr = pc - executable_start_addr();
CFIRow cfi{};
if (!FindCFIRowForPC(func_addr, &cfi)) {
if (depth == 1 && lr != 0 && pc != lr) {
// If CFI data is not found for the frame, then we stopped in prolog of
// a function. The return address is stored in LR when in function
// prolog. So, update the PC with address in LR and do not update SP
// since SP was not updated by the prolog yet.
// TODO(ssid): Write tests / add info to detect if we are actually in
// function prolog. https://crbug.com/898276
pc = lr;
continue;
}
break;
}
// The rules for unwinding using the CFI information are:
// SP_prev = SP_cur + cfa_offset and
// PC_prev = * (SP_prev - ra_offset).
sp = sp + cfi.cfa_offset;
memcpy(&pc, reinterpret_cast<uintptr_t*>(sp - cfi.ra_offset),
sizeof(uintptr_t));
}
return depth;
}
void CFIBacktraceAndroid::AllocateCacheForCurrentThread() {
GetThreadLocalCFICache();
}
bool CFIBacktraceAndroid::FindCFIRowForPC(uintptr_t func_addr,
CFIBacktraceAndroid::CFIRow* cfi) {
if (!can_unwind_stack_frames())
return false;
auto* cache = GetThreadLocalCFICache();
*cfi = {0};
if (cache->Find(func_addr, cfi))
return true;
// Consider each column of UNW_INDEX table as arrays of uintptr_t (function
// addresses) and uint16_t (indices). Define start and end iterator on the
// first column array (addresses) and use std::lower_bound() to binary search
// on this array to find the required function address.
static const uintptr_t* const unw_index_fn_end =
unw_index_function_col_ + unw_index_row_count_;
const uintptr_t* found =
std::lower_bound(unw_index_function_col_, unw_index_fn_end, func_addr);
// If found is start, then the given function is not in the table. If the
// given pc is start of a function then we cannot unwind.
if (found == unw_index_function_col_ || *found == func_addr)
return false;
// std::lower_bound() returns the iter that corresponds to the first address
// that is greater than the given address. So, the required iter is always one
// less than the value returned by std::lower_bound().
--found;
uintptr_t func_start_addr = *found;
size_t row_num = found - unw_index_function_col_;
uint16_t index = unw_index_indices_col_[row_num];
DCHECK_LE(func_start_addr, func_addr);
// If the index is CANT_UNWIND then we do not have unwind information for the
// function.
if (index == kCantUnwind)
return false;
// The unwind data for the current function is at an offset of the index
// found in UNW_INDEX table.
const uint16_t* unwind_data = unw_data_start_addr_ + index;
// The value of first 2 bytes is the CFI data row count for the function.
uint16_t row_count = 0;
memcpy(&row_count, unwind_data, sizeof(row_count));
// And the actual CFI rows start after 2 bytes from the |unwind_data|. Cast
// the data into an array of CFIUnwindDataRow since the struct is designed to
// represent each row. We should be careful to read only |row_count| number of
// elements in the array.
const CFIUnwindDataRow* function_data =
reinterpret_cast<const CFIUnwindDataRow*>(unwind_data + 1);
// Iterate through the CFI rows of the function to find the row that gives
// offset for the given instruction address.
CFIUnwindDataRow cfi_row = {0, 0};
uint16_t ra_offset = 0;
for (uint16_t i = 0; i < row_count; ++i) {
CFIUnwindDataRow row;
memcpy(&row, function_data + i, sizeof(CFIUnwindDataRow));
// The return address of the function is the instruction that has not yet
// been executed. The CFI row specifies the unwind info before executing the
// given instruction. If the given address is equal to the instruction
// offset, then use the current row. Or use the row with highest address
// less than the given address.
if (row.addr_offset + func_start_addr > func_addr)
break;
cfi_row = row;
// The ra offset of the last specified row should be used, if unspecified.
// So, keep updating the RA offset till we reach the correct CFI row.
// TODO(ssid): This should be fixed in the format and we should always
// output ra offset.
if (cfi_row.ra_offset())
ra_offset = cfi_row.ra_offset();
}
DCHECK_NE(0u, cfi_row.addr_offset);
*cfi = {cfi_row.cfa_offset(), ra_offset};
DCHECK(cfi->cfa_offset);
DCHECK(cfi->ra_offset);
// safe to update since the cache is thread local.
cache->Add(func_addr, *cfi);
return true;
}
CFIBacktraceAndroid::CFICache* CFIBacktraceAndroid::GetThreadLocalCFICache() {
auto* cache = static_cast<CFICache*>(thread_local_cfi_cache_.Get());
if (!cache) {
cache = new CFICache();
thread_local_cfi_cache_.Set(cache);
}
return cache;
}
void CFIBacktraceAndroid::CFICache::Add(uintptr_t address, CFIRow cfi) {
cache_[address % kLimit] = {address, cfi};
}
bool CFIBacktraceAndroid::CFICache::Find(uintptr_t address, CFIRow* cfi) {
if (cache_[address % kLimit].address == address) {
*cfi = cache_[address % kLimit].cfi;
return true;
}
return false;
}
} // namespace trace_event
} // namespace base


@@ -0,0 +1,175 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_CFI_BACKTRACE_ANDROID_H_
#define BASE_TRACE_EVENT_CFI_BACKTRACE_ANDROID_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include "base/base_export.h"
#include "base/debug/debugging_buildflags.h"
#include "base/files/memory_mapped_file.h"
#include "base/gtest_prod_util.h"
#include "base/threading/thread_local_storage.h"
namespace base {
namespace trace_event {
// This class is used to unwind stack frames in the current thread. The unwind
// information (dwarf debug info) is stripped from the chrome binary and we do
// not build with exception tables (ARM EHABI) in release builds. So, we use a
// custom unwind table which is generated and added to specific android builds,
// when add_unwind_tables_in_apk build option is specified. This unwind table
// contains information for unwinding stack frames when the function calls are
// from lib[mono]chrome.so. The file is added as an asset to the apk and the
// table is used to unwind stack frames for profiling. This class implements
// methods to read and parse the unwind table and unwind stack frames using this
// data.
class BASE_EXPORT CFIBacktraceAndroid {
public:
// Creates and initializes by memory mapping the unwind tables from apk assets
// on first call.
static CFIBacktraceAndroid* GetInitializedInstance();
// Returns true if the given program counter |pc| is mapped in chrome library.
static bool is_chrome_address(uintptr_t pc);
// Returns the start and end address of the current library.
static uintptr_t executable_start_addr();
static uintptr_t executable_end_addr();
// Returns true if stack unwinding is possible using CFI unwind tables in apk.
// There is no need to check this before each unwind call. Will always return
// the same value based on CFI tables being present in the binary.
bool can_unwind_stack_frames() const { return can_unwind_stack_frames_; }
// Returns the program counters by unwinding stack in the current thread in
// order of latest call frame first. Unwinding works only if
// can_unwind_stack_frames() returns true. This function allocates memory from
// heap for cache on the first call of the calling thread, unless
// AllocateCacheForCurrentThread() is called from the thread. For each stack
// frame, this method searches through the unwind table mapped in memory to
// find the unwind information for function and walks the stack to find all
// the return address. This only works until the last function call from the
// chrome.so. We do not have unwind information to unwind beyond any frame
// outside of chrome.so. Calls to Unwind() are thread safe and lock free, once
// Initialize() returns success.
size_t Unwind(const void** out_trace, size_t max_depth);
// Same as above function, but starts from a given program counter |pc|,
// stack pointer |sp| and link register |lr|. This can be from current thread
// or any other thread. But the caller must make sure that the thread's stack
// segment is not racy to read.
size_t Unwind(uintptr_t pc,
uintptr_t sp,
uintptr_t lr,
const void** out_trace,
size_t max_depth);
// Allocates memory for CFI cache for the current thread so that Unwind()
// calls are safe for signal handlers.
void AllocateCacheForCurrentThread();
// The CFI information that correspond to an instruction.
struct CFIRow {
bool operator==(const CFIBacktraceAndroid::CFIRow& o) const {
return cfa_offset == o.cfa_offset && ra_offset == o.ra_offset;
}
// The offset of the call frame address of previous function from the
// current stack pointer. Rule for unwinding SP: SP_prev = SP_cur +
// cfa_offset.
uint16_t cfa_offset = 0;
// The offset of location of return address from the previous call frame
// address. Rule for unwinding PC: PC_prev = * (SP_prev - ra_offset).
uint16_t ra_offset = 0;
};
// Finds the CFI row for the given |func_addr| in terms of offset from
// the start of the current binary. Concurrent calls are thread safe.
bool FindCFIRowForPC(uintptr_t func_addr, CFIRow* out);
private:
FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestCFICache);
FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestFindCFIRow);
FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestUnwinding);
// A simple cache that stores entries in a table using prime modulo hashing.
// This cache with 500 entries already gives us 95% hit rate, and fits in a
// single system page (usually 4KiB). Using a thread local cache for each
// thread gives us 30% improvements on performance of heap profiling.
class CFICache {
public:
// Add new item to the cache. It replaces an existing item with same hash.
// Constant time operation.
void Add(uintptr_t address, CFIRow cfi);
// Finds the given address and fills |cfi| with the info for the address.
// Returns true if found, otherwise false. Assumes |address| is never 0.
bool Find(uintptr_t address, CFIRow* cfi);
private:
FRIEND_TEST_ALL_PREFIXES(CFIBacktraceAndroidTest, TestCFICache);
// Size is the highest prime which fits the cache in a single system page,
// usually 4KiB. A prime is chosen to make sure addresses are hashed evenly.
static const int kLimit = 509;
struct AddrAndCFI {
uintptr_t address;
CFIRow cfi;
};
AddrAndCFI cache_[kLimit] = {};
};
static_assert(sizeof(CFIBacktraceAndroid::CFICache) < 4096,
"The cache does not fit in a single page.");
CFIBacktraceAndroid();
~CFIBacktraceAndroid();
// Initializes unwind tables using the CFI asset file in the apk if present.
// Also stores the limits of mapped region of the lib[mono]chrome.so binary,
// since the unwind is only feasible for addresses within the .so file. Once
// initialized, the memory map of the unwind table is never cleared since we
// cannot guarantee that all the threads are done using the memory map when
// heap profiling is turned off. But since the memory map is kept clean, the
// system can choose to evict the unused pages when needed. This still reduces
// the total amount of address space available in the process.
void Initialize();
// Finds the UNW_INDEX and UNW_DATA tables in the CFI file memory map.
void ParseCFITables();
CFICache* GetThreadLocalCFICache();
// The start address of the memory mapped unwind table asset file. Unique ptr
// because it is replaced in tests.
std::unique_ptr<MemoryMappedFile> cfi_mmap_;
// The UNW_INDEX table: Start address of the function address column. The
// memory segment corresponding to this column is treated as an array of
// uintptr_t.
const uintptr_t* unw_index_function_col_ = nullptr;
// The UNW_INDEX table: Start address of the index column. The memory segment
// corresponding to this column is treated as an array of uint16_t.
const uint16_t* unw_index_indices_col_ = nullptr;
// The number of rows in UNW_INDEX table.
size_t unw_index_row_count_ = 0;
// The start address of UNW_DATA table.
const uint16_t* unw_data_start_addr_ = nullptr;
bool can_unwind_stack_frames_ = false;
ThreadLocalStorage::Slot thread_local_cfi_cache_;
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_CFI_BACKTRACE_ANDROID_H_

File diff suppressed because it is too large


@@ -0,0 +1,230 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/cpufreq_monitor_android.h"
#include <fcntl.h>
#include "base/atomicops.h"
#include "base/bind.h"
#include "base/files/file_util.h"
#include "base/files/scoped_file.h"
#include "base/memory/scoped_refptr.h"
#include "base/no_destructor.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
#include "base/task/post_task.h"
#include "base/task/task_traits.h"
#include "base/task/thread_pool.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace trace_event {
namespace {
const size_t kNumBytesToReadForSampling = 32;
constexpr const char kTraceCategory[] = TRACE_DISABLED_BY_DEFAULT("power");
const char kEventTitle[] = "CPU Frequency";
} // namespace
CPUFreqMonitorDelegate::CPUFreqMonitorDelegate() {}
std::string CPUFreqMonitorDelegate::GetScalingCurFreqPathString(
unsigned int cpu_id) const {
return base::StringPrintf(
"/sys/devices/system/cpu/cpu%d/cpufreq/scaling_cur_freq", cpu_id);
}
bool CPUFreqMonitorDelegate::IsTraceCategoryEnabled() const {
bool enabled;
TRACE_EVENT_CATEGORY_GROUP_ENABLED(kTraceCategory, &enabled);
return enabled;
}
unsigned int CPUFreqMonitorDelegate::GetKernelMaxCPUs() const {
std::string str;
if (!base::ReadFileToString(
base::FilePath("/sys/devices/system/cpu/kernel_max"), &str)) {
// If we fail to read the kernel_max file, we just assume that CPU0 exists.
return 0;
}
unsigned int kernel_max_cpu = 0;
base::StringToUint(str, &kernel_max_cpu);
return kernel_max_cpu;
}
std::string CPUFreqMonitorDelegate::GetRelatedCPUsPathString(
unsigned int cpu_id) const {
return base::StringPrintf(
"/sys/devices/system/cpu/cpu%d/cpufreq/related_cpus", cpu_id);
}
void CPUFreqMonitorDelegate::GetCPUIds(std::vector<unsigned int>* ids) const {
ids->clear();
unsigned int kernel_max_cpu = GetKernelMaxCPUs();
// CPUs related to one that's already marked for monitoring get set to "false"
// so we don't needlessly monitor CPUs with redundant frequency information.
char cpus_to_monitor[kernel_max_cpu + 1];
std::memset(cpus_to_monitor, 1, kernel_max_cpu + 1);
// Rule out the related CPUs for each one so we only end up with the CPUs
// that are representative of the cluster.
for (unsigned int i = 0; i <= kernel_max_cpu; i++) {
if (!cpus_to_monitor[i])
continue;
std::string filename = GetRelatedCPUsPathString(i);
std::string line;
if (!base::ReadFileToString(base::FilePath(filename), &line))
continue;
// When reading the related_cpus file, we expect the format to be
// something like "0 1 2 3" for CPU0-3 if they're all in one cluster.
for (auto& str_piece :
base::SplitString(line, " ", base::WhitespaceHandling::TRIM_WHITESPACE,
base::SplitResult::SPLIT_WANT_NONEMPTY)) {
unsigned int cpu_id;
if (base::StringToUint(str_piece, &cpu_id)) {
if (cpu_id != i && cpu_id >= 0 && cpu_id <= kernel_max_cpu)
cpus_to_monitor[cpu_id] = 0;
}
}
ids->push_back(i);
}
// If none of the files were readable, we assume CPU0 exists and fall back to
// using that.
if (ids->size() == 0)
ids->push_back(0);
}
void CPUFreqMonitorDelegate::RecordFrequency(unsigned int cpu_id,
unsigned int freq) {
TRACE_COUNTER_ID1(kTraceCategory, kEventTitle, cpu_id, freq);
}
scoped_refptr<SingleThreadTaskRunner>
CPUFreqMonitorDelegate::CreateTaskRunner() {
return base::ThreadPool::CreateSingleThreadTaskRunner(
{base::MayBlock(), base::TaskShutdownBehavior::SKIP_ON_SHUTDOWN,
base::TaskPriority::BEST_EFFORT},
base::SingleThreadTaskRunnerThreadMode::SHARED);
}
CPUFreqMonitor::CPUFreqMonitor()
: CPUFreqMonitor(std::make_unique<CPUFreqMonitorDelegate>()) {}
CPUFreqMonitor::CPUFreqMonitor(std::unique_ptr<CPUFreqMonitorDelegate> delegate)
: delegate_(std::move(delegate)) {}
CPUFreqMonitor::~CPUFreqMonitor() {
Stop();
}
// static
CPUFreqMonitor* CPUFreqMonitor::GetInstance() {
static base::NoDestructor<CPUFreqMonitor> instance;
return instance.get();
}
void CPUFreqMonitor::OnTraceLogEnabled() {
GetOrCreateTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&CPUFreqMonitor::Start, weak_ptr_factory_.GetWeakPtr()));
}
void CPUFreqMonitor::OnTraceLogDisabled() {
Stop();
}
void CPUFreqMonitor::Start() {
// It's the responsibility of the caller to ensure that Start/Stop are
// synchronized. If Start/Stop are called asynchronously where this value
// may be incorrect, we have bigger problems.
if (base::subtle::NoBarrier_Load(&is_enabled_) == 1 ||
!delegate_->IsTraceCategoryEnabled()) {
return;
}
std::vector<unsigned int> cpu_ids;
delegate_->GetCPUIds(&cpu_ids);
std::vector<std::pair<unsigned int, base::ScopedFD>> fds;
for (unsigned int id : cpu_ids) {
std::string fstr = delegate_->GetScalingCurFreqPathString(id);
int fd = open(fstr.c_str(), O_RDONLY);
if (fd == -1)
continue;
fds.emplace_back(std::make_pair(id, base::ScopedFD(fd)));
}
// If we failed to open any scaling_cur_freq files, there is nothing to sample.
if (fds.size() == 0)
return;
base::subtle::Release_Store(&is_enabled_, 1);
GetOrCreateTaskRunner()->PostTask(
FROM_HERE,
base::BindOnce(&CPUFreqMonitor::Sample, weak_ptr_factory_.GetWeakPtr(),
std::move(fds)));
}
void CPUFreqMonitor::Stop() {
base::subtle::Release_Store(&is_enabled_, 0);
}
void CPUFreqMonitor::Sample(
std::vector<std::pair<unsigned int, base::ScopedFD>> fds) {
// For the same reason as above we use NoBarrier_Load: if this value is in
// transition and we used Acquire_Load, we would never shut down our original
// Sample tasks until the next Stop. It remains the callers' responsibility to
// synchronize Start/Stop.
if (base::subtle::NoBarrier_Load(&is_enabled_) == 0)
return;
for (auto& id_fd : fds) {
int fd = id_fd.second.get();
unsigned int freq = 0;
// If we have trouble reading data from the file for any reason, we report
// the frequency as 0.
lseek(fd, 0L, SEEK_SET);
char data[kNumBytesToReadForSampling];
size_t bytes_read = read(fd, data, kNumBytesToReadForSampling);
if (bytes_read > 0) {
if (bytes_read < kNumBytesToReadForSampling)
data[bytes_read] = '\0';
int ret = sscanf(data, "%d", &freq);
if (ret == 0 || ret == std::char_traits<char>::eof())
freq = 0;
}
delegate_->RecordFrequency(id_fd.first, freq);
}
GetOrCreateTaskRunner()->PostDelayedTask(
FROM_HERE,
base::BindOnce(&CPUFreqMonitor::Sample, weak_ptr_factory_.GetWeakPtr(),
std::move(fds)),
base::TimeDelta::FromMilliseconds(kDefaultCPUFreqSampleIntervalMs));
}
bool CPUFreqMonitor::IsEnabledForTesting() {
return base::subtle::Acquire_Load(&is_enabled_) == 1;
}
const scoped_refptr<SingleThreadTaskRunner>&
CPUFreqMonitor::GetOrCreateTaskRunner() {
if (!task_runner_)
task_runner_ = delegate_->CreateTaskRunner();
return task_runner_;
}
} // namespace trace_event
} // namespace base
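For context, a minimal sketch of how this observer is intended to be wired up; the registration site is an assumption here, but TraceLog::AddEnabledStateObserver is the standard hook declared in trace_log.h.
// Sketch only: register the monitor so that enabling/disabling tracing
// starts/stops frequency sampling. Once registered, enabling the
// disabled-by-default "power" category emits "CPU Frequency" counters roughly
// every kDefaultCPUFreqSampleIntervalMs milliseconds.
void RegisterCpuFreqMonitorForTracing() {
  base::trace_event::TraceLog::GetInstance()->AddEnabledStateObserver(
      base::trace_event::CPUFreqMonitor::GetInstance());
}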

View file

@ -0,0 +1,98 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_CPUFREQ_MONITOR_ANDROID_H_
#define BASE_TRACE_EVENT_CPUFREQ_MONITOR_ANDROID_H_
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/files/scoped_file.h"
#include "base/memory/scoped_refptr.h"
#include "base/trace_event/trace_log.h"
namespace base {
class SingleThreadTaskRunner;
namespace trace_event {
// A delegate to isolate CPU frequency monitor functionality mainly for testing.
class BASE_EXPORT CPUFreqMonitorDelegate {
public:
CPUFreqMonitorDelegate();
virtual ~CPUFreqMonitorDelegate() = default;
// Fills |ids| with the minimal set of CPU IDs that we need to monitor to get
// CPU frequency information. For CPUs whose cores operate in clusters, e.g. a
// typical 8-core Qualcomm SoC with two clusters, this is CPU0 and CPU4.
virtual void GetCPUIds(std::vector<unsigned int>* ids) const;
// Reads the kernel_max file to determine the max CPU ID, e.g. 7 on an
// 8-core CPU.
virtual unsigned int GetKernelMaxCPUs() const;
// Reads the frequency from the CPUs being monitored and records them.
virtual void RecordFrequency(unsigned int cpu_id, unsigned int freq);
// Returns whether the tracing category that our CPU Frequency counters are
// emitted in is enabled, which determines whether we should record.
virtual bool IsTraceCategoryEnabled() const;
// Gets the path to CPU frequency related files for a particular CPU ID.
virtual std::string GetScalingCurFreqPathString(unsigned int cpu_id) const;
virtual std::string GetRelatedCPUsPathString(unsigned int cpu_id) const;
// Allows us to delay creating a task runner, necessary because many tests
// don't like us creating one outside of a TaskEnvironment.
virtual scoped_refptr<SingleThreadTaskRunner> CreateTaskRunner();
private:
DISALLOW_COPY_AND_ASSIGN(CPUFreqMonitorDelegate);
};
// A class for monitoring the CPU frequency on unique cores/clusters.
class BASE_EXPORT CPUFreqMonitor : public TraceLog::EnabledStateObserver {
public:
// Overhead of reading one cluster on a Nexus 6P is ~0.1ms per CPU. 50ms seems
// frequent enough to get a general idea of CPU frequency trends.
static const size_t kDefaultCPUFreqSampleIntervalMs = 50;
CPUFreqMonitor();
~CPUFreqMonitor() override;
static CPUFreqMonitor* GetInstance();
void Start();
void Stop();
// TraceLog::EnabledStateObserver.
void OnTraceLogEnabled() override;
void OnTraceLogDisabled() override;
bool IsEnabledForTesting();
private:
friend class CPUFreqMonitorTest;
CPUFreqMonitor(std::unique_ptr<CPUFreqMonitorDelegate> delegate);
void Sample(std::vector<std::pair<unsigned int, base::ScopedFD>> fds);
// Uses the delegate's CreateTaskRunner function to lazily create a task
// runner so we don't illegally create a task runner on Chrome startup for
// various tests.
const scoped_refptr<SingleThreadTaskRunner>& GetOrCreateTaskRunner();
base::subtle::Atomic32 is_enabled_ = 0;
scoped_refptr<SingleThreadTaskRunner> task_runner_;
std::unique_ptr<CPUFreqMonitorDelegate> delegate_;
base::WeakPtrFactory<CPUFreqMonitor> weak_ptr_factory_{this};
DISALLOW_COPY_AND_ASSIGN(CPUFreqMonitor);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_CPUFREQ_MONITOR_ANDROID_H_

View file

@ -0,0 +1,95 @@
<?xml version='1.0' encoding='utf-8' standalone='yes'?>
<assembly
xmlns="urn:schemas-microsoft-com:asm.v3"
xmlns:xsd="http://www.w3.org/2001/XMLSchema"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
manifestVersion="1.0"
>
<assemblyIdentity
buildType="$(build.buildType)"
language="neutral"
name="Chrome.ETW"
processorArchitecture="$(build.arch)"
publicKeyToken="$(Build.WindowsPublicKeyToken)"
version="$(build.version)"
versionScope="nonSxS"
/>
<instrumentation
xmlns:win="http://manifests.microsoft.com/win/2004/08/windows/events"
buildFilter="not build.isWow"
>
<events xmlns="http://schemas.microsoft.com/win/2004/08/events">
<provider
guid="{D2D578D9-2936-45B6-A09f-30E32715F42D}"
messageFileName="chrome.dll"
name="Chrome"
resourceFileName="chrome.dll"
symbol="CHROME"
>
<channels>
<importChannel
chid="SYSTEM"
name="System"
/>
</channels>
<templates>
<template tid="tid_chrome_event">
<data
inType="win:AnsiString"
name="Name"
/>
<data
inType="win:AnsiString"
name="Phase"
/>
<data
inType="win:AnsiString"
name="Arg Name 1"
/>
<data
inType="win:AnsiString"
name="Arg Value 1"
/>
<data
inType="win:AnsiString"
name="Arg Name 2"
/>
<data
inType="win:AnsiString"
name="Arg Value 2"
/>
<data
inType="win:AnsiString"
name="Arg Name 3"
/>
<data
inType="win:AnsiString"
name="Arg Value 3"
/>
</template>
</templates>
<events>
<event
channel="SYSTEM"
level="win:Informational"
message="$(string.ChromeEvent.EventMessage)"
opcode="win:Info"
symbol="ChromeEvent"
template="tid_chrome_event"
value="1"
/>
</events>
</provider>
</events>
</instrumentation>
<localization>
<resources culture="en-US">
<stringTable>
<string
id="ChromeEvent.EventMessage"
value="Chrome Event: %1 (%2)"
/>
</stringTable>
</resources>
</localization>
</assembly>

View file

@ -0,0 +1,26 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/event_name_filter.h"
#include "base/trace_event/trace_event_impl.h"
namespace base {
namespace trace_event {
// static
const char EventNameFilter::kName[] = "event_whitelist_predicate";
EventNameFilter::EventNameFilter(
std::unique_ptr<EventNamesWhitelist> event_names_whitelist)
: event_names_whitelist_(std::move(event_names_whitelist)) {}
EventNameFilter::~EventNameFilter() = default;
bool EventNameFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
return event_names_whitelist_->count(trace_event.name()) != 0;
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,46 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
#define BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
#include <memory>
#include <string>
#include <unordered_set>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/trace_event/trace_event_filter.h"
namespace base {
namespace trace_event {
class TraceEvent;
// Filters trace events by checking the full name against a whitelist.
// The current implementation is quite simple and dumb and just uses a
// hashtable which requires char* to std::string conversion. It could be smarter
// and use a bloom filter trie. However, today this is used too rarely to
// justify that cost.
class BASE_EXPORT EventNameFilter : public TraceEventFilter {
public:
using EventNamesWhitelist = std::unordered_set<std::string>;
static const char kName[];
EventNameFilter(std::unique_ptr<EventNamesWhitelist>);
~EventNameFilter() override;
// TraceEventFilter implementation.
bool FilterTraceEvent(const TraceEvent&) const override;
private:
std::unique_ptr<const EventNamesWhitelist> event_names_whitelist_;
DISALLOW_COPY_AND_ASSIGN(EventNameFilter);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_EVENT_NAME_FILTER_H_
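A minimal usage sketch of the filter declared above; the event names in the whitelist are illustrative.
#include <memory>
#include <utility>
#include "base/trace_event/event_name_filter.h"
// Sketch only: build a whitelist and a filter over it. FilterTraceEvent()
// then returns true only for events whose name is in the set.
std::unique_ptr<base::trace_event::EventNameFilter> MakeExampleFilter() {
  auto whitelist = std::make_unique<
      base::trace_event::EventNameFilter::EventNamesWhitelist>();
  whitelist->insert("MessageLoop::RunTask");
  whitelist->insert("V8.Execute");
  return std::make_unique<base::trace_event::EventNameFilter>(
      std::move(whitelist));
}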

View file

@ -0,0 +1,134 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_H
#define BASE_TRACE_EVENT_HEAP_PROFILER_H
#include "base/compiler_specific.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
// This header file defines the set of macros that are used to track memory
// usage in the heap profiler. They are in addition to the macros defined in
// trace_event.h and are specific to the heap profiler. This file also defines
// implementation details of these macros.
// Implementation detail: heap profiler macros create temporary variables to
// keep instrumentation overhead low. These macros give each temporary variable
// a unique name based on the line number to prevent name collisions.
#define INTERNAL_HEAP_PROFILER_UID3(a, b) heap_profiler_unique_##a##b
#define INTERNAL_HEAP_PROFILER_UID2(a, b) INTERNAL_HEAP_PROFILER_UID3(a, b)
#define INTERNAL_HEAP_PROFILER_UID(name_prefix) \
INTERNAL_HEAP_PROFILER_UID2(name_prefix, __LINE__)
// Scoped tracker for task execution context in the heap profiler.
#define TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION \
trace_event_internal::HeapProfilerScopedTaskExecutionTracker
// Scoped tracker that tracks the given program counter as a native stack frame
// in the heap profiler.
#define TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER \
trace_event_internal::HeapProfilerScopedStackFrame
// Returns the current task context (C string) tracked by the heap profiler.
// This is useful along with TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION if
// an async system needs to track a client's allocation context across post
// tasks. Use this macro to get the current context and use
// TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION in the posted task which
// allocates memory for a client.
#define TRACE_HEAP_PROFILER_API_GET_CURRENT_TASK_CONTEXT \
trace_event_internal::HeapProfilerCurrentTaskContext
// A scoped ignore event used to tell the heap profiler to ignore all the
// allocations in the scope. It is useful for excluding allocations made for
// tracing from the heap profiler dumps.
#define HEAP_PROFILER_SCOPED_IGNORE \
trace_event_internal::HeapProfilerScopedIgnore INTERNAL_HEAP_PROFILER_UID( \
scoped_ignore)
namespace trace_event_internal {
// HeapProfilerScopedTaskExecutionTracker records the current task's context in
// the heap profiler.
class HeapProfilerScopedTaskExecutionTracker {
public:
inline explicit HeapProfilerScopedTaskExecutionTracker(
const char* task_context)
: context_(task_context) {
using base::trace_event::AllocationContextTracker;
if (UNLIKELY(AllocationContextTracker::capture_mode() !=
AllocationContextTracker::CaptureMode::DISABLED)) {
AllocationContextTracker::GetInstanceForCurrentThread()
->PushCurrentTaskContext(context_);
}
}
inline ~HeapProfilerScopedTaskExecutionTracker() {
using base::trace_event::AllocationContextTracker;
if (UNLIKELY(AllocationContextTracker::capture_mode() !=
AllocationContextTracker::CaptureMode::DISABLED)) {
AllocationContextTracker::GetInstanceForCurrentThread()
->PopCurrentTaskContext(context_);
}
}
private:
const char* context_;
};
class HeapProfilerScopedStackFrame {
public:
inline explicit HeapProfilerScopedStackFrame(const void* program_counter)
: program_counter_(program_counter) {
using base::trace_event::AllocationContextTracker;
if (UNLIKELY(AllocationContextTracker::capture_mode() ==
AllocationContextTracker::CaptureMode::MIXED_STACK)) {
AllocationContextTracker::GetInstanceForCurrentThread()
->PushNativeStackFrame(program_counter_);
}
}
inline ~HeapProfilerScopedStackFrame() {
using base::trace_event::AllocationContextTracker;
if (UNLIKELY(AllocationContextTracker::capture_mode() ==
AllocationContextTracker::CaptureMode::MIXED_STACK)) {
AllocationContextTracker::GetInstanceForCurrentThread()
->PopNativeStackFrame(program_counter_);
}
}
private:
const void* const program_counter_;
};
inline const char* HeapProfilerCurrentTaskContext() {
return base::trace_event::AllocationContextTracker::
GetInstanceForCurrentThread()
->TaskContext();
}
class BASE_EXPORT HeapProfilerScopedIgnore {
public:
inline HeapProfilerScopedIgnore() {
using base::trace_event::AllocationContextTracker;
if (UNLIKELY(
AllocationContextTracker::capture_mode() !=
AllocationContextTracker::CaptureMode::DISABLED)) {
AllocationContextTracker::GetInstanceForCurrentThread()
->begin_ignore_scope();
}
}
inline ~HeapProfilerScopedIgnore() {
using base::trace_event::AllocationContextTracker;
if (UNLIKELY(
AllocationContextTracker::capture_mode() !=
AllocationContextTracker::CaptureMode::DISABLED)) {
AllocationContextTracker::GetInstanceForCurrentThread()
->end_ignore_scope();
}
}
};
} // namespace trace_event_internal
#endif // BASE_TRACE_EVENT_HEAP_PROFILER_H
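A minimal usage sketch of the task-context macros above; the posting API (base::ThreadPool::PostTask) and the helper name are assumptions used only for illustration.
#include "base/bind.h"
#include "base/location.h"
#include "base/task/thread_pool.h"
#include "base/trace_event/heap_profiler.h"
// Sketch only: carry the current task context across a posted task so that
// allocations made in the posted task are attributed to the original context.
void PostClientWork() {
  const char* context = TRACE_HEAP_PROFILER_API_GET_CURRENT_TASK_CONTEXT();
  base::ThreadPool::PostTask(
      FROM_HERE, base::BindOnce(
                     [](const char* task_context) {
                       TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION scope(
                           task_context);
                       // Allocations here are tagged with |task_context|.
                     },
                     context));
}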

View file

@ -0,0 +1,88 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/heap_profiler_allocation_context.h"
#include <cstring>
#include "base/hash/hash.h"
#include "base/macros.h"
namespace base {
namespace trace_event {
bool operator < (const StackFrame& lhs, const StackFrame& rhs) {
return lhs.value < rhs.value;
}
bool operator == (const StackFrame& lhs, const StackFrame& rhs) {
return lhs.value == rhs.value;
}
bool operator != (const StackFrame& lhs, const StackFrame& rhs) {
return !(lhs.value == rhs.value);
}
Backtrace::Backtrace() = default;
bool operator==(const Backtrace& lhs, const Backtrace& rhs) {
if (lhs.frame_count != rhs.frame_count) return false;
return std::equal(lhs.frames, lhs.frames + lhs.frame_count, rhs.frames);
}
bool operator!=(const Backtrace& lhs, const Backtrace& rhs) {
return !(lhs == rhs);
}
AllocationContext::AllocationContext(): type_name(nullptr) {}
AllocationContext::AllocationContext(const Backtrace& backtrace,
const char* type_name)
: backtrace(backtrace), type_name(type_name) {}
bool operator==(const AllocationContext& lhs, const AllocationContext& rhs) {
return (lhs.backtrace == rhs.backtrace) && (lhs.type_name == rhs.type_name);
}
bool operator!=(const AllocationContext& lhs, const AllocationContext& rhs) {
return !(lhs == rhs);
}
} // namespace trace_event
} // namespace base
namespace std {
using base::trace_event::AllocationContext;
using base::trace_event::Backtrace;
using base::trace_event::StackFrame;
size_t hash<StackFrame>::operator()(const StackFrame& frame) const {
return hash<const void*>()(frame.value);
}
size_t hash<Backtrace>::operator()(const Backtrace& backtrace) const {
const void* values[Backtrace::kMaxFrameCount];
for (size_t i = 0; i != backtrace.frame_count; ++i) {
values[i] = backtrace.frames[i].value;
}
return base::PersistentHash(values, backtrace.frame_count * sizeof(*values));
}
size_t hash<AllocationContext>::operator()(const AllocationContext& ctx) const {
size_t backtrace_hash = hash<Backtrace>()(ctx.backtrace);
// Multiplicative hash from [Knuth 1998]. Works best if |size_t| is 32 bits,
// because the magic number is a prime very close to 2^32 / golden ratio, but
// will still redistribute keys bijectively on 64-bit architectures because
// the magic number is coprime to 2^64.
size_t type_hash = reinterpret_cast<size_t>(ctx.type_name) * 2654435761;
// Multiply one side to break the commutativity of +. Multiplication with a
// number coprime to |numeric_limits<size_t>::max() + 1| is bijective so
// randomness is preserved.
return (backtrace_hash * 3) + type_hash;
}
} // namespace std
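The std::hash specializations above exist so these types can key hash-based containers directly; a small sketch (the aggregation map is illustrative):
#include <unordered_map>
#include "base/trace_event/heap_profiler_allocation_context.h"
// Sketch only: aggregate per-context allocation metrics, relying on the
// operator== and std::hash specializations defined for AllocationContext.
using MetricsByContext =
    std::unordered_map<base::trace_event::AllocationContext,
                       base::trace_event::AllocationMetrics>;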

View file

@ -0,0 +1,132 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_H_
#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_H_
#include <stddef.h>
#include <stdint.h>
#include <functional>
#include "base/base_export.h"
namespace base {
namespace trace_event {
// When heap profiling is enabled, tracing keeps track of the allocation
// context for each allocation intercepted. It is generated by the
// |AllocationContextTracker| which keeps stacks of context in TLS.
// The tracker is initialized lazily.
// The backtrace in the allocation context is a snapshot of the stack. For now,
// this is the pseudo stack where frames are created by trace event macros. In
// the future, we might add the option to use the native call stack. In that
// case, |Backtrace| and |AllocationContextTracker::GetContextSnapshot| might
// have different implementations that can be selected by a compile time flag.
// The number of stack frames stored in the backtrace is a trade off between
// memory used for tracing and accuracy. Measurements done on a prototype
// revealed that:
//
// - In 60 percent of the cases, pseudo stack depth <= 7.
// - In 87 percent of the cases, pseudo stack depth <= 9.
// - In 95 percent of the cases, pseudo stack depth <= 11.
//
// See the design doc (https://goo.gl/4s7v7b) for more details.
// Represents a (pseudo) stack frame. Used in the Backtrace class below.
//
// Conceptually a stack frame is identified by its value; the type is used
// mostly to properly format the value. The value is expected to be a valid
// pointer from the process' address space.
struct BASE_EXPORT StackFrame {
enum class Type {
TRACE_EVENT_NAME, // const char* string
THREAD_NAME, // const char* thread name
PROGRAM_COUNTER, // as returned by stack tracing (e.g. by StackTrace)
};
static StackFrame FromTraceEventName(const char* name) {
return {Type::TRACE_EVENT_NAME, name};
}
static StackFrame FromThreadName(const char* name) {
return {Type::THREAD_NAME, name};
}
static StackFrame FromProgramCounter(const void* pc) {
return {Type::PROGRAM_COUNTER, pc};
}
Type type;
const void* value;
};
bool BASE_EXPORT operator < (const StackFrame& lhs, const StackFrame& rhs);
bool BASE_EXPORT operator == (const StackFrame& lhs, const StackFrame& rhs);
bool BASE_EXPORT operator != (const StackFrame& lhs, const StackFrame& rhs);
struct BASE_EXPORT Backtrace {
Backtrace();
// If the stack is higher than what can be stored here, the top frames
// (the ones further from main()) are stored. Depth of 12 is enough for most
// pseudo traces (see above), but not for native traces, where we need more.
enum { kMaxFrameCount = 48 };
StackFrame frames[kMaxFrameCount];
size_t frame_count = 0;
};
bool BASE_EXPORT operator==(const Backtrace& lhs, const Backtrace& rhs);
bool BASE_EXPORT operator!=(const Backtrace& lhs, const Backtrace& rhs);
// The |AllocationContext| is context metadata that is kept for every allocation
// when heap profiling is enabled. To simplify memory management for book-
// keeping, this struct has a fixed size.
struct BASE_EXPORT AllocationContext {
AllocationContext();
AllocationContext(const Backtrace& backtrace, const char* type_name);
Backtrace backtrace;
// Type name of the type stored in the allocated memory. A null pointer
// indicates "unknown type". Grouping is done by comparing pointers, not by
// deep string comparison. In a component build, where a type name can have a
// string literal in several dynamic libraries, this may distort grouping.
const char* type_name;
};
bool BASE_EXPORT operator==(const AllocationContext& lhs,
const AllocationContext& rhs);
bool BASE_EXPORT operator!=(const AllocationContext& lhs,
const AllocationContext& rhs);
// Struct to store the size and count of the allocations.
struct AllocationMetrics {
size_t size;
size_t count;
};
} // namespace trace_event
} // namespace base
namespace std {
template <>
struct BASE_EXPORT hash<base::trace_event::StackFrame> {
size_t operator()(const base::trace_event::StackFrame& frame) const;
};
template <>
struct BASE_EXPORT hash<base::trace_event::Backtrace> {
size_t operator()(const base::trace_event::Backtrace& backtrace) const;
};
template <>
struct BASE_EXPORT hash<base::trace_event::AllocationContext> {
size_t operator()(const base::trace_event::AllocationContext& context) const;
};
} // namespace std
#endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_H_
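A minimal sketch of building an AllocationContext by hand, e.g. for a test; the frame names are illustrative.
// Sketch only: a two-frame backtrace (thread name + trace event name) plus a
// type name, as the heap profiler would attach to an allocation.
base::trace_event::AllocationContext MakeExampleContext() {
  base::trace_event::Backtrace backtrace;
  backtrace.frames[0] =
      base::trace_event::StackFrame::FromThreadName("CrBrowserMain");
  backtrace.frames[1] =
      base::trace_event::StackFrame::FromTraceEventName("MessageLoop::RunTask");
  backtrace.frame_count = 2;
  return base::trace_event::AllocationContext(backtrace, /*type_name=*/"int");
}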

View file

@ -0,0 +1,270 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include <algorithm>
#include <iterator>
#include "base/atomicops.h"
#include "base/debug/debugging_buildflags.h"
#include "base/debug/leak_annotations.h"
#include "base/debug/stack_trace.h"
#include "base/no_destructor.h"
#include "base/stl_util.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local_storage.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "build/build_config.h"
#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
#include "base/trace_event/cfi_backtrace_android.h"
#endif
#if defined(OS_LINUX) || defined(OS_ANDROID)
#include <sys/prctl.h>
#endif
namespace base {
namespace trace_event {
subtle::Atomic32 AllocationContextTracker::capture_mode_ =
static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);
namespace {
const size_t kMaxStackDepth = 128u;
const size_t kMaxTaskDepth = 16u;
AllocationContextTracker* const kInitializingSentinel =
reinterpret_cast<AllocationContextTracker*>(-1);
// This function is added to the TLS slot to clean up the instance when the
// thread exits.
void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
}
ThreadLocalStorage::Slot& AllocationContextTrackerTLS() {
static NoDestructor<ThreadLocalStorage::Slot> tls_alloc_ctx_tracker(
&DestructAllocationContextTracker);
return *tls_alloc_ctx_tracker;
}
// Cannot call ThreadIdNameManager::GetName because it holds a lock and causes
// a deadlock when the lock is already held by ThreadIdNameManager before the
// current allocation. Gets the thread name from the kernel if available, or
// returns a string with the thread id otherwise. This function intentionally
// leaks the allocated strings since they are used to tag allocations even
// after the thread dies.
const char* GetAndLeakThreadName() {
char name[16];
#if defined(OS_LINUX) || defined(OS_ANDROID)
// If the thread name is not set, try to get it from prctl. Thread name might
// not be set in cases where the thread started before heap profiling was
// enabled.
int err = prctl(PR_GET_NAME, name);
if (!err) {
return strdup(name);
}
#endif // defined(OS_LINUX) || defined(OS_ANDROID)
// Use tid if we don't have a thread name.
snprintf(name, sizeof(name), "%lu",
static_cast<unsigned long>(PlatformThread::CurrentId()));
return strdup(name);
}
} // namespace
// static
AllocationContextTracker*
AllocationContextTracker::GetInstanceForCurrentThread() {
AllocationContextTracker* tracker = static_cast<AllocationContextTracker*>(
AllocationContextTrackerTLS().Get());
if (tracker == kInitializingSentinel)
return nullptr; // Re-entrancy case.
if (!tracker) {
AllocationContextTrackerTLS().Set(kInitializingSentinel);
tracker = new AllocationContextTracker();
AllocationContextTrackerTLS().Set(tracker);
}
return tracker;
}
AllocationContextTracker::AllocationContextTracker()
: thread_name_(nullptr), ignore_scope_depth_(0) {
tracked_stack_.reserve(kMaxStackDepth);
task_contexts_.reserve(kMaxTaskDepth);
task_contexts_.push_back("UntrackedTask");
}
AllocationContextTracker::~AllocationContextTracker() = default;
// static
void AllocationContextTracker::SetCurrentThreadName(const char* name) {
if (name && capture_mode() != CaptureMode::DISABLED) {
GetInstanceForCurrentThread()->thread_name_ = name;
}
}
// static
void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
// Release ordering ensures that when a thread observes |capture_mode_| to
// be true through an acquire load, the TLS slot has been initialized.
subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
}
void AllocationContextTracker::PushPseudoStackFrame(
AllocationContextTracker::PseudoStackFrame stack_frame) {
// Impose a limit on the height to verify that every push is popped, because
// in practice the pseudo stack never grows higher than ~20 frames.
if (tracked_stack_.size() < kMaxStackDepth) {
tracked_stack_.push_back(
StackFrame::FromTraceEventName(stack_frame.trace_event_name));
} else {
NOTREACHED();
}
}
void AllocationContextTracker::PopPseudoStackFrame(
AllocationContextTracker::PseudoStackFrame stack_frame) {
// Guard for stack underflow. If tracing was started with a TRACE_EVENT in
// scope, the frame was never pushed, so it is possible that pop is called
// on an empty stack.
if (tracked_stack_.empty())
return;
tracked_stack_.pop_back();
}
void AllocationContextTracker::PushNativeStackFrame(const void* pc) {
if (tracked_stack_.size() < kMaxStackDepth)
tracked_stack_.push_back(StackFrame::FromProgramCounter(pc));
else
NOTREACHED();
}
void AllocationContextTracker::PopNativeStackFrame(const void* pc) {
if (tracked_stack_.empty())
return;
DCHECK_EQ(pc, tracked_stack_.back().value);
tracked_stack_.pop_back();
}
void AllocationContextTracker::PushCurrentTaskContext(const char* context) {
DCHECK(context);
if (task_contexts_.size() < kMaxTaskDepth)
task_contexts_.push_back(context);
else
NOTREACHED();
}
void AllocationContextTracker::PopCurrentTaskContext(const char* context) {
// Guard for stack underflow. If tracing was started with a TRACE_EVENT in
// scope, the context was never pushed, so it is possible that pop is called
// on an empty stack. Note that the context always contains "UntrackedTask".
if (task_contexts_.size() == 1)
return;
DCHECK_EQ(context, task_contexts_.back())
<< "Encountered an unmatched context end";
task_contexts_.pop_back();
}
bool AllocationContextTracker::GetContextSnapshot(AllocationContext* ctx) {
if (ignore_scope_depth_)
return false;
CaptureMode mode = static_cast<CaptureMode>(
subtle::NoBarrier_Load(&capture_mode_));
auto* backtrace = std::begin(ctx->backtrace.frames);
auto* backtrace_end = std::end(ctx->backtrace.frames);
if (!thread_name_) {
// Ignore the string allocation made by GetAndLeakThreadName to avoid
// reentrancy.
ignore_scope_depth_++;
thread_name_ = GetAndLeakThreadName();
ANNOTATE_LEAKING_OBJECT_PTR(thread_name_);
DCHECK(thread_name_);
ignore_scope_depth_--;
}
// Add the thread name as the first entry in pseudo stack.
if (thread_name_) {
*backtrace++ = StackFrame::FromThreadName(thread_name_);
}
switch (mode) {
case CaptureMode::DISABLED:
{
break;
}
case CaptureMode::PSEUDO_STACK:
case CaptureMode::MIXED_STACK:
{
for (const StackFrame& stack_frame : tracked_stack_) {
if (backtrace == backtrace_end)
break;
*backtrace++ = stack_frame;
}
break;
}
case CaptureMode::NATIVE_STACK:
{
// Backtrace contract requires us to return bottom frames, i.e.
// from main() and up. Stack unwinding produces top frames, i.e.
// from this point and up until main(). We intentionally request
// kMaxFrameCount + 1 frames, so that we know if there are more frames
// than our backtrace capacity.
#if !defined(OS_NACL) // We don't build base/debug/stack_trace.cc for NaCl.
#if defined(OS_ANDROID) && BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
const void* frames[Backtrace::kMaxFrameCount + 1];
static_assert(base::size(frames) >= Backtrace::kMaxFrameCount,
"not requesting enough frames to fill Backtrace");
size_t frame_count =
CFIBacktraceAndroid::GetInitializedInstance()->Unwind(
frames, base::size(frames));
#elif BUILDFLAG(CAN_UNWIND_WITH_FRAME_POINTERS)
const void* frames[Backtrace::kMaxFrameCount + 1];
static_assert(base::size(frames) >= Backtrace::kMaxFrameCount,
"not requesting enough frames to fill Backtrace");
size_t frame_count = debug::TraceStackFramePointers(
frames, base::size(frames),
1 /* exclude this function from the trace */);
#else
// Fall-back to capturing the stack with base::debug::StackTrace,
// which is likely slower, but more reliable.
base::debug::StackTrace stack_trace(Backtrace::kMaxFrameCount + 1);
size_t frame_count = 0u;
const void* const* frames = stack_trace.Addresses(&frame_count);
#endif
// If there are too many frames, keep the ones furthest from main().
size_t backtrace_capacity = backtrace_end - backtrace;
int32_t starting_frame_index = frame_count;
if (frame_count > backtrace_capacity) {
starting_frame_index = backtrace_capacity - 1;
*backtrace++ = StackFrame::FromTraceEventName("<truncated>");
}
for (int32_t i = starting_frame_index - 1; i >= 0; --i) {
const void* frame = frames[i];
*backtrace++ = StackFrame::FromProgramCounter(frame);
}
#endif // !defined(OS_NACL)
break;
}
}
ctx->backtrace.frame_count = backtrace - std::begin(ctx->backtrace.frames);
ctx->type_name = TaskContext();
return true;
}
} // namespace trace_event
} // namespace base
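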

View file

@ -0,0 +1,145 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
#include <vector>
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/macros.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
namespace base {
namespace trace_event {
// AllocationContextTracker is a thread-local object. Its main purpose is to
// keep track of a pseudo stack of trace events. Chrome has been instrumented
// with lots of `TRACE_EVENT` macros. These trace events push their name to a
// thread-local stack when they go into scope, and pop when they go out of
// scope, if all of the following conditions have been met:
//
// * A trace is being recorded.
// * The category of the event is enabled in the trace config.
// * Heap profiling is enabled (with the `--enable-heap-profiling` flag).
//
// This means that allocations that occur before tracing is started will not
// have backtrace information in their context.
//
// AllocationContextTracker also keeps track of some thread state not related to
// trace events. See |AllocationContext|.
//
// A thread-local instance of the context tracker is initialized lazily when it
// is first accessed. This might be because a trace event was pushed or popped,
// or because `GetContextSnapshot()` was called when an allocation occurred.
class BASE_EXPORT AllocationContextTracker {
public:
enum class CaptureMode : int32_t {
DISABLED, // Don't capture anything
PSEUDO_STACK, // Backtrace has trace events
MIXED_STACK, // Backtrace has trace events + from
// HeapProfilerScopedStackFrame
NATIVE_STACK, // Backtrace has full native backtraces from stack unwinding
};
// Stack frame constructed from trace events in codebase.
struct BASE_EXPORT PseudoStackFrame {
const char* trace_event_category;
const char* trace_event_name;
bool operator==(const PseudoStackFrame& other) const {
return trace_event_category == other.trace_event_category &&
trace_event_name == other.trace_event_name;
}
};
// Globally sets capturing mode.
// TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
static void SetCaptureMode(CaptureMode mode);
// Returns global capturing mode.
inline static CaptureMode capture_mode() {
// A little lag after heap profiling is enabled or disabled is fine; it is
// more important that the check is as cheap as possible when capturing is
// not enabled, so do not issue a memory barrier in the fast path.
if (subtle::NoBarrier_Load(&capture_mode_) ==
static_cast<int32_t>(CaptureMode::DISABLED))
return CaptureMode::DISABLED;
// In the slow path, an acquire load is required to pair with the release
// store in |SetCaptureMode|. This is to ensure that the TLS slot for
// the thread-local allocation context tracker has been initialized if
// |capture_mode| returns something other than DISABLED.
return static_cast<CaptureMode>(subtle::Acquire_Load(&capture_mode_));
}
// Returns the thread-local instance, creating one if necessary. Always
// returns a valid instance, unless it is called re-entrantly, in which case
// it returns nullptr in the nested calls.
static AllocationContextTracker* GetInstanceForCurrentThread();
// Set the thread name in the AllocationContextTracker of the current thread
// if capture is enabled.
static void SetCurrentThreadName(const char* name);
// Starts and ends a new ignore scope between which the allocations are
// ignored by the heap profiler. GetContextSnapshot() returns false when
// allocations are ignored.
void begin_ignore_scope() { ignore_scope_depth_++; }
void end_ignore_scope() {
if (ignore_scope_depth_)
ignore_scope_depth_--;
}
// Pushes and pops a frame onto the thread-local pseudo stack.
// TODO(ssid): Change PseudoStackFrame to const char*. Only event name is
// used.
void PushPseudoStackFrame(PseudoStackFrame stack_frame);
void PopPseudoStackFrame(PseudoStackFrame stack_frame);
// Pushes and pops a native stack frame onto thread local tracked stack.
void PushNativeStackFrame(const void* pc);
void PopNativeStackFrame(const void* pc);
// Push and pop current task's context. A stack is used to support nested
// tasks and the top of the stack will be used in allocation context.
void PushCurrentTaskContext(const char* context);
void PopCurrentTaskContext(const char* context);
// Returns the most recent task context added by
// HeapProfilerScopedTaskExecutionTracker.
const char* TaskContext() const {
return task_contexts_.empty() ? nullptr : task_contexts_.back();
}
// Fills a snapshot of the current thread-local context. Doesn't fill and
// returns false if allocations are being ignored.
bool GetContextSnapshot(AllocationContext* snapshot);
~AllocationContextTracker();
private:
AllocationContextTracker();
static subtle::Atomic32 capture_mode_;
// The pseudo stack where frames are |TRACE_EVENT| names or inserted PCs.
std::vector<StackFrame> tracked_stack_;
// The thread name is used as the first entry in the pseudo stack.
const char* thread_name_;
// Stack of tasks' contexts. Context serves as a different dimension than
// pseudo stack to cluster allocations.
std::vector<const char*> task_contexts_;
uint32_t ignore_scope_depth_;
DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
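A minimal sketch of taking a snapshot on the current thread once capture is enabled; the thread name is illustrative.
// Sketch only: enable pseudo-stack capture, name the thread, and snapshot the
// current context. The snapshot's backtrace starts with the thread-name frame,
// followed by pseudo stack frames pushed by trace events currently in scope.
void SnapshotCurrentContext() {
  using base::trace_event::AllocationContext;
  using base::trace_event::AllocationContextTracker;
  AllocationContextTracker::SetCaptureMode(
      AllocationContextTracker::CaptureMode::PSEUDO_STACK);
  AllocationContextTracker::SetCurrentThreadName("Compositor");
  AllocationContext context;
  if (AllocationContextTracker::GetInstanceForCurrentThread()
          ->GetContextSnapshot(&context)) {
    // |context| is valid; context.type_name holds the current task context.
  }
}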

View file

@ -0,0 +1,70 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/category_registry.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/trace_category.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_impl.h"
namespace base {
namespace trace_event {
namespace {
inline bool IsPseudoStackEnabled() {
// Only PSEUDO_STACK and MIXED_STACK modes require trace events.
return AllocationContextTracker::capture_mode() ==
AllocationContextTracker::CaptureMode::PSEUDO_STACK ||
AllocationContextTracker::capture_mode() ==
AllocationContextTracker::CaptureMode::MIXED_STACK;
}
inline AllocationContextTracker* GetThreadLocalTracker() {
return AllocationContextTracker::GetInstanceForCurrentThread();
}
} // namespace
// static
const char HeapProfilerEventFilter::kName[] = "heap_profiler_predicate";
HeapProfilerEventFilter::HeapProfilerEventFilter() = default;
HeapProfilerEventFilter::~HeapProfilerEventFilter() = default;
bool HeapProfilerEventFilter::FilterTraceEvent(
const TraceEvent& trace_event) const {
if (!IsPseudoStackEnabled())
return true;
// TODO(primiano): Add support for events with copied name crbug.com/581079.
if (trace_event.flags() & TRACE_EVENT_FLAG_COPY)
return true;
const auto* category = CategoryRegistry::GetCategoryByStatePtr(
trace_event.category_group_enabled());
AllocationContextTracker::PseudoStackFrame frame = {category->name(),
trace_event.name()};
if (trace_event.phase() == TRACE_EVENT_PHASE_BEGIN ||
trace_event.phase() == TRACE_EVENT_PHASE_COMPLETE) {
GetThreadLocalTracker()->PushPseudoStackFrame(frame);
} else if (trace_event.phase() == TRACE_EVENT_PHASE_END) {
// The pop for |TRACE_EVENT_PHASE_COMPLETE| events is in |EndEvent|.
GetThreadLocalTracker()->PopPseudoStackFrame(frame);
}
// Do not filter-out any events and always return true. TraceLog adds the
// event only if it is enabled for recording.
return true;
}
void HeapProfilerEventFilter::EndEvent(const char* category_name,
const char* event_name) const {
if (IsPseudoStackEnabled())
GetThreadLocalTracker()->PopPseudoStackFrame({category_name, event_name});
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,40 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
#define BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_
#include "base/base_export.h"
#include "base/macros.h"
#include "base/trace_event/trace_event_filter.h"
namespace base {
namespace trace_event {
class TraceEvent;
// This filter unconditionally accepts all events and pushes/pops them from the
// thread-local AllocationContextTracker instance as they are seen.
// This is used to cheaply construct the heap profiler pseudo stack without
// having to actually record all events.
class BASE_EXPORT HeapProfilerEventFilter : public TraceEventFilter {
public:
static const char kName[];
HeapProfilerEventFilter();
~HeapProfilerEventFilter() override;
// TraceEventFilter implementation.
bool FilterTraceEvent(const TraceEvent& trace_event) const override;
void EndEvent(const char* category_name,
const char* event_name) const override;
private:
DISALLOW_COPY_AND_ASSIGN(HeapProfilerEventFilter);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_HEAP_PROFILER_EVENT_FILTER_H_

View file

@ -0,0 +1,47 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/java_heap_dump_provider_android.h"
#include "base/android/java_runtime.h"
#include "base/trace_event/process_memory_dump.h"
namespace base {
namespace trace_event {
// static
JavaHeapDumpProvider* JavaHeapDumpProvider::GetInstance() {
return Singleton<JavaHeapDumpProvider,
LeakySingletonTraits<JavaHeapDumpProvider>>::get();
}
JavaHeapDumpProvider::JavaHeapDumpProvider() {
}
JavaHeapDumpProvider::~JavaHeapDumpProvider() {
}
// Called at trace dump time. Creates a snapshot with the memory counters
// for the current process.
bool JavaHeapDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
ProcessMemoryDump* pmd) {
// These numbers come from java.lang.Runtime stats.
long total_heap_size = 0;
long free_heap_size = 0;
android::JavaRuntime::GetMemoryUsage(&total_heap_size, &free_heap_size);
MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("java_heap");
outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, total_heap_size);
MemoryAllocatorDump* inner_dump =
pmd->CreateAllocatorDump("java_heap/allocated_objects");
inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes,
total_heap_size - free_heap_size);
return true;
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,36 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_JAVA_HEAP_DUMP_PROVIDER_ANDROID_H_
#define BASE_TRACE_EVENT_JAVA_HEAP_DUMP_PROVIDER_ANDROID_H_
#include "base/macros.h"
#include "base/memory/singleton.h"
#include "base/trace_event/memory_dump_provider.h"
namespace base {
namespace trace_event {
// Dump provider which collects process-wide memory stats.
class BASE_EXPORT JavaHeapDumpProvider : public MemoryDumpProvider {
public:
static JavaHeapDumpProvider* GetInstance();
// MemoryDumpProvider implementation.
bool OnMemoryDump(const MemoryDumpArgs& args,
ProcessMemoryDump* pmd) override;
private:
friend struct DefaultSingletonTraits<JavaHeapDumpProvider>;
JavaHeapDumpProvider();
~JavaHeapDumpProvider() override;
DISALLOW_COPY_AND_ASSIGN(JavaHeapDumpProvider);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_JAVA_HEAP_DUMP_PROVIDER_ANDROID_H_

View file

@ -0,0 +1,47 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/log_message.h"
#include <stdint.h>
#include <string>
#include "base/json/string_escape.h"
#include "base/strings/stringprintf.h"
namespace base {
namespace trace_event {
LogMessage::LogMessage(const char* file, base::StringPiece message, int line)
: file_(file), message_(message), line_number_(line) {}
LogMessage::~LogMessage() = default;
void LogMessage::AppendAsTraceFormat(std::string* out) const {
out->append("{");
out->append(base::StringPrintf("\"line\":\"%d\",", line_number_));
out->append("\"message\":");
base::EscapeJSONString(message_, true, out);
out->append(",");
out->append(base::StringPrintf("\"file\":\"%s\"", file_));
out->append("}");
}
void LogMessage::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
overhead->Add(TraceEventMemoryOverhead::kOther, sizeof(this));
overhead->AddString(message_);
}
bool LogMessage::AppendToProto(ProtoAppender* appender) {
// LogMessage is handled in a special way in
// track_event_thread_local_event_sink.cc in the function |AddTraceEvent|, so
// this call should never happen.
NOTREACHED();
return false;
}
} // namespace trace_event
} // namespace base
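For illustration, the trace-format JSON produced by AppendAsTraceFormat above for a sample message; the file, message, and line values are illustrative.
// Sketch only: serialize one LogMessage into its trace (JSON) representation.
std::string ExampleLogMessageJson() {
  base::trace_event::LogMessage message("foo.cc", "something happened", 42);
  std::string json;
  message.AppendAsTraceFormat(&json);
  // json == {"line":"42","message":"something happened","file":"foo.cc"}
  return json;
}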

View file

@ -0,0 +1,49 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_LOG_MESSAGE_H_
#define BASE_TRACE_EVENT_LOG_MESSAGE_H_
#include <stddef.h>
#include <memory>
#include <string>
#include <vector>
#include "base/macros.h"
#include "base/strings/string_piece.h"
#include "base/trace_event/trace_event_impl.h"
namespace base {
class Value;
namespace trace_event {
class BASE_EXPORT LogMessage : public ConvertableToTraceFormat {
public:
LogMessage(const char* file, base::StringPiece message, int line);
~LogMessage() override;
// ConvertableToTraceFormat class implementation.
void AppendAsTraceFormat(std::string* out) const override;
bool AppendToProto(ProtoAppender* appender) override;
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
const char* file() const { return file_; }
const std::string& message() const { return message_; }
int line_number() const { return line_number_; }
private:
const char* file_;
std::string message_;
int line_number_;
DISALLOW_COPY_AND_ASSIGN(LogMessage);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_LOG_MESSAGE_H_

View file

@ -0,0 +1,187 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/malloc_dump_provider.h"
#include <stddef.h>
#include <unordered_map>
#include "base/allocator/allocator_extension.h"
#include "base/allocator/buildflags.h"
#include "base/debug/profiler.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/traced_value.h"
#include "build/build_config.h"
#if defined(OS_MACOSX)
#include <malloc/malloc.h>
#else
#include <malloc.h>
#endif
#if defined(OS_WIN)
#include <windows.h>
#endif
namespace base {
namespace trace_event {
namespace {
#if defined(OS_WIN)
// A structure containing some information about a given heap.
struct WinHeapInfo {
size_t committed_size;
size_t uncommitted_size;
size_t allocated_size;
size_t block_count;
};
// NOTE: crbug.com/665516
// Unfortunately, there is no safe way to collect information from secondary
// heaps due to limitations and the racy nature of this piece of the WinAPI.
void WinHeapMemoryDumpImpl(WinHeapInfo* crt_heap_info) {
// Iterate through whichever heap our CRT is using.
HANDLE crt_heap = reinterpret_cast<HANDLE>(_get_heap_handle());
::HeapLock(crt_heap);
PROCESS_HEAP_ENTRY heap_entry;
heap_entry.lpData = nullptr;
// Walk over all the entries in the main heap.
while (::HeapWalk(crt_heap, &heap_entry) != FALSE) {
if ((heap_entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) != 0) {
crt_heap_info->allocated_size += heap_entry.cbData;
crt_heap_info->block_count++;
} else if ((heap_entry.wFlags & PROCESS_HEAP_REGION) != 0) {
crt_heap_info->committed_size += heap_entry.Region.dwCommittedSize;
crt_heap_info->uncommitted_size += heap_entry.Region.dwUnCommittedSize;
}
}
CHECK(::HeapUnlock(crt_heap) == TRUE);
}
#endif // defined(OS_WIN)
} // namespace
// static
const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";
// static
MallocDumpProvider* MallocDumpProvider::GetInstance() {
return Singleton<MallocDumpProvider,
LeakySingletonTraits<MallocDumpProvider>>::get();
}
MallocDumpProvider::MallocDumpProvider() = default;
MallocDumpProvider::~MallocDumpProvider() = default;
// Called at trace dump time. Creates a snapshot of the memory counters for
// the current process.
bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
ProcessMemoryDump* pmd) {
{
base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
if (!emit_metrics_on_memory_dump_)
return true;
}
size_t total_virtual_size = 0;
size_t resident_size = 0;
size_t allocated_objects_size = 0;
size_t allocated_objects_count = 0;
#if BUILDFLAG(USE_TCMALLOC)
bool res =
allocator::GetNumericProperty("generic.heap_size", &total_virtual_size);
DCHECK(res);
res = allocator::GetNumericProperty("generic.total_physical_bytes",
&resident_size);
DCHECK(res);
res = allocator::GetNumericProperty("generic.current_allocated_bytes",
&allocated_objects_size);
DCHECK(res);
#elif defined(OS_MACOSX) || defined(OS_IOS)
malloc_statistics_t stats = {0};
malloc_zone_statistics(nullptr, &stats);
total_virtual_size = stats.size_allocated;
allocated_objects_size = stats.size_in_use;
// Resident size is approximated pretty well by stats.max_size_in_use.
// However, on macOS, freed blocks are both resident and reusable, which is
// semantically equivalent to deallocated. The implementation of libmalloc
// will also only hold a fixed number of freed regions before actually
// starting to deallocate them, so stats.max_size_in_use is also not
// representative of the peak size. As a result, stats.max_size_in_use is
// typically somewhere between actually resident [non-reusable] pages, and
// peak size. This is not very useful, so we just use stats.size_in_use for
// resident_size, even though it's an underestimate and fails to account for
// fragmentation. See
// https://bugs.chromium.org/p/chromium/issues/detail?id=695263#c1.
resident_size = stats.size_in_use;
#elif defined(OS_WIN)
// This is too expensive on Windows, crbug.com/780735.
if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
WinHeapInfo main_heap_info = {};
WinHeapMemoryDumpImpl(&main_heap_info);
total_virtual_size =
main_heap_info.committed_size + main_heap_info.uncommitted_size;
// Resident size is approximated with the committed heap size. Note that it is
// possible to do this with better accuracy on Windows by intersecting the
// working set with the virtual memory ranges occupied by the heap. It's
// not clear that this is worth it, as it's fairly expensive to do.
resident_size = main_heap_info.committed_size;
allocated_objects_size = main_heap_info.allocated_size;
allocated_objects_count = main_heap_info.block_count;
}
#elif defined(OS_FUCHSIA)
// TODO(fuchsia): Port, see https://crbug.com/706592.
#else
struct mallinfo info = mallinfo();
// In the case of Android's jemalloc, |arena| is 0 and the size of outer pages
// is reported by |hblkhd|. In the case of dlmalloc, the total is given by
// |arena| + |hblkhd|. For more details see: http://goo.gl/fMR8lF.
total_virtual_size = info.arena + info.hblkhd;
resident_size = info.uordblks;
// Total allocated space is given by |uordblks|.
allocated_objects_size = info.uordblks;
#endif
MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("malloc");
outer_dump->AddScalar("virtual_size", MemoryAllocatorDump::kUnitsBytes,
total_virtual_size);
outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes, resident_size);
MemoryAllocatorDump* inner_dump = pmd->CreateAllocatorDump(kAllocatedObjects);
inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes,
allocated_objects_size);
if (allocated_objects_count != 0) {
inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects,
allocated_objects_count);
}
if (resident_size > allocated_objects_size) {
// Explicitly specify why the extra memory is resident. In tcmalloc it
// accounts for free lists and caches. On macOS and iOS it accounts for
// fragmentation and metadata.
MemoryAllocatorDump* other_dump =
pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches");
other_dump->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes,
resident_size - allocated_objects_size);
}
return true;
}
void MallocDumpProvider::EnableMetrics() {
base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
emit_metrics_on_memory_dump_ = true;
}
void MallocDumpProvider::DisableMetrics() {
base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
emit_metrics_on_memory_dump_ = false;
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,56 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
#define BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
#include "base/macros.h"
#include "base/memory/singleton.h"
#include "base/synchronization/lock.h"
#include "base/trace_event/memory_dump_provider.h"
#include "build/build_config.h"
#if defined(OS_LINUX) || defined(OS_ANDROID) || defined(OS_WIN) || \
(defined(OS_MACOSX) && !defined(OS_IOS))
#define MALLOC_MEMORY_TRACING_SUPPORTED
#endif
namespace base {
namespace trace_event {
// Dump provider which collects process-wide memory stats.
class BASE_EXPORT MallocDumpProvider : public MemoryDumpProvider {
public:
// Name of the allocated_objects dump. Use this to declare suballocator dumps
// from other dump providers.
static const char kAllocatedObjects[];
static MallocDumpProvider* GetInstance();
// MemoryDumpProvider implementation.
bool OnMemoryDump(const MemoryDumpArgs& args,
ProcessMemoryDump* pmd) override;
// Used by out-of-process heap-profiling. When malloc is profiled by an
// external process, that process will be responsible for emitting metrics on
// behalf of this one. Thus, MallocDumpProvider should not do anything.
void EnableMetrics();
void DisableMetrics();
private:
friend struct DefaultSingletonTraits<MallocDumpProvider>;
MallocDumpProvider();
~MallocDumpProvider() override;
bool emit_metrics_on_memory_dump_ = true;
base::Lock emit_metrics_on_memory_dump_lock_;
DISALLOW_COPY_AND_ASSIGN(MallocDumpProvider);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_MALLOC_DUMP_PROVIDER_H_
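A minimal sketch of the out-of-process hand-off described in the comment above; the function names and call sites are hypothetical.
// Sketch only: when an external heap-profiling process attaches, stop emitting
// local malloc metrics so that process becomes the single source of truth.
void OnExternalHeapProfilerAttached() {
  base::trace_event::MallocDumpProvider::GetInstance()->DisableMetrics();
}
// ...and resume local reporting if the external profiler detaches.
void OnExternalHeapProfilerDetached() {
  base::trace_event::MallocDumpProvider::GetInstance()->EnableMetrics();
}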

View file

@ -0,0 +1,148 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/memory_allocator_dump.h"
#include <string.h>
#include "base/format_macros.h"
#include "base/memory/ptr_util.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/traced_value.h"
#include "base/values.h"
namespace base {
namespace trace_event {
const char MemoryAllocatorDump::kNameSize[] = "size";
const char MemoryAllocatorDump::kNameObjectCount[] = "object_count";
const char MemoryAllocatorDump::kTypeScalar[] = "scalar";
const char MemoryAllocatorDump::kTypeString[] = "string";
const char MemoryAllocatorDump::kUnitsBytes[] = "bytes";
const char MemoryAllocatorDump::kUnitsObjects[] = "objects";
MemoryAllocatorDump::MemoryAllocatorDump(
const std::string& absolute_name,
MemoryDumpLevelOfDetail level_of_detail,
const MemoryAllocatorDumpGuid& guid)
: absolute_name_(absolute_name),
guid_(guid),
level_of_detail_(level_of_detail),
flags_(Flags::DEFAULT) {
// The |absolute_name| cannot be empty.
DCHECK(!absolute_name.empty());
// The |absolute_name| can contain slash separators, but not leading or
// trailing ones.
DCHECK(absolute_name[0] != '/' && *absolute_name.rbegin() != '/');
}
MemoryAllocatorDump::~MemoryAllocatorDump() = default;
void MemoryAllocatorDump::AddScalar(const char* name,
const char* units,
uint64_t value) {
entries_.emplace_back(name, units, value);
}
void MemoryAllocatorDump::AddString(const char* name,
const char* units,
const std::string& value) {
// String attributes are disabled in background mode.
if (level_of_detail_ == MemoryDumpLevelOfDetail::BACKGROUND) {
NOTREACHED();
return;
}
entries_.emplace_back(name, units, value);
}
void MemoryAllocatorDump::AsValueInto(TracedValue* value) const {
std::string string_conversion_buffer;
value->BeginDictionaryWithCopiedName(absolute_name_);
value->SetString("guid", guid_.ToString());
value->BeginDictionary("attrs");
for (const Entry& entry : entries_) {
value->BeginDictionaryWithCopiedName(entry.name);
switch (entry.entry_type) {
case Entry::kUint64:
SStringPrintf(&string_conversion_buffer, "%" PRIx64,
entry.value_uint64);
value->SetString("type", kTypeScalar);
value->SetString("units", entry.units);
value->SetString("value", string_conversion_buffer);
break;
case Entry::kString:
value->SetString("type", kTypeString);
value->SetString("units", entry.units);
value->SetString("value", entry.value_string);
break;
}
value->EndDictionary();
}
value->EndDictionary(); // "attrs": { ... }
if (flags_)
value->SetInteger("flags", flags_);
value->EndDictionary(); // "allocator_name/heap_subheap": { ... }
}
uint64_t MemoryAllocatorDump::GetSizeInternal() const {
if (cached_size_.has_value())
return *cached_size_;
for (const auto& entry : entries_) {
if (entry.entry_type == Entry::kUint64 && entry.units == kUnitsBytes &&
strcmp(entry.name.c_str(), kNameSize) == 0) {
cached_size_ = entry.value_uint64;
return entry.value_uint64;
}
}
return 0;
}
MemoryAllocatorDump::Entry::Entry() : entry_type(kString), value_uint64() {}
MemoryAllocatorDump::Entry::Entry(MemoryAllocatorDump::Entry&&) noexcept =
default;
MemoryAllocatorDump::Entry& MemoryAllocatorDump::Entry::operator=(
MemoryAllocatorDump::Entry&&) = default;
MemoryAllocatorDump::Entry::Entry(std::string name,
std::string units,
uint64_t value)
: name(name), units(units), entry_type(kUint64), value_uint64(value) {}
MemoryAllocatorDump::Entry::Entry(std::string name,
std::string units,
std::string value)
: name(name), units(units), entry_type(kString), value_string(value) {}
bool MemoryAllocatorDump::Entry::operator==(const Entry& rhs) const {
if (!(name == rhs.name && units == rhs.units && entry_type == rhs.entry_type))
return false;
switch (entry_type) {
case EntryType::kUint64:
return value_uint64 == rhs.value_uint64;
case EntryType::kString:
return value_string == rhs.value_string;
}
NOTREACHED();
return false;
}
void PrintTo(const MemoryAllocatorDump::Entry& entry, std::ostream* out) {
switch (entry.entry_type) {
case MemoryAllocatorDump::Entry::EntryType::kUint64:
*out << "<Entry(\"" << entry.name << "\", \"" << entry.units << "\", "
<< entry.value_uint64 << ")>";
return;
case MemoryAllocatorDump::Entry::EntryType::kString:
*out << "<Entry(\"" << entry.name << "\", \"" << entry.units << "\", \""
<< entry.value_string << "\")>";
return;
}
NOTREACHED();
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,153 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_H_
#define BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_H_
#include <stdint.h>
#include <memory>
#include <ostream>
#include <string>
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/optional.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/traced_value.h"
#include "base/unguessable_token.h"
#include "base/values.h"
namespace base {
namespace trace_event {
class ProcessMemoryDump;
class TracedValue;
// Data model for user-land memory allocator dumps.
class BASE_EXPORT MemoryAllocatorDump {
public:
enum Flags {
DEFAULT = 0,
// A dump marked weak will be discarded by TraceViewer.
WEAK = 1 << 0,
};
// In the TraceViewer UI table each MemoryAllocatorDump becomes
// a row and each Entry generates a column (if it doesn't already
// exist).
struct BASE_EXPORT Entry {
enum EntryType {
kUint64,
kString,
};
// By design, |name|, |units| and |value_string| always come from
// indefinitely lived const char* strings; the only reason we copy
// them into a std::string is to handle Mojo (de)serialization.
// TODO(hjd): Investigate optimization (e.g. using StringPiece).
Entry(); // Only for deserialization.
Entry(std::string name, std::string units, uint64_t value);
Entry(std::string name, std::string units, std::string value);
Entry(Entry&& other) noexcept;
Entry& operator=(Entry&& other);
bool operator==(const Entry& rhs) const;
std::string name;
std::string units;
EntryType entry_type;
uint64_t value_uint64;
std::string value_string;
DISALLOW_COPY_AND_ASSIGN(Entry);
};
MemoryAllocatorDump(const std::string& absolute_name,
MemoryDumpLevelOfDetail,
const MemoryAllocatorDumpGuid&);
~MemoryAllocatorDump();
// Standard attribute |name|s for the AddScalar and AddString() methods.
static const char kNameSize[]; // To represent allocated space.
static const char kNameObjectCount[]; // To represent number of objects.
// Standard attribute |unit|s for the AddScalar and AddString() methods.
static const char kUnitsBytes[]; // Unit name to represent bytes.
static const char kUnitsObjects[]; // Unit name to represent #objects.
// Constants used only internally and by tests.
static const char kTypeScalar[]; // Type name for scalar attributes.
static const char kTypeString[]; // Type name for string attributes.
// Setters for scalar attributes. Some examples:
// - "size" column (all dumps are expected to have at least this one):
// AddScalar(kNameSize, kUnitsBytes, 1234);
// - Some extra-column reporting internal details of the subsystem:
// AddScalar("number_of_freelist_entries", kUnitsObjects, 42)
// - Other informational column:
// AddString("kitten", "name", "shadow");
void AddScalar(const char* name, const char* units, uint64_t value);
void AddString(const char* name, const char* units, const std::string& value);
// Absolute name, unique within the scope of an entire ProcessMemoryDump.
const std::string& absolute_name() const { return absolute_name_; }
// Called at trace generation time to populate the TracedValue.
void AsValueInto(TracedValue* value) const;
// Get the size for this dump.
// The size is the value set with AddScalar(kNameSize, kUnitsBytes, size);
// TODO(hjd): this should return an Optional<uint64_t>.
uint64_t GetSizeInternal() const;
MemoryDumpLevelOfDetail level_of_detail() const { return level_of_detail_; }
// Use enum Flags to set values.
void set_flags(int flags) { flags_ |= flags; }
void clear_flags(int flags) { flags_ &= ~flags; }
int flags() const { return flags_; }
// |guid| is an optional global dump identifier, unique across all processes
// within the scope of a global dump. It is only required when using the
// graph APIs (see TODO_method_name) to express retention / suballocation or
// cross process sharing. See crbug.com/492102 for design docs.
// Subsequent MemoryAllocatorDump(s) with the same |absolute_name| are
// expected to have the same guid.
const MemoryAllocatorDumpGuid& guid() const { return guid_; }
const std::vector<Entry>& entries() const { return entries_; }
// Only for mojo serialization, which can mutate the collection.
std::vector<Entry>* mutable_entries_for_serialization() const {
cached_size_.reset(); // The caller can mutate the collection.
// Mojo takes a const input argument even for move-only types that can be
// mutated while serializing (like this one). Hence the const_cast.
return const_cast<std::vector<Entry>*>(&entries_);
}
private:
const std::string absolute_name_;
MemoryAllocatorDumpGuid guid_;
MemoryDumpLevelOfDetail level_of_detail_;
int flags_; // See enum Flags.
mutable Optional<uint64_t> cached_size_; // Lazy, for GetSizeInternal().
std::vector<Entry> entries_;
DISALLOW_COPY_AND_ASSIGN(MemoryAllocatorDump);
};
// This is required by gtest to print a readable output on test failures.
void BASE_EXPORT PrintTo(const MemoryAllocatorDump::Entry&, std::ostream*);
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_H_
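
A minimal usage sketch, assuming a hypothetical MyCacheDumpProvider with a cache_ member; ProcessMemoryDump::CreateAllocatorDump() (declared in process_memory_dump.h, not shown here) is assumed to be the factory for MemoryAllocatorDump instances.

// Hypothetical provider populating a MemoryAllocatorDump as suggested by the
// AddScalar()/AddString() comments above.
bool MyCacheDumpProvider::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  using base::trace_event::MemoryAllocatorDump;
  MemoryAllocatorDump* dump = pmd->CreateAllocatorDump("my_subsystem/cache");
  dump->AddScalar(MemoryAllocatorDump::kNameSize,
                  MemoryAllocatorDump::kUnitsBytes, cache_.memory_usage());
  dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
                  MemoryAllocatorDump::kUnitsObjects, cache_.entry_count());
  return true;  // |pmd| was populated successfully.
}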

View file

@ -0,0 +1,40 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/memory_allocator_dump_guid.h"
#include "base/format_macros.h"
#include "base/hash/sha1.h"
#include "base/strings/stringprintf.h"
namespace base {
namespace trace_event {
namespace {
uint64_t HashString(const std::string& str) {
uint64_t hash[(kSHA1Length + sizeof(uint64_t) - 1) / sizeof(uint64_t)] = {0};
SHA1HashBytes(reinterpret_cast<const unsigned char*>(str.data()), str.size(),
reinterpret_cast<unsigned char*>(hash));
return hash[0];
}
} // namespace
MemoryAllocatorDumpGuid::MemoryAllocatorDumpGuid(uint64_t guid) : guid_(guid) {}
MemoryAllocatorDumpGuid::MemoryAllocatorDumpGuid()
: MemoryAllocatorDumpGuid(0u) {
}
MemoryAllocatorDumpGuid::MemoryAllocatorDumpGuid(const std::string& guid_str)
: MemoryAllocatorDumpGuid(HashString(guid_str)) {
}
std::string MemoryAllocatorDumpGuid::ToString() const {
return StringPrintf("%" PRIx64, guid_);
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,55 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_GUID_H_
#define BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_GUID_H_
#include <stdint.h>
#include <string>
#include "base/base_export.h"
namespace base {
namespace trace_event {
class BASE_EXPORT MemoryAllocatorDumpGuid {
public:
MemoryAllocatorDumpGuid();
explicit MemoryAllocatorDumpGuid(uint64_t guid);
// Utility ctor to hash a GUID if the caller prefers a string. The caller
// still has to ensure that |guid_str| is unique, per snapshot, within the
// global scope of all the traced processes.
explicit MemoryAllocatorDumpGuid(const std::string& guid_str);
uint64_t ToUint64() const { return guid_; }
// Returns a (hex-encoded) string representation of the guid.
std::string ToString() const;
bool empty() const { return guid_ == 0u; }
bool operator==(const MemoryAllocatorDumpGuid& other) const {
return guid_ == other.guid_;
}
bool operator!=(const MemoryAllocatorDumpGuid& other) const {
return !(*this == other);
}
bool operator<(const MemoryAllocatorDumpGuid& other) const {
return guid_ < other.guid_;
}
private:
uint64_t guid_;
// Deliberately copy-able.
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_MEMORY_ALLOCATOR_DUMP_GUID_H_
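
A short sketch, assuming a producer and a consumer process agree on a string key (the key format below is made up): hashing the same string yields the same 64-bit GUID in both processes, which is what lets the trace importer link their dumps.

// Both sides derive the GUID from the same agreed-upon string key.
// Assumes base/strings/stringprintf.h and base/format_macros.h.
base::trace_event::MemoryAllocatorDumpGuid GuidForSharedBuffer(uint64_t id) {
  return base::trace_event::MemoryAllocatorDumpGuid(
      base::StringPrintf("shared_memory/buffer_%" PRIu64, id));
}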

View file

@ -0,0 +1,546 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/memory_dump_manager.h"
#include <inttypes.h>
#include <stdio.h>
#include <algorithm>
#include <memory>
#include <utility>
#include "base/allocator/buildflags.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/debug/alias.h"
#include "base/debug/stack_trace.h"
#include "base/memory/ptr_util.h"
#include "base/sequenced_task_runner.h"
#include "base/strings/string_util.h"
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/thread.h"
#include "base/threading/thread_task_runner_handle.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_event_filter.h"
#include "base/trace_event/malloc_dump_provider.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/memory_dump_scheduler.h"
#include "base/trace_event/memory_infra_background_allowlist.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/traced_value.h"
#include "build/build_config.h"
#if defined(OS_ANDROID)
#include "base/trace_event/java_heap_dump_provider_android.h"
#if BUILDFLAG(CAN_UNWIND_WITH_CFI_TABLE)
#include "base/trace_event/cfi_backtrace_android.h"
#endif
#endif // defined(OS_ANDROID)
namespace base {
namespace trace_event {
namespace {
MemoryDumpManager* g_memory_dump_manager_for_testing = nullptr;
// Temporary trampoline (until the scheduler is moved out of here) that adapts
// the |request_dump_function| passed to Initialize() to the callback expected
// by MemoryDumpScheduler.
// TODO(primiano): remove this.
void DoGlobalDumpWithoutCallback(
MemoryDumpManager::RequestGlobalDumpFunction global_dump_fn,
MemoryDumpType dump_type,
MemoryDumpLevelOfDetail level_of_detail) {
global_dump_fn.Run(dump_type, level_of_detail);
}
} // namespace
// static
constexpr const char* MemoryDumpManager::kTraceCategory;
// static
const int MemoryDumpManager::kMaxConsecutiveFailuresCount = 3;
// static
const uint64_t MemoryDumpManager::kInvalidTracingProcessId = 0;
// static
const char* const MemoryDumpManager::kSystemAllocatorPoolName =
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
MallocDumpProvider::kAllocatedObjects;
#else
nullptr;
#endif
// static
MemoryDumpManager* MemoryDumpManager::GetInstance() {
if (g_memory_dump_manager_for_testing)
return g_memory_dump_manager_for_testing;
return Singleton<MemoryDumpManager,
LeakySingletonTraits<MemoryDumpManager>>::get();
}
// static
std::unique_ptr<MemoryDumpManager>
MemoryDumpManager::CreateInstanceForTesting() {
DCHECK(!g_memory_dump_manager_for_testing);
std::unique_ptr<MemoryDumpManager> instance(new MemoryDumpManager());
g_memory_dump_manager_for_testing = instance.get();
return instance;
}
MemoryDumpManager::MemoryDumpManager()
: is_coordinator_(false),
tracing_process_id_(kInvalidTracingProcessId),
dumper_registrations_ignored_for_testing_(false) {}
MemoryDumpManager::~MemoryDumpManager() {
Thread* dump_thread = nullptr;
{
AutoLock lock(lock_);
if (dump_thread_) {
dump_thread = dump_thread_.get();
}
}
if (dump_thread) {
dump_thread->Stop();
}
AutoLock lock(lock_);
dump_thread_.reset();
g_memory_dump_manager_for_testing = nullptr;
}
void MemoryDumpManager::Initialize(
RequestGlobalDumpFunction request_dump_function,
bool is_coordinator) {
{
AutoLock lock(lock_);
DCHECK(!request_dump_function.is_null());
DCHECK(!can_request_global_dumps());
request_dump_function_ = request_dump_function;
is_coordinator_ = is_coordinator;
}
// Enable the core dump providers.
#if defined(MALLOC_MEMORY_TRACING_SUPPORTED)
RegisterDumpProvider(MallocDumpProvider::GetInstance(), "Malloc", nullptr);
#endif
#if defined(OS_ANDROID)
RegisterDumpProvider(JavaHeapDumpProvider::GetInstance(), "JavaHeap",
nullptr);
#endif
}
void MemoryDumpManager::RegisterDumpProvider(
MemoryDumpProvider* mdp,
const char* name,
scoped_refptr<SingleThreadTaskRunner> task_runner,
MemoryDumpProvider::Options options) {
options.dumps_on_single_thread_task_runner = true;
RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}
void MemoryDumpManager::RegisterDumpProvider(
MemoryDumpProvider* mdp,
const char* name,
scoped_refptr<SingleThreadTaskRunner> task_runner) {
// Set |dumps_on_single_thread_task_runner| to true because all providers
// without a task runner are run on the dump thread.
MemoryDumpProvider::Options options;
options.dumps_on_single_thread_task_runner = true;
RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}
void MemoryDumpManager::RegisterDumpProviderWithSequencedTaskRunner(
MemoryDumpProvider* mdp,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
MemoryDumpProvider::Options options) {
DCHECK(task_runner);
options.dumps_on_single_thread_task_runner = false;
RegisterDumpProviderInternal(mdp, name, std::move(task_runner), options);
}
void MemoryDumpManager::RegisterDumpProviderInternal(
MemoryDumpProvider* mdp,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
const MemoryDumpProvider::Options& options) {
if (dumper_registrations_ignored_for_testing_)
return;
// Only a handful of MDPs are required to compute the memory metrics. These
// have small enough performance overhead that it is reasonable to run them
// in the background while the user is doing other things. Those MDPs are
// 'allowed in background mode'.
bool allowed_in_background_mode = IsMemoryDumpProviderInAllowlist(name);
scoped_refptr<MemoryDumpProviderInfo> mdpinfo = new MemoryDumpProviderInfo(
mdp, name, std::move(task_runner), options, allowed_in_background_mode);
{
AutoLock lock(lock_);
bool already_registered = !dump_providers_.insert(mdpinfo).second;
// This actually happens in some tests which don't have a clean tear-down
// path for RenderThreadImpl::Init().
if (already_registered)
return;
}
}
void MemoryDumpManager::UnregisterDumpProvider(MemoryDumpProvider* mdp) {
UnregisterDumpProviderInternal(mdp, false /* delete_async */);
}
void MemoryDumpManager::UnregisterAndDeleteDumpProviderSoon(
std::unique_ptr<MemoryDumpProvider> mdp) {
UnregisterDumpProviderInternal(mdp.release(), true /* delete_async */);
}
void MemoryDumpManager::UnregisterDumpProviderInternal(
MemoryDumpProvider* mdp,
bool take_mdp_ownership_and_delete_async) {
std::unique_ptr<MemoryDumpProvider> owned_mdp;
if (take_mdp_ownership_and_delete_async)
owned_mdp.reset(mdp);
AutoLock lock(lock_);
auto mdp_iter = dump_providers_.begin();
for (; mdp_iter != dump_providers_.end(); ++mdp_iter) {
if ((*mdp_iter)->dump_provider == mdp)
break;
}
if (mdp_iter == dump_providers_.end())
return; // Not registered / already unregistered.
if (take_mdp_ownership_and_delete_async) {
// The MDP will be deleted at the same time as the MDPInfo struct, that is either:
// - At the end of this function, if no dump is in progress.
// - In ContinueAsyncProcessDump() when MDPInfo is removed from
// |pending_dump_providers|.
DCHECK(!(*mdp_iter)->owned_dump_provider);
(*mdp_iter)->owned_dump_provider = std::move(owned_mdp);
} else {
// If you hit this DCHECK, your dump provider has a bug.
// Unregistration of a MemoryDumpProvider is safe only if:
// - The MDP has specified a sequenced task runner affinity AND the
// unregistration happens on the same task runner. So that the MDP cannot
// unregister and be in the middle of a OnMemoryDump() at the same time.
// - The MDP has NOT specified a task runner affinity and its ownership is
// transferred via UnregisterAndDeleteDumpProviderSoon().
// In all the other cases, it is not possible to guarantee that the
// unregistration will not race with OnMemoryDump() calls.
DCHECK((*mdp_iter)->task_runner &&
(*mdp_iter)->task_runner->RunsTasksInCurrentSequence())
<< "MemoryDumpProvider \"" << (*mdp_iter)->name << "\" attempted to "
<< "unregister itself in a racy way. Please file a crbug.";
}
// The MDPInfo instance can still be referenced by the
// |ProcessMemoryDumpAsyncState.pending_dump_providers|. For this reason
// the MDPInfo is flagged as disabled. It will cause InvokeOnMemoryDump()
// to just skip it, without actually invoking the |mdp|, which might be
// destroyed by the caller soon after this method returns.
(*mdp_iter)->disabled = true;
dump_providers_.erase(mdp_iter);
}
bool MemoryDumpManager::IsDumpProviderRegisteredForTesting(
MemoryDumpProvider* provider) {
AutoLock lock(lock_);
for (const auto& info : dump_providers_) {
if (info->dump_provider == provider)
return true;
}
return false;
}
scoped_refptr<SequencedTaskRunner>
MemoryDumpManager::GetDumpThreadTaskRunner() {
base::AutoLock lock(lock_);
return GetOrCreateBgTaskRunnerLocked();
}
scoped_refptr<base::SequencedTaskRunner>
MemoryDumpManager::GetOrCreateBgTaskRunnerLocked() {
lock_.AssertAcquired();
if (dump_thread_)
return dump_thread_->task_runner();
dump_thread_ = std::make_unique<Thread>("MemoryInfra");
bool started = dump_thread_->Start();
CHECK(started);
return dump_thread_->task_runner();
}
void MemoryDumpManager::CreateProcessDump(const MemoryDumpRequestArgs& args,
ProcessMemoryDumpCallback callback) {
char guid_str[20];
sprintf(guid_str, "0x%" PRIx64, args.dump_guid);
TRACE_EVENT_NESTABLE_ASYNC_BEGIN1(kTraceCategory, "ProcessMemoryDump",
TRACE_ID_LOCAL(args.dump_guid), "dump_guid",
TRACE_STR_COPY(guid_str));
// If the argument filter is enabled, only background-mode dumps should be
// allowed. If the trace config passed for a background tracing session is
// missing the allowed-modes argument, crash here instead of creating
// unexpected dumps.
if (TraceLog::GetInstance()
->GetCurrentTraceConfig()
.IsArgumentFilterEnabled()) {
CHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, args.level_of_detail);
}
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state;
{
AutoLock lock(lock_);
pmd_async_state.reset(new ProcessMemoryDumpAsyncState(
args, dump_providers_, std::move(callback),
GetOrCreateBgTaskRunnerLocked()));
}
// Start the process dump. This involves task runner hops as specified by the
// MemoryDumpProvider(s) in RegisterDumpProvider().
ContinueAsyncProcessDump(pmd_async_state.release());
}
// Invokes OnMemoryDump() on all MDPs that are next in the pending list and run
// on the current sequenced task runner. If the next MDP does not run on the
// current sequenced task runner, switches to that task runner and continues.
// All OnMemoryDump() invocations are linearized. |lock_| is used in these
// functions purely to ensure consistency w.r.t. (un)registrations of
// |dump_providers_|.
void MemoryDumpManager::ContinueAsyncProcessDump(
ProcessMemoryDumpAsyncState* owned_pmd_async_state) {
HEAP_PROFILER_SCOPED_IGNORE;
// Initializes the ThreadLocalEventBuffer to guarantee that the TRACE_EVENTs
// in the PostTask below don't end up registering their own dump providers
// (for discounting trace memory overhead) while holding the |lock_|.
TraceLog::GetInstance()->InitializeThreadLocalEventBufferIfSupported();
// In theory |owned_pmd_async_state| should be a unique_ptr. The only reason
// it isn't is the corner-case logic around |did_post_task| below, which needs
// to take back ownership of |pmd_async_state| when PostTask() fails.
// Unfortunately, PostTask() destroys its unique_ptr arguments upon failure
// to prevent accidental leaks. Using a unique_ptr would prevent us from
// skipping the hop and moving on. Hence the manual naked -> unique_ptr
// juggling.
auto pmd_async_state = WrapUnique(owned_pmd_async_state);
owned_pmd_async_state = nullptr;
while (!pmd_async_state->pending_dump_providers.empty()) {
// Read MemoryDumpProviderInfo thread safety considerations in
// memory_dump_manager.h when accessing |mdpinfo| fields.
MemoryDumpProviderInfo* mdpinfo =
pmd_async_state->pending_dump_providers.back().get();
// If we are in background mode, we should invoke only the allowlisted
// providers. Ignore other providers and continue.
if (pmd_async_state->req_args.level_of_detail ==
MemoryDumpLevelOfDetail::BACKGROUND &&
!mdpinfo->allowed_in_background_mode) {
pmd_async_state->pending_dump_providers.pop_back();
continue;
}
// If the dump provider did not specify a task runner affinity, dump on
// |dump_thread_|.
scoped_refptr<SequencedTaskRunner> task_runner = mdpinfo->task_runner;
if (!task_runner) {
DCHECK(mdpinfo->options.dumps_on_single_thread_task_runner);
task_runner = pmd_async_state->dump_thread_task_runner;
DCHECK(task_runner);
}
// If |RunsTasksInCurrentSequence()| is true then no PostTask is
// required since we are on the right SequencedTaskRunner.
if (task_runner->RunsTasksInCurrentSequence()) {
InvokeOnMemoryDump(mdpinfo, pmd_async_state->process_memory_dump.get());
pmd_async_state->pending_dump_providers.pop_back();
continue;
}
bool did_post_task = task_runner->PostTask(
FROM_HERE,
BindOnce(&MemoryDumpManager::ContinueAsyncProcessDump, Unretained(this),
Unretained(pmd_async_state.get())));
if (did_post_task) {
// Ownership is transferred to the posted task.
ignore_result(pmd_async_state.release());
return;
}
// PostTask usually fails only if the process or thread is being shut down, so
// the dump provider is disabled here. Don't disable unbound dump providers,
// though, since the |dump_thread_| is controlled by MDM.
if (mdpinfo->task_runner) {
// A locked access is required to R/W |disabled| (for the
// UnregisterAndDeleteDumpProviderSoon() case).
AutoLock lock(lock_);
mdpinfo->disabled = true;
}
// PostTask failed. Ignore the dump provider and continue.
pmd_async_state->pending_dump_providers.pop_back();
}
FinishAsyncProcessDump(std::move(pmd_async_state));
}
// This function is called on the right task runner for the current MDP. It is
// either the task runner specified by the MDP or |dump_thread_task_runner| if
// the MDP did not specify one. Invokes the dump provider's OnMemoryDump()
// (unless disabled).
void MemoryDumpManager::InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
ProcessMemoryDump* pmd) {
HEAP_PROFILER_SCOPED_IGNORE;
DCHECK(!mdpinfo->task_runner ||
mdpinfo->task_runner->RunsTasksInCurrentSequence());
TRACE_EVENT1(kTraceCategory, "MemoryDumpManager::InvokeOnMemoryDump",
"dump_provider.name", mdpinfo->name);
// Do not add any other TRACE_EVENT macro (or function that might have them)
// below this point. Under some rare circumstances, they can re-initialize
// and invalidate the current ThreadLocalEventBuffer MDP, making the
// |should_dump| check below susceptible to TOCTTOU bugs
// (https://crbug.com/763365).
bool is_thread_bound;
{
// A locked access is required to R/W |disabled| (for the
// UnregisterAndDeleteDumpProviderSoon() case).
AutoLock lock(lock_);
// Unregister the dump provider if it failed too many times consecutively.
if (!mdpinfo->disabled &&
mdpinfo->consecutive_failures >= kMaxConsecutiveFailuresCount) {
mdpinfo->disabled = true;
DLOG(ERROR) << "Disabling MemoryDumpProvider \"" << mdpinfo->name
<< "\". Dump failed multiple times consecutively.";
}
if (mdpinfo->disabled)
return;
is_thread_bound = mdpinfo->task_runner != nullptr;
} // AutoLock lock(lock_);
// Invoke the dump provider.
// A stack-allocated string with the dump provider name is useful for
// debugging crashes while invoking a dump after a |dump_provider| has been
// unregistered in an unsafe way.
char provider_name_for_debugging[16];
strncpy(provider_name_for_debugging, mdpinfo->name,
sizeof(provider_name_for_debugging) - 1);
provider_name_for_debugging[sizeof(provider_name_for_debugging) - 1] = '\0';
base::debug::Alias(provider_name_for_debugging);
ANNOTATE_BENIGN_RACE(&mdpinfo->disabled, "best-effort race detection");
CHECK(!is_thread_bound ||
!*(static_cast<volatile bool*>(&mdpinfo->disabled)));
bool dump_successful =
mdpinfo->dump_provider->OnMemoryDump(pmd->dump_args(), pmd);
mdpinfo->consecutive_failures =
dump_successful ? 0 : mdpinfo->consecutive_failures + 1;
}
void MemoryDumpManager::FinishAsyncProcessDump(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state) {
HEAP_PROFILER_SCOPED_IGNORE;
DCHECK(pmd_async_state->pending_dump_providers.empty());
const uint64_t dump_guid = pmd_async_state->req_args.dump_guid;
if (!pmd_async_state->callback_task_runner->BelongsToCurrentThread()) {
scoped_refptr<SingleThreadTaskRunner> callback_task_runner =
pmd_async_state->callback_task_runner;
callback_task_runner->PostTask(
FROM_HERE, BindOnce(&MemoryDumpManager::FinishAsyncProcessDump,
Unretained(this), std::move(pmd_async_state)));
return;
}
TRACE_EVENT0(kTraceCategory, "MemoryDumpManager::FinishAsyncProcessDump");
if (!pmd_async_state->callback.is_null()) {
std::move(pmd_async_state->callback)
.Run(true /* success */, dump_guid,
std::move(pmd_async_state->process_memory_dump));
}
TRACE_EVENT_NESTABLE_ASYNC_END0(kTraceCategory, "ProcessMemoryDump",
TRACE_ID_LOCAL(dump_guid));
}
void MemoryDumpManager::SetupForTracing(
const TraceConfig::MemoryDumpConfig& memory_dump_config) {
AutoLock lock(lock_);
// At this point we must have the ability to request global dumps.
DCHECK(can_request_global_dumps());
MemoryDumpScheduler::Config periodic_config;
for (const auto& trigger : memory_dump_config.triggers) {
if (trigger.trigger_type == MemoryDumpType::PERIODIC_INTERVAL) {
if (periodic_config.triggers.empty()) {
periodic_config.callback =
BindRepeating(&DoGlobalDumpWithoutCallback, request_dump_function_,
MemoryDumpType::PERIODIC_INTERVAL);
}
periodic_config.triggers.push_back(
{trigger.level_of_detail, trigger.min_time_between_dumps_ms});
}
}
// Only coordinator process triggers periodic memory dumps.
if (is_coordinator_ && !periodic_config.triggers.empty()) {
MemoryDumpScheduler::GetInstance()->Start(periodic_config,
GetOrCreateBgTaskRunnerLocked());
}
}
void MemoryDumpManager::TeardownForTracing() {
// There might be a memory dump in progress while this happens. Therefore,
// ensure that the MDM state which depends on the tracing enabled / disabled
// state is always accessed by the dumping methods holding the |lock_|.
AutoLock lock(lock_);
MemoryDumpScheduler::GetInstance()->Stop();
}
MemoryDumpManager::ProcessMemoryDumpAsyncState::ProcessMemoryDumpAsyncState(
MemoryDumpRequestArgs req_args,
const MemoryDumpProviderInfo::OrderedSet& dump_providers,
ProcessMemoryDumpCallback callback,
scoped_refptr<SequencedTaskRunner> dump_thread_task_runner)
: req_args(req_args),
callback(std::move(callback)),
callback_task_runner(ThreadTaskRunnerHandle::Get()),
dump_thread_task_runner(std::move(dump_thread_task_runner)) {
pending_dump_providers.reserve(dump_providers.size());
pending_dump_providers.assign(dump_providers.rbegin(), dump_providers.rend());
MemoryDumpArgs args = {req_args.level_of_detail, req_args.determinism,
req_args.dump_guid};
process_memory_dump = std::make_unique<ProcessMemoryDump>(args);
}
MemoryDumpManager::ProcessMemoryDumpAsyncState::~ProcessMemoryDumpAsyncState() =
default;
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,269 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_H_
#define BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_H_
#include <stdint.h>
#include <map>
#include <memory>
#include <unordered_set>
#include <vector>
#include "base/atomicops.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/memory/singleton.h"
#include "base/synchronization/lock.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_dump_provider_info.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event.h"
namespace base {
class SequencedTaskRunner;
class SingleThreadTaskRunner;
class Thread;
namespace trace_event {
class MemoryDumpProvider;
// This is the interface exposed to the rest of the codebase to deal with
// memory tracing. The main entry point for clients is CreateProcessDump();
// dump providers hook in via RegisterDumpProvider() / UnregisterDumpProvider().
class BASE_EXPORT MemoryDumpManager {
public:
using RequestGlobalDumpFunction =
RepeatingCallback<void(MemoryDumpType, MemoryDumpLevelOfDetail)>;
static constexpr const char* const kTraceCategory =
TRACE_DISABLED_BY_DEFAULT("memory-infra");
// This value is returned as the tracing id of the child processes by
// GetTracingProcessId() when tracing is not enabled.
static const uint64_t kInvalidTracingProcessId;
static MemoryDumpManager* GetInstance();
static std::unique_ptr<MemoryDumpManager> CreateInstanceForTesting();
// Invoked once per process to listen to trace begin / end events.
// Initialization can happen after (Un)RegisterDumpProvider() calls, and the
// MemoryDumpManager guarantees to support this.
// On the other hand, the MemoryDumpManager will not be fully operational
// (any CreateProcessDump() call will return a failure) until initialized.
// Arguments:
// is_coordinator: True when current process coordinates the periodic dump
// triggering.
// request_dump_function: Function to invoke a global dump. Global dump
// involves embedder-specific behaviors like multiprocess handshaking.
// TODO(primiano): this is only required to trigger global dumps from
// the scheduler. Should be removed once they are both moved out of base.
void Initialize(RequestGlobalDumpFunction request_dump_function,
bool is_coordinator);
// (Un)Registers a MemoryDumpProvider instance.
// Args:
// - mdp: the MemoryDumpProvider instance to be registered. MemoryDumpManager
// does NOT take memory ownership of |mdp|, which is expected to either
// be a singleton or unregister itself.
// - name: a friendly name (duplicates allowed). Used for debugging and
// run-time profiling of memory-infra internals. Must be a long-lived
// C string.
// - task_runner: either a SingleThreadTaskRunner or SequencedTaskRunner. All
// the calls to |mdp| will be run on the given |task_runner|. If passed
// null |mdp| should be able to handle calls on arbitrary threads.
// - options: extra optional arguments. See memory_dump_provider.h.
void RegisterDumpProvider(MemoryDumpProvider* mdp,
const char* name,
scoped_refptr<SingleThreadTaskRunner> task_runner);
void RegisterDumpProvider(MemoryDumpProvider* mdp,
const char* name,
scoped_refptr<SingleThreadTaskRunner> task_runner,
MemoryDumpProvider::Options options);
void RegisterDumpProviderWithSequencedTaskRunner(
MemoryDumpProvider* mdp,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
MemoryDumpProvider::Options options);
void UnregisterDumpProvider(MemoryDumpProvider* mdp);
// Unregisters an unbound dump provider and takes care of its deletion
// asynchronously. Can be used only for dump providers with no
// task-runner affinity.
// This method takes ownership of the dump provider and guarantees that:
// - The |mdp| will be deleted at some point in the near future.
// - Its deletion will not happen concurrently with the OnMemoryDump() call.
// Note that OnMemoryDump() calls can still happen after this method returns.
void UnregisterAndDeleteDumpProviderSoon(
std::unique_ptr<MemoryDumpProvider> mdp);
// Prepares MemoryDumpManager for CreateProcessDump() calls for tracing-related
// modes (i.e. |dump_type| != SUMMARY_ONLY).
// Also initializes the scheduler with the given config.
void SetupForTracing(const TraceConfig::MemoryDumpConfig&);
// Tear-down tracing related state.
// Non-tracing modes (e.g. SUMMARY_ONLY) will continue to work.
void TeardownForTracing();
// Creates a memory dump for the current process and appends it to the trace.
// |callback| will be invoked asynchronously upon completion on the same
// thread on which CreateProcessDump() was called. This method should only be
// used by the memory-infra service while creating a global memory dump.
void CreateProcessDump(const MemoryDumpRequestArgs& args,
ProcessMemoryDumpCallback callback);
// Lets tests see if a dump provider is registered.
bool IsDumpProviderRegisteredForTesting(MemoryDumpProvider*);
// Returns a unique id for identifying the processes. The id can be
// retrieved by child processes only when tracing is enabled. This is
// intended to express cross-process sharing of memory dumps on the
// child-process side, without having to know its own child process id.
uint64_t GetTracingProcessId() const { return tracing_process_id_; }
void set_tracing_process_id(uint64_t tracing_process_id) {
tracing_process_id_ = tracing_process_id;
}
// Returns the name of the allocated_objects dump. Use this to declare
// suballocator dumps from other dump providers.
// It will return nullptr if there is no dump provider for the system
// allocator registered (which is currently the case for Mac OS).
const char* system_allocator_pool_name() const {
return kSystemAllocatorPoolName;
}
// When set to true, calling |RegisterDumpProvider| is a no-op.
void set_dumper_registrations_ignored_for_testing(bool ignored) {
dumper_registrations_ignored_for_testing_ = ignored;
}
scoped_refptr<SequencedTaskRunner> GetDumpThreadTaskRunner();
private:
friend std::default_delete<MemoryDumpManager>; // For the testing instance.
friend struct DefaultSingletonTraits<MemoryDumpManager>;
friend class MemoryDumpManagerTest;
FRIEND_TEST_ALL_PREFIXES(MemoryDumpManagerTest,
NoStackOverflowWithTooManyMDPs);
// Holds the state of a process memory dump that needs to be carried over
// across task runners in order to fulfill an asynchronous CreateProcessDump()
// request. At any time exactly one task runner owns a
// ProcessMemoryDumpAsyncState.
struct ProcessMemoryDumpAsyncState {
ProcessMemoryDumpAsyncState(
MemoryDumpRequestArgs req_args,
const MemoryDumpProviderInfo::OrderedSet& dump_providers,
ProcessMemoryDumpCallback callback,
scoped_refptr<SequencedTaskRunner> dump_thread_task_runner);
~ProcessMemoryDumpAsyncState();
// A ProcessMemoryDump to collect data from MemoryDumpProviders.
std::unique_ptr<ProcessMemoryDump> process_memory_dump;
// The arguments passed to the initial CreateProcessDump() request.
const MemoryDumpRequestArgs req_args;
// An ordered sequence of dump providers that have to be invoked to complete
// the dump. This is a copy of |dump_providers_| at the beginning of a dump
// and becomes empty at the end, when all dump providers have been invoked.
std::vector<scoped_refptr<MemoryDumpProviderInfo>> pending_dump_providers;
// Callback passed to the initial call to CreateProcessDump().
ProcessMemoryDumpCallback callback;
// The thread on which FinishAsyncProcessDump() (and hence |callback|)
// should be invoked. This is the thread on which the initial
// CreateProcessDump() request was called.
const scoped_refptr<SingleThreadTaskRunner> callback_task_runner;
// The thread on which unbound dump providers should be invoked.
// This is essentially |dump_thread_|.task_runner() but needs to be kept
// as a separate variable as it needs to be accessed by arbitrary dumpers'
// threads outside of the lock_ to avoid races when disabling tracing.
// It is immutable for all the duration of a tracing session.
const scoped_refptr<SequencedTaskRunner> dump_thread_task_runner;
private:
DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDumpAsyncState);
};
static const int kMaxConsecutiveFailuresCount;
static const char* const kSystemAllocatorPoolName;
MemoryDumpManager();
virtual ~MemoryDumpManager();
static void SetInstanceForTesting(MemoryDumpManager* instance);
// Lazily initializes dump_thread_ and returns its TaskRunner.
scoped_refptr<base::SequencedTaskRunner> GetOrCreateBgTaskRunnerLocked();
// Calls InvokeOnMemoryDump() for each MDP that belongs to the current
// task runner and switches to the task runner of the next MDP. Handles
// failures in MDP and thread hops, and always calls FinishAsyncProcessDump()
// at the end.
void ContinueAsyncProcessDump(
ProcessMemoryDumpAsyncState* owned_pmd_async_state);
// Invokes OnMemoryDump() of the given MDP. Should be called on the MDP task
// runner.
void InvokeOnMemoryDump(MemoryDumpProviderInfo* mdpinfo,
ProcessMemoryDump* pmd);
void FinishAsyncProcessDump(
std::unique_ptr<ProcessMemoryDumpAsyncState> pmd_async_state);
// Helper for the RegisterDumpProvider* functions.
void RegisterDumpProviderInternal(
MemoryDumpProvider* mdp,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
const MemoryDumpProvider::Options& options);
// Helper for the public UnregisterDumpProvider* functions.
void UnregisterDumpProviderInternal(MemoryDumpProvider* mdp,
bool take_mdp_ownership_and_delete_async);
bool can_request_global_dumps() const {
return !request_dump_function_.is_null();
}
// An ordered set of registered MemoryDumpProviderInfo(s), sorted by task
// runner affinity (MDPs belonging to the same task runners are adjacent).
MemoryDumpProviderInfo::OrderedSet dump_providers_;
// Function provided by the embedder to handle global dump requests.
RequestGlobalDumpFunction request_dump_function_;
// True when current process coordinates the periodic dump triggering.
bool is_coordinator_;
// Protects against concurrent accesses to the local state, e.g. to guard
// against disabling logging while dumping on another thread.
Lock lock_;
// Thread used for MemoryDumpProviders which don't specify a task runner
// affinity.
std::unique_ptr<Thread> dump_thread_;
// The unique id of the child process. This is created only for tracing and is
// expected to be valid only when tracing is enabled.
uint64_t tracing_process_id_;
// When true, calling |RegisterDumpProvider| is a no-op.
bool dumper_registrations_ignored_for_testing_;
DISALLOW_COPY_AND_ASSIGN(MemoryDumpManager);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_H_
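
A registration sketch under the rules documented above: the MyCacheDumpProvider class and the I/O task runner are hypothetical, and the provider must stay alive for as long as it is registered.

// Bound provider: OnMemoryDump() is always invoked on |io_task_runner|.
void RegisterMyCacheDumpProvider(
    MyCacheDumpProvider* provider,
    scoped_refptr<base::SingleThreadTaskRunner> io_task_runner) {
  base::trace_event::MemoryDumpManager::GetInstance()->RegisterDumpProvider(
      provider, "MyCache", std::move(io_task_runner));
}

// Must run on |io_task_runner|, otherwise the unregistration DCHECK in
// UnregisterDumpProviderInternal() fires.
void UnregisterMyCacheDumpProvider(MyCacheDumpProvider* provider) {
  base::trace_event::MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
      provider);
}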

View file

@ -0,0 +1,38 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_TEST_UTILS_H_
#define BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_TEST_UTILS_H_
#include "base/bind.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_request_args.h"
namespace base {
namespace trace_event {
void RequestGlobalDumpForInProcessTesting(
base::trace_event::MemoryDumpType dump_type,
base::trace_event::MemoryDumpLevelOfDetail level_of_detail) {
MemoryDumpRequestArgs local_args = {0 /* dump_guid */, dump_type,
level_of_detail};
MemoryDumpManager::GetInstance()->CreateProcessDump(
local_args, ProcessMemoryDumpCallback());
}
// Short-circuits the RequestGlobalDumpFunction() to CreateProcessDump(),
// effectively allowing both to be used in unittests with the same behavior.
// Unittests are in-process only and don't require the multi-process dump
// handshaking (which would require bits outside of base).
void InitializeMemoryDumpManagerForInProcessTesting(bool is_coordinator) {
MemoryDumpManager* instance = MemoryDumpManager::GetInstance();
instance->set_dumper_registrations_ignored_for_testing(true);
instance->Initialize(BindRepeating(&RequestGlobalDumpForInProcessTesting),
is_coordinator);
}
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_MEMORY_DUMP_MANAGER_TEST_UTILS_H_

View file

@ -0,0 +1,52 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_H_
#define BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_H_
#include "base/base_export.h"
#include "base/macros.h"
#include "base/process/process_handle.h"
#include "base/trace_event/memory_dump_request_args.h"
namespace base {
namespace trace_event {
class ProcessMemoryDump;
// The contract interface that memory dump providers must implement.
class BASE_EXPORT MemoryDumpProvider {
public:
// Optional arguments for MemoryDumpManager::RegisterDumpProvider().
struct Options {
Options() : dumps_on_single_thread_task_runner(false) {}
// |dumps_on_single_thread_task_runner| is true if the dump provider runs on
// a SingleThreadTaskRunner, which is usually the case. It is faster to run
// all providers that run on the same thread together without thread hops.
bool dumps_on_single_thread_task_runner;
};
virtual ~MemoryDumpProvider() = default;
// Called by the MemoryDumpManager when generating memory dumps.
// The |args| specify if the embedder should generate light/heavy dumps on
// dump requests. The embedder should return true if the |pmd| was
// successfully populated, false if something went wrong and the dump should
// be considered invalid.
// (Note, the MemoryDumpManager has a fail-safe logic which will disable the
// MemoryDumpProvider for the entire trace session if it fails consistently).
virtual bool OnMemoryDump(const MemoryDumpArgs& args,
ProcessMemoryDump* pmd) = 0;
protected:
MemoryDumpProvider() = default;
DISALLOW_COPY_AND_ASSIGN(MemoryDumpProvider);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_H_
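
A minimal skeleton of the contract above; the class is hypothetical and its body defers to memory_allocator_dump.h for what actually gets written into |pmd|.

class MyCacheDumpProvider : public base::trace_event::MemoryDumpProvider {
 public:
  bool OnMemoryDump(const base::trace_event::MemoryDumpArgs& args,
                    base::trace_event::ProcessMemoryDump* pmd) override {
    // Populate |pmd| here (see MemoryAllocatorDump). Returning false counts
    // as a failure; repeated failures trip MemoryDumpManager's fail-safe and
    // disable this provider for the rest of the trace session.
    return true;
  }
};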

View file

@ -0,0 +1,43 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/memory_dump_provider_info.h"
#include <tuple>
#include "base/sequenced_task_runner.h"
namespace base {
namespace trace_event {
MemoryDumpProviderInfo::MemoryDumpProviderInfo(
MemoryDumpProvider* dump_provider,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
const MemoryDumpProvider::Options& options,
bool allowed_in_background_mode)
: dump_provider(dump_provider),
options(options),
name(name),
task_runner(std::move(task_runner)),
allowed_in_background_mode(allowed_in_background_mode),
consecutive_failures(0),
disabled(false) {}
MemoryDumpProviderInfo::~MemoryDumpProviderInfo() = default;
bool MemoryDumpProviderInfo::Comparator::operator()(
const scoped_refptr<MemoryDumpProviderInfo>& a,
const scoped_refptr<MemoryDumpProviderInfo>& b) const {
if (!a || !b)
return a.get() < b.get();
// Ensure that unbound providers (task_runner == nullptr) always run last.
// Rationale: some unbound dump providers are known to be slow, keep them last
// to avoid skewing timings of the other dump providers.
return std::tie(a->task_runner, a->dump_provider) >
std::tie(b->task_runner, b->dump_provider);
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,108 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
#define BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_
#include <memory>
#include <set>
#include "base/base_export.h"
#include "base/memory/ref_counted.h"
#include "base/trace_event/memory_dump_provider.h"
namespace base {
class SequencedTaskRunner;
namespace trace_event {
// Wraps a MemoryDumpProvider (MDP), which is registered via
// MemoryDumpManager(MDM)::RegisterDumpProvider(), holding the extra information
// required to deal with it (which task runner it should be invoked onto,
// whether it has been disabled, etc.)
// More importantly, having a refptr to this object guarantees that an MDP
// that is not thread-bound (and hence can only be unregistered via
// MDM::UnregisterAndDeleteDumpProviderSoon()) will stay alive as long as the
// refptr is held.
//
// Lifetime:
// At any time, there is at most one instance of this class for each instance
// of a given MemoryDumpProvider, but there might be several scoped_refptr
// holding onto each of them. Specifically:
// - In nominal conditions, there is a refptr for each registered MDP in the
// MDM's |dump_providers_| list.
// - In most cases, the only refptr (in the |dump_providers_| list) is destroyed
// by MDM::UnregisterDumpProvider().
// - However, when MDM starts a dump, the list of refptrs is copied into the
// ProcessMemoryDumpAsyncState. That list is pruned as MDP(s) are invoked.
// - If UnregisterDumpProvider() is called on a non-thread-bound MDP while a
// dump is in progress, the extra refptr is destroyed in
// MDM::SetupNextMemoryDump() or MDM::InvokeOnMemoryDump(), when the copy
// inside ProcessMemoryDumpAsyncState is erase()-d.
// - The PeakDetector can keep extra refptrs when enabled.
struct BASE_EXPORT MemoryDumpProviderInfo
: public RefCountedThreadSafe<MemoryDumpProviderInfo> {
public:
// Define a total order based on the |task_runner| affinity, so that MDPs
// belonging to the same SequencedTaskRunner are adjacent in the set.
struct Comparator {
bool operator()(const scoped_refptr<MemoryDumpProviderInfo>& a,
const scoped_refptr<MemoryDumpProviderInfo>& b) const;
};
using OrderedSet =
std::set<scoped_refptr<MemoryDumpProviderInfo>, Comparator>;
MemoryDumpProviderInfo(MemoryDumpProvider* dump_provider,
const char* name,
scoped_refptr<SequencedTaskRunner> task_runner,
const MemoryDumpProvider::Options& options,
bool allowed_in_background_mode);
// It is safe to access the const fields below from any thread as they are
// never mutated.
MemoryDumpProvider* const dump_provider;
// The |options| arg passed to MDM::RegisterDumpProvider().
const MemoryDumpProvider::Options options;
// Human-readable name, not unique (distinct MDP instances might have the same
// name). Used for debugging, testing and allowlisting for BACKGROUND mode.
const char* const name;
// The task runner onto which the MDP::OnMemoryDump() call should be posted.
// Can be nullptr, in which case the MDP will be invoked on a background
// thread handled by MDM.
const scoped_refptr<SequencedTaskRunner> task_runner;
// True if the dump provider is allowlisted for background mode.
const bool allowed_in_background_mode;
// The fields below, instead, are not thread safe and can be mutated only:
// - On the |task_runner|, when not null (i.e. for thread-bound MDPs).
// - By the MDM's background thread (or in any other way that guarantees
// sequencing) for non-thread-bound MDPs.
// Used to transfer ownership for UnregisterAndDeleteDumpProviderSoon().
// nullptr in all other cases.
std::unique_ptr<MemoryDumpProvider> owned_dump_provider;
// For fail-safe logic (auto-disable failing MDPs).
int consecutive_failures;
// Flagged either by the auto-disable logic or during unregistration.
bool disabled;
private:
friend class base::RefCountedThreadSafe<MemoryDumpProviderInfo>;
~MemoryDumpProviderInfo();
DISALLOW_COPY_AND_ASSIGN(MemoryDumpProviderInfo);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_MEMORY_DUMP_PROVIDER_INFO_H_

View file

@ -0,0 +1,64 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/memory_dump_request_args.h"
#include "base/logging.h"
namespace base {
namespace trace_event {
// static
const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type) {
switch (dump_type) {
case MemoryDumpType::PERIODIC_INTERVAL:
return "periodic_interval";
case MemoryDumpType::EXPLICITLY_TRIGGERED:
return "explicitly_triggered";
case MemoryDumpType::SUMMARY_ONLY:
return "summary_only";
}
NOTREACHED();
return "unknown";
}
MemoryDumpType StringToMemoryDumpType(const std::string& str) {
if (str == "periodic_interval")
return MemoryDumpType::PERIODIC_INTERVAL;
if (str == "explicitly_triggered")
return MemoryDumpType::EXPLICITLY_TRIGGERED;
if (str == "summary_only")
return MemoryDumpType::SUMMARY_ONLY;
NOTREACHED();
return MemoryDumpType::LAST;
}
const char* MemoryDumpLevelOfDetailToString(
const MemoryDumpLevelOfDetail& level_of_detail) {
switch (level_of_detail) {
case MemoryDumpLevelOfDetail::BACKGROUND:
return "background";
case MemoryDumpLevelOfDetail::LIGHT:
return "light";
case MemoryDumpLevelOfDetail::DETAILED:
return "detailed";
}
NOTREACHED();
return "unknown";
}
MemoryDumpLevelOfDetail StringToMemoryDumpLevelOfDetail(
const std::string& str) {
if (str == "background")
return MemoryDumpLevelOfDetail::BACKGROUND;
if (str == "light")
return MemoryDumpLevelOfDetail::LIGHT;
if (str == "detailed")
return MemoryDumpLevelOfDetail::DETAILED;
NOTREACHED();
return MemoryDumpLevelOfDetail::LAST;
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,110 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_REQUEST_ARGS_H_
#define BASE_TRACE_EVENT_MEMORY_DUMP_REQUEST_ARGS_H_
// This file defines the types and structs used to issue memory dump requests.
// These are also used in the IPCs for coordinating inter-process memory dumps.
#include <stdint.h>
#include <map>
#include <memory>
#include <string>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/optional.h"
#include "base/process/process_handle.h"
namespace base {
namespace trace_event {
class ProcessMemoryDump;
// Captures the reason why a memory dump is being requested. This is to allow
// selective enabling of dumps, filtering and post-processing. Keep this
// consistent with memory_instrumentation.mojo and
// memory_instrumentation_struct_traits.{h,cc}
enum class MemoryDumpType {
PERIODIC_INTERVAL, // Dumping memory at periodic intervals.
EXPLICITLY_TRIGGERED, // Non maskable dump request.
SUMMARY_ONLY, // Calculate just the summary & don't add to the trace.
LAST = SUMMARY_ONLY
};
// Tells the MemoryDumpProvider(s) how detailed their dumps should be.
// Keep this consistent with memory_instrumentation.mojo and
// memory_instrumentation_struct_traits.{h,cc}
enum class MemoryDumpLevelOfDetail : uint32_t {
FIRST,
// For background tracing mode. Dumps must be quick and typically just the
// totals are expected. Suballocations need not be specified. Dump names must
// contain only pre-defined strings, and string arguments cannot be added.
BACKGROUND = FIRST,
// For the levels below, MemoryDumpProvider instances must guarantee that the
// total size reported in the root node is consistent. Only the granularity of
// the child MemoryAllocatorDump(s) differs with the levels.
// Few entries, typically a fixed number, per dump.
LIGHT,
// Unrestricted amount of entries per dump.
DETAILED,
LAST = DETAILED
};
// Tells the MemoryDumpProvider(s) if they should try to make the result
// more deterministic by forcing garbage collection.
// Keep this consistent with memory_instrumentation.mojo and
// memory_instrumentation_struct_traits.{h,cc}
enum class MemoryDumpDeterminism : uint32_t { NONE, FORCE_GC };
// Keep this consistent with memory_instrumentation.mojo and
// memory_instrumentation_struct_traits.{h,cc}
struct BASE_EXPORT MemoryDumpRequestArgs {
// Globally unique identifier. In multi-process dumps, all processes issue a
// local dump with the same guid. This allows the trace importers to
// reconstruct the global dump.
uint64_t dump_guid;
MemoryDumpType dump_type;
MemoryDumpLevelOfDetail level_of_detail;
MemoryDumpDeterminism determinism;
};
// Args for ProcessMemoryDump, passed to the OnMemoryDump() calls of memory
// dump providers. Dump providers are expected to read the args when creating
// dumps.
struct MemoryDumpArgs {
// Specifies how detailed the dumps should be.
MemoryDumpLevelOfDetail level_of_detail;
// Specifies whether the dumps should be more deterministic.
MemoryDumpDeterminism determinism;
// Globally unique identifier. In multi-process dumps, all processes issue a
// local dump with the same guid. This allows the trace importers to
// reconstruct the global dump.
uint64_t dump_guid;
};
using ProcessMemoryDumpCallback = OnceCallback<
void(bool success, uint64_t dump_guid, std::unique_ptr<ProcessMemoryDump>)>;
BASE_EXPORT const char* MemoryDumpTypeToString(const MemoryDumpType& dump_type);
BASE_EXPORT MemoryDumpType StringToMemoryDumpType(const std::string& str);
BASE_EXPORT const char* MemoryDumpLevelOfDetailToString(
const MemoryDumpLevelOfDetail& level_of_detail);
BASE_EXPORT MemoryDumpLevelOfDetail
StringToMemoryDumpLevelOfDetail(const std::string& str);
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_MEMORY_DUMP_REQUEST_ARGS_H_
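
A tiny sketch of the conversion helpers declared above; these string forms are presumably what appear in trace configs, and the helpers round-trip for every valid value. Assumes base/logging.h for DCHECK_EQ.

void CheckLevelOfDetailRoundTrip() {
  using base::trace_event::MemoryDumpLevelOfDetail;
  // String -> enum.
  MemoryDumpLevelOfDetail detail =
      base::trace_event::StringToMemoryDumpLevelOfDetail("background");
  DCHECK_EQ(MemoryDumpLevelOfDetail::BACKGROUND, detail);
  // Enum -> string, back to where we started.
  DCHECK_EQ(std::string("background"),
            base::trace_event::MemoryDumpLevelOfDetailToString(detail));
}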

View file

@ -0,0 +1,118 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/memory_dump_scheduler.h"
#include <algorithm>
#include <limits>
#include "base/bind.h"
#include "base/logging.h"
#include "base/threading/sequenced_task_runner_handle.h"
namespace base {
namespace trace_event {
// static
MemoryDumpScheduler* MemoryDumpScheduler::GetInstance() {
static MemoryDumpScheduler* instance = new MemoryDumpScheduler();
return instance;
}
MemoryDumpScheduler::MemoryDumpScheduler() : period_ms_(0), generation_(0) {}
MemoryDumpScheduler::~MemoryDumpScheduler() {
// Hit only in tests. Check that tests don't finish without calling Stop().
DCHECK(!is_enabled_for_testing());
}
void MemoryDumpScheduler::Start(
MemoryDumpScheduler::Config config,
scoped_refptr<SequencedTaskRunner> task_runner) {
DCHECK(!task_runner_);
task_runner_ = task_runner;
task_runner->PostTask(FROM_HERE, BindOnce(&MemoryDumpScheduler::StartInternal,
Unretained(this), config));
}
void MemoryDumpScheduler::Stop() {
if (!task_runner_)
return;
task_runner_->PostTask(FROM_HERE, BindOnce(&MemoryDumpScheduler::StopInternal,
Unretained(this)));
task_runner_ = nullptr;
}
void MemoryDumpScheduler::StartInternal(MemoryDumpScheduler::Config config) {
uint32_t light_dump_period_ms = 0;
uint32_t heavy_dump_period_ms = 0;
uint32_t min_period_ms = std::numeric_limits<uint32_t>::max();
for (const Config::Trigger& trigger : config.triggers) {
DCHECK_GT(trigger.period_ms, 0u);
switch (trigger.level_of_detail) {
case MemoryDumpLevelOfDetail::BACKGROUND:
break;
case MemoryDumpLevelOfDetail::LIGHT:
DCHECK_EQ(0u, light_dump_period_ms);
light_dump_period_ms = trigger.period_ms;
break;
case MemoryDumpLevelOfDetail::DETAILED:
DCHECK_EQ(0u, heavy_dump_period_ms);
heavy_dump_period_ms = trigger.period_ms;
break;
}
min_period_ms = std::min(min_period_ms, trigger.period_ms);
}
DCHECK_EQ(0u, light_dump_period_ms % min_period_ms);
DCHECK_EQ(0u, heavy_dump_period_ms % min_period_ms);
DCHECK(!config.callback.is_null());
callback_ = config.callback;
period_ms_ = min_period_ms;
tick_count_ = 0;
light_dump_rate_ = light_dump_period_ms / min_period_ms;
heavy_dump_rate_ = heavy_dump_period_ms / min_period_ms;
// Trigger the first dump after 200ms.
// TODO(lalitm): this is a temporary hack to delay the first scheduled dump
// so that the child processes get tracing enabled notification via IPC.
// See crbug.com/770151.
SequencedTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
BindOnce(&MemoryDumpScheduler::Tick, Unretained(this), ++generation_),
TimeDelta::FromMilliseconds(200));
}
void MemoryDumpScheduler::StopInternal() {
period_ms_ = 0;
generation_++;
callback_.Reset();
}
void MemoryDumpScheduler::Tick(uint32_t expected_generation) {
if (period_ms_ == 0 || generation_ != expected_generation)
return;
MemoryDumpLevelOfDetail level_of_detail = MemoryDumpLevelOfDetail::BACKGROUND;
if (light_dump_rate_ > 0 && tick_count_ % light_dump_rate_ == 0)
level_of_detail = MemoryDumpLevelOfDetail::LIGHT;
if (heavy_dump_rate_ > 0 && tick_count_ % heavy_dump_rate_ == 0)
level_of_detail = MemoryDumpLevelOfDetail::DETAILED;
tick_count_++;
callback_.Run(level_of_detail);
SequencedTaskRunnerHandle::Get()->PostDelayedTask(
FROM_HERE,
BindOnce(&MemoryDumpScheduler::Tick, Unretained(this),
expected_generation),
TimeDelta::FromMilliseconds(period_ms_));
}
MemoryDumpScheduler::Config::Config() = default;
MemoryDumpScheduler::Config::~Config() = default;
MemoryDumpScheduler::Config::Config(const MemoryDumpScheduler::Config&) =
default;
} // namespace trace_event
} // namespace base
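
To make the tick arithmetic in StartInternal() and Tick() concrete, here is a hedged usage sketch: with a LIGHT trigger at 250 ms and a DETAILED trigger at 1000 ms, period_ms_ becomes 250, light_dump_rate_ becomes 1 and heavy_dump_rate_ becomes 4, so every tick requests at least a LIGHT dump and every fourth tick (including the first) upgrades to DETAILED. The callback body and the trigger periods are illustrative, not prescribed by this class.

#include "base/bind.h"
#include "base/logging.h"
#include "base/threading/sequenced_task_runner_handle.h"
#include "base/trace_event/memory_dump_scheduler.h"

namespace {

// Illustrative callback; a real client would request a global memory dump at
// the given level of detail.
void OnScheduledDump(base::trace_event::MemoryDumpLevelOfDetail detail) {
  LOG(INFO) << "Scheduled dump: "
            << base::trace_event::MemoryDumpLevelOfDetailToString(detail);
}

void StartExampleScheduler() {
  base::trace_event::MemoryDumpScheduler::Config config;
  config.triggers = {
      {base::trace_event::MemoryDumpLevelOfDetail::LIGHT, 250},
      {base::trace_event::MemoryDumpLevelOfDetail::DETAILED, 1000},
  };
  config.callback = base::BindRepeating(&OnScheduledDump);

  // Start() and Stop() are not thread safe; the caller must serialize them.
  // Ticks are posted to the task runner passed here.
  base::trace_event::MemoryDumpScheduler::GetInstance()->Start(
      config, base::SequencedTaskRunnerHandle::Get());
}

}  // namespace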

View file

@ -0,0 +1,76 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
#define BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H
#include <stdint.h>
#include <vector>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/memory/ref_counted.h"
#include "base/trace_event/memory_dump_request_args.h"
namespace base {
class SequencedTaskRunner;
namespace trace_event {
// Schedules global dump requests based on the triggers added. The methods of
// this class are NOT thread safe; the client must make sure all the methods
// are invoked safely (e.g. always from the same sequence).
class BASE_EXPORT MemoryDumpScheduler {
public:
using PeriodicCallback = RepeatingCallback<void(MemoryDumpLevelOfDetail)>;
// Passed to Start().
struct BASE_EXPORT Config {
struct Trigger {
MemoryDumpLevelOfDetail level_of_detail;
uint32_t period_ms;
};
Config();
Config(const Config&);
~Config();
std::vector<Trigger> triggers;
PeriodicCallback callback;
};
static MemoryDumpScheduler* GetInstance();
void Start(Config, scoped_refptr<SequencedTaskRunner> task_runner);
void Stop();
bool is_enabled_for_testing() const { return bool(task_runner_); }
private:
friend class MemoryDumpSchedulerTest;
MemoryDumpScheduler();
~MemoryDumpScheduler();
void StartInternal(Config);
void StopInternal();
void Tick(uint32_t expected_generation);
// Accessed only by the public methods (never from the task runner itself).
scoped_refptr<SequencedTaskRunner> task_runner_;
// These fields instead are only accessed from within the task runner.
uint32_t period_ms_; // 0 == disabled.
uint32_t generation_; // Used to invalidate outstanding tasks after Stop().
uint32_t tick_count_;
uint32_t light_dump_rate_;
uint32_t heavy_dump_rate_;
PeriodicCallback callback_;
DISALLOW_COPY_AND_ASSIGN(MemoryDumpScheduler);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_MEMORY_DUMP_SCHEDULER_H

View file

@ -0,0 +1,469 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/memory_infra_background_allowlist.h"
#include <ctype.h>
#include <string.h>
#include <string>
#include "base/strings/string_util.h"
namespace base {
namespace trace_event {
namespace {
// The names of dump providers allowed to perform background tracing. Dump
// providers can be added here only if the background mode dump has very
// little processor and memory overhead.
// TODO(ssid): Some dump providers do not create ownership edges on background
// dump. So, the effective size will not be correct.
const char* const kDumpProviderAllowlist[] = {
"android::ResourceManagerImpl",
"AutocompleteController",
"BlinkGC",
"BlinkObjectCounters",
"BlobStorageContext",
"ClientDiscardableSharedMemoryManager",
"DevTools",
"DiscardableSharedMemoryManager",
"DOMStorage",
"DownloadService",
"ExtensionFunctions",
"gpu::BufferManager",
"gpu::RenderbufferManager",
"gpu::ServiceDiscardableManager",
"gpu::ServiceTransferCache",
"gpu::SharedImageStub",
"gpu::TextureManager",
"GrShaderCache",
"FontCaches",
"HistoryReport",
"IPCChannel",
"IndexedDBBackingStore",
"IndexedDBFactoryImpl",
"InMemoryURLIndex",
"JavaHeap",
"LevelDB",
"LeveldbValueStore",
"LocalStorage",
"MadvFreeDiscardableMemoryAllocator",
"Malloc",
"MemoryCache",
"MojoHandleTable",
"MojoLevelDB",
"MojoMessages",
"PartitionAlloc",
"ProcessMemoryMetrics",
"SharedContextState",
"SharedMemoryTracker",
"Skia",
"Sql",
"URLRequestContext",
"V8Isolate",
"WebMediaPlayer_MainThread",
"WebMediaPlayer_MediaThread",
"SyncDirectory",
"TabRestoreServiceHelper",
"VizProcessContextProvider",
nullptr // End of list marker.
};
// A list of string names that are allowed for the memory allocator dumps in
// background mode.
const char* const kAllocatorDumpNameAllowlist[] = {
"blink_gc/main/heap",
"blink_gc/workers/heap/worker_0x?",
"blink_objects/AdSubframe",
"blink_objects/AudioHandler",
"blink_objects/ContextLifecycleStateObserver",
"blink_objects/DetachedScriptState",
"blink_objects/Document",
"blink_objects/Frame",
"blink_objects/JSEventListener",
"blink_objects/LayoutObject",
"blink_objects/MediaKeySession",
"blink_objects/MediaKeys",
"blink_objects/Node",
"blink_objects/Resource",
"blink_objects/RTCPeerConnection",
"blink_objects/ScriptPromise",
"blink_objects/V8PerContextData",
"blink_objects/WorkerGlobalScope",
"blink_objects/UACSSResource",
"blink_objects/ResourceFetcher",
"components/download/controller_0x?",
"devtools/file_watcher_0x?",
"discardable",
"discardable/madv_free_allocated",
"discardable/child_0x?",
"extensions/functions",
"extensions/value_store/Extensions.Database.Open.Settings/0x?",
"extensions/value_store/Extensions.Database.Open.Rules/0x?",
"extensions/value_store/Extensions.Database.Open.State/0x?",
"extensions/value_store/Extensions.Database.Open/0x?",
"extensions/value_store/Extensions.Database.Restore/0x?",
"extensions/value_store/Extensions.Database.Value.Restore/0x?",
"font_caches/font_platform_data_cache",
"font_caches/shape_caches",
"gpu/discardable_cache/cache_0x?",
"gpu/discardable_cache/cache_0x?/avg_image_size",
"gpu/gl/buffers/context_group_0x?",
"gpu/gl/renderbuffers/context_group_0x?",
"gpu/gl/textures/context_group_0x?",
"gpu/gr_shader_cache/cache_0x?",
"gpu/shared_images/client_0x?",
"gpu/transfer_cache/cache_0x?",
"gpu/transfer_cache/cache_0x?/avg_image_size",
"history/delta_file_service/leveldb_0x?",
"history/usage_reports_buffer/leveldb_0x?",
"java_heap",
"java_heap/allocated_objects",
"leveldatabase",
"leveldatabase/block_cache/browser",
"leveldatabase/block_cache/in_memory",
"leveldatabase/block_cache/unified",
"leveldatabase/block_cache/web",
"leveldatabase/db_0x?",
"leveldatabase/db_0x?/block_cache",
"leveldatabase/memenv_0x?",
"malloc",
"malloc/allocated_objects",
"malloc/metadata_fragmentation_caches",
"media/webmediaplayer/audio/player_0x?",
"media/webmediaplayer/data_source/player_0x?",
"media/webmediaplayer/demuxer/player_0x?",
"media/webmediaplayer/video/player_0x?",
"media/webmediaplayer/player_0x?",
"mojo",
"mojo/data_pipe_consumer",
"mojo/data_pipe_producer",
"mojo/invitation",
"mojo/messages",
"mojo/message_pipe",
"mojo/platform_handle",
"mojo/queued_ipc_channel_message/0x?",
"mojo/shared_buffer",
"mojo/unknown",
"mojo/watcher",
"net/http_network_session_0x?",
"net/http_network_session_0x?/quic_stream_factory",
"net/http_network_session_0x?/socket_pool",
"net/http_network_session_0x?/spdy_session_pool",
"net/http_network_session_0x?/ssl_client_session_cache",
"net/http_network_session_0x?/stream_factory",
"net/url_request_context",
"net/url_request_context/app_request",
"net/url_request_context/app_request/0x?",
"net/url_request_context/app_request/0x?/cookie_monster",
"net/url_request_context/app_request/0x?/cookie_monster/cookies",
"net/url_request_context/app_request/0x?/cookie_monster/"
"tasks_pending_global",
"net/url_request_context/app_request/0x?/cookie_monster/"
"tasks_pending_for_key",
"net/url_request_context/app_request/0x?/http_cache",
"net/url_request_context/app_request/0x?/http_cache/memory_backend",
"net/url_request_context/app_request/0x?/http_cache/simple_backend",
"net/url_request_context/app_request/0x?/http_network_session",
"net/url_request_context/extensions",
"net/url_request_context/extensions/0x?",
"net/url_request_context/extensions/0x?/cookie_monster",
"net/url_request_context/extensions/0x?/cookie_monster/cookies",
"net/url_request_context/extensions/0x?/cookie_monster/"
"tasks_pending_global",
"net/url_request_context/extensions/0x?/cookie_monster/"
"tasks_pending_for_key",
"net/url_request_context/extensions/0x?/http_cache",
"net/url_request_context/extensions/0x?/http_cache/memory_backend",
"net/url_request_context/extensions/0x?/http_cache/simple_backend",
"net/url_request_context/extensions/0x?/http_network_session",
"net/url_request_context/isolated_media",
"net/url_request_context/isolated_media/0x?",
"net/url_request_context/isolated_media/0x?/cookie_monster",
"net/url_request_context/isolated_media/0x?/cookie_monster/cookies",
"net/url_request_context/isolated_media/0x?/cookie_monster/"
"tasks_pending_global",
"net/url_request_context/isolated_media/0x?/cookie_monster/"
"tasks_pending_for_key",
"net/url_request_context/isolated_media/0x?/http_cache",
"net/url_request_context/isolated_media/0x?/http_cache/memory_backend",
"net/url_request_context/isolated_media/0x?/http_cache/simple_backend",
"net/url_request_context/isolated_media/0x?/http_network_session",
"net/url_request_context/main",
"net/url_request_context/main/0x?",
"net/url_request_context/main/0x?/cookie_monster",
"net/url_request_context/main/0x?/cookie_monster/cookies",
"net/url_request_context/main/0x?/cookie_monster/tasks_pending_global",
"net/url_request_context/main/0x?/cookie_monster/tasks_pending_for_key",
"net/url_request_context/main/0x?/http_cache",
"net/url_request_context/main/0x?/http_cache/memory_backend",
"net/url_request_context/main/0x?/http_cache/simple_backend",
"net/url_request_context/main/0x?/http_network_session",
"net/url_request_context/main_media",
"net/url_request_context/main_media/0x?",
"net/url_request_context/main_media/0x?/cookie_monster",
"net/url_request_context/main_media/0x?/cookie_monster/cookies",
"net/url_request_context/main_media/0x?/cookie_monster/"
"tasks_pending_global",
"net/url_request_context/main_media/0x?/cookie_monster/"
"tasks_pending_for_key",
"net/url_request_context/main_media/0x?/http_cache",
"net/url_request_context/main_media/0x?/http_cache/memory_backend",
"net/url_request_context/main_media/0x?/http_cache/simple_backend",
"net/url_request_context/main_media/0x?/http_network_session",
"net/url_request_context/mirroring",
"net/url_request_context/mirroring/0x?",
"net/url_request_context/mirroring/0x?/cookie_monster",
"net/url_request_context/mirroring/0x?/cookie_monster/cookies",
"net/url_request_context/mirroring/0x?/cookie_monster/tasks_pending_global",
"net/url_request_context/mirroring/0x?/cookie_monster/"
"tasks_pending_for_key",
"net/url_request_context/mirroring/0x?/http_cache",
"net/url_request_context/mirroring/0x?/http_cache/memory_backend",
"net/url_request_context/mirroring/0x?/http_cache/simple_backend",
"net/url_request_context/mirroring/0x?/http_network_session",
"net/url_request_context/proxy",
"net/url_request_context/proxy/0x?",
"net/url_request_context/proxy/0x?/cookie_monster",
"net/url_request_context/proxy/0x?/cookie_monster/cookies",
"net/url_request_context/proxy/0x?/cookie_monster/tasks_pending_global",
"net/url_request_context/proxy/0x?/cookie_monster/tasks_pending_for_key",
"net/url_request_context/proxy/0x?/http_cache",
"net/url_request_context/proxy/0x?/http_cache/memory_backend",
"net/url_request_context/proxy/0x?/http_cache/simple_backend",
"net/url_request_context/proxy/0x?/http_network_session",
"net/url_request_context/safe_browsing",
"net/url_request_context/safe_browsing/0x?",
"net/url_request_context/safe_browsing/0x?/cookie_monster",
"net/url_request_context/safe_browsing/0x?/cookie_monster/cookies",
"net/url_request_context/safe_browsing/0x?/cookie_monster/"
"tasks_pending_global",
"net/url_request_context/safe_browsing/0x?/cookie_monster/"
"tasks_pending_for_key",
"net/url_request_context/safe_browsing/0x?/http_cache",
"net/url_request_context/safe_browsing/0x?/http_cache/memory_backend",
"net/url_request_context/safe_browsing/0x?/http_cache/simple_backend",
"net/url_request_context/safe_browsing/0x?/http_network_session",
"net/url_request_context/system",
"net/url_request_context/system/0x?",
"net/url_request_context/system/0x?/cookie_monster",
"net/url_request_context/system/0x?/cookie_monster/cookies",
"net/url_request_context/system/0x?/cookie_monster/tasks_pending_global",
"net/url_request_context/system/0x?/cookie_monster/tasks_pending_for_key",
"net/url_request_context/system/0x?/http_cache",
"net/url_request_context/system/0x?/http_cache/memory_backend",
"net/url_request_context/system/0x?/http_cache/simple_backend",
"net/url_request_context/system/0x?/http_network_session",
"net/url_request_context/unknown",
"net/url_request_context/unknown/0x?",
"net/url_request_context/unknown/0x?/cookie_monster",
"net/url_request_context/unknown/0x?/cookie_monster/cookies",
"net/url_request_context/unknown/0x?/cookie_monster/tasks_pending_global",
"net/url_request_context/unknown/0x?/cookie_monster/tasks_pending_for_key",
"net/url_request_context/unknown/0x?/http_cache",
"net/url_request_context/unknown/0x?/http_cache/memory_backend",
"net/url_request_context/unknown/0x?/http_cache/simple_backend",
"net/url_request_context/unknown/0x?/http_network_session",
"omnibox/autocomplete_controller/0x?",
"omnibox/in_memory_url_index/0x?",
"web_cache/Image_resources",
"web_cache/CSS stylesheet_resources",
"web_cache/Script_resources",
"web_cache/XSL stylesheet_resources",
"web_cache/Font_resources",
"web_cache/Code_cache",
"web_cache/Encoded_size_duplicated_in_data_urls",
"web_cache/Other_resources",
"partition_alloc/allocated_objects",
"partition_alloc/partitions",
"partition_alloc/partitions/array_buffer",
"partition_alloc/partitions/buffer",
"partition_alloc/partitions/fast_malloc",
"partition_alloc/partitions/layout",
"skia/gpu_resources/context_0x?",
"skia/sk_glyph_cache",
"skia/sk_resource_cache",
"sqlite",
"ui/resource_manager_0x?/default_resource/0x?",
"ui/resource_manager_0x?/tinted_resource",
"site_storage/blob_storage/0x?",
"v8/main/code_stats",
"v8/main/contexts/detached_context",
"v8/main/contexts/native_context",
"v8/main/global_handles",
"v8/main/heap/code_space",
"v8/main/heap/code_stats",
"v8/main/heap/code_large_object_space",
"v8/main/heap/large_object_space",
"v8/main/heap/map_space",
"v8/main/heap/new_large_object_space",
"v8/main/heap/new_space",
"v8/main/heap/old_space",
"v8/main/heap/read_only_space",
"v8/main/malloc",
"v8/main/zapped_for_debug",
"v8/utility/code_stats",
"v8/utility/contexts/detached_context",
"v8/utility/contexts/native_context",
"v8/utility/global_handles",
"v8/utility/heap/code_space",
"v8/utility/heap/code_large_object_space",
"v8/utility/heap/large_object_space",
"v8/utility/heap/map_space",
"v8/utility/heap/new_large_object_space",
"v8/utility/heap/new_space",
"v8/utility/heap/old_space",
"v8/utility/heap/read_only_space",
"v8/utility/malloc",
"v8/utility/zapped_for_debug",
"v8/workers/code_stats/isolate_0x?",
"v8/workers/contexts/detached_context/isolate_0x?",
"v8/workers/contexts/native_context/isolate_0x?",
"v8/workers/global_handles/isolate_0x?",
"v8/workers/heap/code_space/isolate_0x?",
"v8/workers/heap/code_large_object_space/isolate_0x?",
"v8/workers/heap/large_object_space/isolate_0x?",
"v8/workers/heap/map_space/isolate_0x?",
"v8/workers/heap/new_large_object_space/isolate_0x?",
"v8/workers/heap/new_space/isolate_0x?",
"v8/workers/heap/old_space/isolate_0x?",
"v8/workers/heap/read_only_space/isolate_0x?",
"v8/workers/malloc/isolate_0x?",
"v8/workers/zapped_for_debug/isolate_0x?",
"site_storage/index_db/db_0x?",
"site_storage/index_db/memenv_0x?",
"site_storage/index_db/in_flight_0x?",
"site_storage/local_storage/0x?/cache_size",
"site_storage/localstorage/0x?/cache_size",
"site_storage/localstorage/0x?/leveldb",
"site_storage/session_storage/0x?",
"site_storage/session_storage/0x?/cache_size",
"sync/0x?/kernel",
"sync/0x?/store",
"sync/0x?/model_type/APP",
"sync/0x?/model_type/APP_LIST",
"sync/0x?/model_type/APP_NOTIFICATION",
"sync/0x?/model_type/APP_SETTING",
"sync/0x?/model_type/ARC_PACKAGE",
"sync/0x?/model_type/ARTICLE",
"sync/0x?/model_type/AUTOFILL",
"sync/0x?/model_type/AUTOFILL_PROFILE",
"sync/0x?/model_type/AUTOFILL_WALLET",
"sync/0x?/model_type/BOOKMARK",
"sync/0x?/model_type/DEVICE_INFO",
"sync/0x?/model_type/DICTIONARY",
"sync/0x?/model_type/EXPERIMENTS",
"sync/0x?/model_type/EXTENSION",
"sync/0x?/model_type/EXTENSION_SETTING",
"sync/0x?/model_type/FAVICON_IMAGE",
"sync/0x?/model_type/FAVICON_TRACKING",
"sync/0x?/model_type/HISTORY_DELETE_DIRECTIVE",
"sync/0x?/model_type/MANAGED_USER",
"sync/0x?/model_type/MANAGED_USER_SETTING",
"sync/0x?/model_type/MANAGED_USER_SHARED_SETTING",
"sync/0x?/model_type/MANAGED_USER_WHITELIST",
"sync/0x?/model_type/NIGORI",
"sync/0x?/model_type/OS_PREFERENCE",
"sync/0x?/model_type/OS_PRIORITY_PREFERENCE",
"sync/0x?/model_type/PASSWORD",
"sync/0x?/model_type/PREFERENCE",
"sync/0x?/model_type/PRINTER",
"sync/0x?/model_type/PRIORITY_PREFERENCE",
"sync/0x?/model_type/READING_LIST",
"sync/0x?/model_type/SEARCH_ENGINE",
"sync/0x?/model_type/SECURITY_EVENT",
"sync/0x?/model_type/SEND_TAB_TO_SELF",
"sync/0x?/model_type/SESSION",
"sync/0x?/model_type/SHARING_MESSAGE",
"sync/0x?/model_type/SYNCED_NOTIFICATION",
"sync/0x?/model_type/SYNCED_NOTIFICATION_APP_INFO",
"sync/0x?/model_type/THEME",
"sync/0x?/model_type/TYPED_URL",
"sync/0x?/model_type/USER_CONSENT",
"sync/0x?/model_type/USER_EVENT",
"sync/0x?/model_type/WALLET_METADATA",
"sync/0x?/model_type/WEB_APP",
"sync/0x?/model_type/WIFI_CONFIGURATION",
"sync/0x?/model_type/WIFI_CREDENTIAL",
"tab_restore/service_helper_0x?/entries",
"tab_restore/service_helper_0x?/entries/tab_0x?",
"tab_restore/service_helper_0x?/entries/window_0x?",
"tracing/heap_profiler_blink_gc/AllocationRegister",
"tracing/heap_profiler_malloc/AllocationRegister",
"tracing/heap_profiler_partition_alloc/AllocationRegister",
nullptr // End of list marker.
};
const char* const* g_dump_provider_allowlist = kDumpProviderAllowlist;
const char* const* g_allocator_dump_name_allowlist =
kAllocatorDumpNameAllowlist;
bool IsMemoryDumpProviderInList(const char* mdp_name, const char* const* list) {
for (size_t i = 0; list[i] != nullptr; ++i) {
if (strcmp(mdp_name, list[i]) == 0)
return true;
}
return false;
}
} // namespace
bool IsMemoryDumpProviderInAllowlist(const char* mdp_name) {
return IsMemoryDumpProviderInList(mdp_name, g_dump_provider_allowlist);
}
bool IsMemoryAllocatorDumpNameInAllowlist(const std::string& name) {
// Global dumps whose names are hex digits are allowed for background use.
if (base::StartsWith(name, "global/", CompareCase::SENSITIVE)) {
for (size_t i = strlen("global/"); i < name.size(); i++)
if (!base::IsHexDigit(name[i]))
return false;
return true;
}
if (base::StartsWith(name, "shared_memory/", CompareCase::SENSITIVE)) {
for (size_t i = strlen("shared_memory/"); i < name.size(); i++)
if (!base::IsHexDigit(name[i]))
return false;
return true;
}
// Replace hexadecimal addresses (marked by a '0x' prefix) in the given string
// with the '0x?' wildcard so it can be matched against the allowlist.
const size_t length = name.size();
std::string stripped_str;
stripped_str.reserve(length);
bool parsing_hex = false;
for (size_t i = 0; i < length; ++i) {
if (parsing_hex && isxdigit(name[i]))
continue;
parsing_hex = false;
if (i + 1 < length && name[i] == '0' && name[i + 1] == 'x') {
parsing_hex = true;
stripped_str.append("0x?");
++i;
} else {
stripped_str.push_back(name[i]);
}
}
for (size_t i = 0; g_allocator_dump_name_allowlist[i] != nullptr; ++i) {
if (stripped_str == g_allocator_dump_name_allowlist[i]) {
return true;
}
}
return false;
}
void SetDumpProviderAllowlistForTesting(const char* const* list) {
g_dump_provider_allowlist = list;
}
void SetAllocatorDumpNameAllowlistForTesting(const char* const* list) {
g_allocator_dump_name_allowlist = list;
}
} // namespace trace_event
} // namespace base
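
The normalization performed by IsMemoryAllocatorDumpNameInAllowlist() is easiest to see with concrete inputs: any hexadecimal run introduced by "0x" collapses to the literal "0x?" before the comparison, and names under "global/" or "shared_memory/" are accepted whenever their suffix is pure hex. A small sketch (the addresses are made up):

#include "base/logging.h"
#include "base/trace_event/memory_infra_background_allowlist.h"

void AllowlistExamples() {
  using base::trace_event::IsMemoryAllocatorDumpNameInAllowlist;

  // "0x7fff5fbff700" is stripped to "0x?", which matches the allowlist entry
  // "leveldatabase/db_0x?/block_cache".
  DCHECK(IsMemoryAllocatorDumpNameInAllowlist(
      "leveldatabase/db_0x7fff5fbff700/block_cache"));

  // "global/<hex digits>" names are always allowed.
  DCHECK(IsMemoryAllocatorDumpNameInAllowlist("global/9e8f2ac4"));

  // Not in the allowlist, so it is rejected in background mode.
  DCHECK(!IsMemoryAllocatorDumpNameInAllowlist("my_component/secret_cache"));
}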

View file

@ -0,0 +1,33 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_ALLOWLIST_H_
#define BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_ALLOWLIST_H_
// This file contains the allowlists (aka whitelists) for background mode to
// limit the tracing overhead and remove sensitive information from traces.
#include <string>
#include "base/base_export.h"
namespace base {
namespace trace_event {
// Checks if the given |mdp_name| is in the allow list.
bool BASE_EXPORT IsMemoryDumpProviderInAllowlist(const char* mdp_name);
// Checks if the given |name| matches any of the allowed patterns.
bool BASE_EXPORT IsMemoryAllocatorDumpNameInAllowlist(const std::string& name);
// Replaces the allowlist with the given list for tests. The last element of
// the list must be nullptr.
void BASE_EXPORT SetDumpProviderAllowlistForTesting(const char* const* list);
void BASE_EXPORT
SetAllocatorDumpNameAllowlistForTesting(const char* const* list);
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_MEMORY_INFRA_BACKGROUND_ALLOWLIST_H_
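
A hedged sketch of how a test might use the testing hooks above; the provider name is hypothetical, the gtest include path follows the usual Chromium convention, and a real test would restore the original list in teardown.

#include "base/trace_event/memory_infra_background_allowlist.h"
#include "testing/gtest/include/gtest/gtest.h"

TEST(MemoryInfraAllowlistExampleTest, SwapsProviderAllowlist) {
  // The setter requires a nullptr-terminated list.
  static const char* const kTestList[] = {"TestDumpProvider", nullptr};
  base::trace_event::SetDumpProviderAllowlistForTesting(kTestList);

  EXPECT_TRUE(
      base::trace_event::IsMemoryDumpProviderInAllowlist("TestDumpProvider"));
  EXPECT_FALSE(base::trace_event::IsMemoryDumpProviderInAllowlist("Malloc"));
}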

View file

@ -0,0 +1,14 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/memory_usage_estimator.h"
namespace base {
namespace trace_event {
template size_t EstimateMemoryUsage(const std::string&);
template size_t EstimateMemoryUsage(const string16&);
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,666 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
#define BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
#include <stdint.h>
#include <array>
#include <deque>
#include <list>
#include <map>
#include <memory>
#include <queue>
#include <set>
#include <stack>
#include <string>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "base/base_export.h"
#include "base/containers/circular_deque.h"
#include "base/containers/flat_map.h"
#include "base/containers/flat_set.h"
#include "base/containers/linked_list.h"
#include "base/containers/mru_cache.h"
#include "base/containers/queue.h"
#include "base/stl_util.h"
#include "base/strings/string16.h"
#include "base/template_util.h"
// Composable memory usage estimators.
//
// This file defines a set of EstimateMemoryUsage(object) functions that
// return approximate dynamically allocated memory usage of their argument.
//
// The ultimate goal is to make memory usage estimation for a class simply a
// matter of aggregating EstimateMemoryUsage() results over all fields.
//
// That is achieved via composability: if EstimateMemoryUsage() is defined
// for T then EstimateMemoryUsage() is also defined for any combination of
// containers holding T (e.g. std::map<int, std::vector<T>>).
//
// There are two ways of defining EstimateMemoryUsage() for a type:
//
// 1. As a global function 'size_t EstimateMemoryUsage(T)' in the
//    base::trace_event namespace.
//
// 2. As 'size_t T::EstimateMemoryUsage() const' method. In this case
// EstimateMemoryUsage(T) function in base::trace_event namespace is
// provided automatically.
//
// Here is an example implementation:
//
// class MyClass {
// ...
// ...
// size_t EstimateMemoryUsage() const {
// return base::trace_event::EstimateMemoryUsage(set_) +
// base::trace_event::EstimateMemoryUsage(name_) +
// base::trace_event::EstimateMemoryUsage(foo_);
// }
// ...
// private:
// ...
// std::set<int> set_;
// std::string name_;
// Foo foo_;
// int id_;
// bool success_;
// }
//
// The approach is simple: first call EstimateMemoryUsage() on all members,
// then recursively fix compilation errors that are caused by types not
// implementing EstimateMemoryUsage().
namespace base {
namespace trace_event {
// Declarations
// If T declares 'EstimateMemoryUsage() const' member function, then
// global function EstimateMemoryUsage(T) is available, and just calls
// the member function.
template <class T>
auto EstimateMemoryUsage(const T& object)
-> decltype(object.EstimateMemoryUsage());
// String
template <class C, class T, class A>
size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string);
// Arrays
template <class T, size_t N>
size_t EstimateMemoryUsage(const std::array<T, N>& array);
template <class T, size_t N>
size_t EstimateMemoryUsage(T (&array)[N]);
template <class T>
size_t EstimateMemoryUsage(const T* array, size_t array_length);
// std::unique_ptr
template <class T, class D>
size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr);
template <class T, class D>
size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
size_t array_length);
// std::shared_ptr
template <class T>
size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr);
// Containers
template <class F, class S>
size_t EstimateMemoryUsage(const std::pair<F, S>& pair);
template <class T, class A>
size_t EstimateMemoryUsage(const std::vector<T, A>& vector);
template <class T, class A>
size_t EstimateMemoryUsage(const std::list<T, A>& list);
template <class T>
size_t EstimateMemoryUsage(const base::LinkedList<T>& list);
template <class T, class C, class A>
size_t EstimateMemoryUsage(const std::set<T, C, A>& set);
template <class T, class C, class A>
size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set);
template <class K, class V, class C, class A>
size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map);
template <class K, class V, class C, class A>
size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map);
template <class T, class H, class KE, class A>
size_t EstimateMemoryUsage(const std::unordered_set<T, H, KE, A>& set);
template <class T, class H, class KE, class A>
size_t EstimateMemoryUsage(const std::unordered_multiset<T, H, KE, A>& set);
template <class K, class V, class H, class KE, class A>
size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map);
template <class K, class V, class H, class KE, class A>
size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map);
template <class T, class A>
size_t EstimateMemoryUsage(const std::deque<T, A>& deque);
template <class T, class C>
size_t EstimateMemoryUsage(const std::queue<T, C>& queue);
template <class T, class C>
size_t EstimateMemoryUsage(const std::priority_queue<T, C>& queue);
template <class T, class C>
size_t EstimateMemoryUsage(const std::stack<T, C>& stack);
template <class T>
size_t EstimateMemoryUsage(const base::circular_deque<T>& deque);
template <class T, class C>
size_t EstimateMemoryUsage(const base::flat_set<T, C>& set);
template <class K, class V, class C>
size_t EstimateMemoryUsage(const base::flat_map<K, V, C>& map);
template <class Key,
class Payload,
class HashOrComp,
template <typename, typename, typename> class Map>
size_t EstimateMemoryUsage(const MRUCacheBase<Key, Payload, HashOrComp, Map>&);
// TODO(dskiba):
// std::forward_list
// Definitions
namespace internal {
// HasEMU<T>::value is true iff EstimateMemoryUsage(T) is available.
// (This is the default version, which is false.)
template <class T, class X = void>
struct HasEMU : std::false_type {};
// This HasEMU specialization is only picked up if there exists a function
// EstimateMemoryUsage(const T&) that returns size_t. Simpler ways to
// achieve this don't work on MSVC.
template <class T>
struct HasEMU<
T,
typename std::enable_if<std::is_same<
size_t,
decltype(EstimateMemoryUsage(std::declval<const T&>()))>::value>::type>
: std::true_type {};
// EMUCaller<T> does three things:
// 1. Defines Call() method that calls EstimateMemoryUsage(T) if it's
// available.
// 2. If EstimateMemoryUsage(T) is not available, but T has trivial dtor
// (i.e. it's POD, integer, pointer, enum, etc.) then it defines Call()
// method that returns 0. This is useful for containers, which allocate
// memory regardless of T (also for cases like std::map<int, MyClass>).
// 3. Finally, if EstimateMemoryUsage(T) is not available, then it triggers
//    a static_assert with a helpful message. That cuts the number of errors
//    considerably - if you just call EstimateMemoryUsage(T) but it's not
//    available for T, then the compiler will helpfully list *all* possible
// variants of it, with an explanation for each.
template <class T, class X = void>
struct EMUCaller {
// std::is_same<> below makes static_assert depend on T, in order to
// prevent it from asserting regardless of instantiation.
static_assert(std::is_same<T, std::false_type>::value,
"Neither global function 'size_t EstimateMemoryUsage(T)' "
"nor member function 'size_t T::EstimateMemoryUsage() const' "
"is defined for the type.");
static size_t Call(const T&) { return 0; }
};
template <class T>
struct EMUCaller<T, typename std::enable_if<HasEMU<T>::value>::type> {
static size_t Call(const T& value) { return EstimateMemoryUsage(value); }
};
template <template <class...> class Container, class I, class = void>
struct IsComplexIteratorForContainer : std::false_type {};
template <template <class...> class Container, class I>
struct IsComplexIteratorForContainer<
Container,
I,
std::enable_if_t<!std::is_pointer<I>::value &&
base::internal::is_iterator<I>::value>> {
using value_type = typename std::iterator_traits<I>::value_type;
using container_type = Container<value_type>;
// We use an enum instead of a static constexpr bool because we don't have
// inline variables until C++17.
//
// The downside is that |value| is not of type bool.
enum : bool {
value =
std::is_same<typename container_type::iterator, I>::value ||
std::is_same<typename container_type::const_iterator, I>::value ||
std::is_same<typename container_type::reverse_iterator, I>::value ||
std::is_same<typename container_type::const_reverse_iterator, I>::value,
};
};
template <class I, template <class...> class... Containers>
constexpr bool OneOfContainersComplexIterators() {
// We are forced to create a temporary variable to work around a compilation
// error in MSVC.
const bool all_tests[] = {
IsComplexIteratorForContainer<Containers, I>::value...};
for (bool test : all_tests)
if (test)
return true;
return false;
}
// std::array has an extra required template argument. We curry it.
template <class T>
using array_test_helper = std::array<T, 1>;
template <class I>
constexpr bool IsStandardContainerComplexIterator() {
// TODO(dyaroshev): deal with map iterators if there is a need.
// It requires parsing pairs into keys and values.
// TODO(dyaroshev): deal with unordered containers: they do not have reverse
// iterators.
return OneOfContainersComplexIterators<
I, array_test_helper, std::vector, std::deque,
/*std::forward_list,*/ std::list, std::set, std::multiset>();
}
// Work around an MSVC bug: for some reason a constexpr function doesn't work
// here, but a variable template does.
template <typename T>
constexpr bool IsKnownNonAllocatingType_v =
std::is_trivially_destructible<T>::value ||
IsStandardContainerComplexIterator<T>();
template <class T>
struct EMUCaller<
T,
std::enable_if_t<!HasEMU<T>::value && IsKnownNonAllocatingType_v<T>>> {
static size_t Call(const T& value) { return 0; }
};
} // namespace internal
// Proxy that deduces T and calls EMUCaller<T>.
// To be used by EstimateMemoryUsage() implementations for containers.
template <class T>
size_t EstimateItemMemoryUsage(const T& value) {
return internal::EMUCaller<T>::Call(value);
}
template <class I>
size_t EstimateIterableMemoryUsage(const I& iterable) {
size_t memory_usage = 0;
for (const auto& item : iterable) {
memory_usage += EstimateItemMemoryUsage(item);
}
return memory_usage;
}
// Global EstimateMemoryUsage(T) that just calls T::EstimateMemoryUsage().
template <class T>
auto EstimateMemoryUsage(const T& object)
-> decltype(object.EstimateMemoryUsage()) {
static_assert(
std::is_same<decltype(object.EstimateMemoryUsage()), size_t>::value,
"'T::EstimateMemoryUsage() const' must return size_t.");
return object.EstimateMemoryUsage();
}
// String
template <class C, class T, class A>
size_t EstimateMemoryUsage(const std::basic_string<C, T, A>& string) {
using string_type = std::basic_string<C, T, A>;
using value_type = typename string_type::value_type;
// C++11 doesn't leave much room for implementors - std::string can
// use short string optimization, but that's about it. We detect SSO
// by checking that c_str() points inside |string|.
const uint8_t* cstr = reinterpret_cast<const uint8_t*>(string.c_str());
const uint8_t* inline_cstr = reinterpret_cast<const uint8_t*>(&string);
if (cstr >= inline_cstr && cstr < inline_cstr + sizeof(string)) {
// SSO string
return 0;
}
return (string.capacity() + 1) * sizeof(value_type);
}
// Use explicit instantiations from the .cc file (reduces bloat).
extern template BASE_EXPORT size_t EstimateMemoryUsage(const std::string&);
extern template BASE_EXPORT size_t EstimateMemoryUsage(const string16&);
// Arrays
template <class T, size_t N>
size_t EstimateMemoryUsage(const std::array<T, N>& array) {
return EstimateIterableMemoryUsage(array);
}
template <class T, size_t N>
size_t EstimateMemoryUsage(T (&array)[N]) {
return EstimateIterableMemoryUsage(array);
}
template <class T>
size_t EstimateMemoryUsage(const T* array, size_t array_length) {
size_t memory_usage = sizeof(T) * array_length;
for (size_t i = 0; i != array_length; ++i) {
memory_usage += EstimateItemMemoryUsage(array[i]);
}
return memory_usage;
}
// std::unique_ptr
template <class T, class D>
size_t EstimateMemoryUsage(const std::unique_ptr<T, D>& ptr) {
return ptr ? (sizeof(T) + EstimateItemMemoryUsage(*ptr)) : 0;
}
template <class T, class D>
size_t EstimateMemoryUsage(const std::unique_ptr<T[], D>& array,
size_t array_length) {
return EstimateMemoryUsage(array.get(), array_length);
}
// std::shared_ptr
template <class T>
size_t EstimateMemoryUsage(const std::shared_ptr<T>& ptr) {
auto use_count = ptr.use_count();
if (use_count == 0) {
return 0;
}
// Model shared_ptr after libc++,
// see __shared_ptr_pointer from include/memory
struct SharedPointer {
void* vtbl;
long shared_owners;
long shared_weak_owners;
T* value;
};
// If an object of size S is shared N > S times, we prefer to (potentially)
// overestimate rather than return 0.
return sizeof(SharedPointer) +
(EstimateItemMemoryUsage(*ptr) + (use_count - 1)) / use_count;
}
// std::pair
template <class F, class S>
size_t EstimateMemoryUsage(const std::pair<F, S>& pair) {
return EstimateItemMemoryUsage(pair.first) +
EstimateItemMemoryUsage(pair.second);
}
// std::vector
template <class T, class A>
size_t EstimateMemoryUsage(const std::vector<T, A>& vector) {
return sizeof(T) * vector.capacity() + EstimateIterableMemoryUsage(vector);
}
// std::list
template <class T, class A>
size_t EstimateMemoryUsage(const std::list<T, A>& list) {
using value_type = typename std::list<T, A>::value_type;
struct Node {
Node* prev;
Node* next;
value_type value;
};
return sizeof(Node) * list.size() +
EstimateIterableMemoryUsage(list);
}
template <class T>
size_t EstimateMemoryUsage(const base::LinkedList<T>& list) {
size_t memory_usage = 0u;
for (base::LinkNode<T>* node = list.head(); node != list.end();
node = node->next()) {
// Since we increment by calling node = node->next() we know that node
// isn't nullptr.
memory_usage += EstimateMemoryUsage(*node->value()) + sizeof(T);
}
return memory_usage;
}
// Tree containers
template <class V>
size_t EstimateTreeMemoryUsage(size_t size) {
// Tree containers are modeled after libc++
// (__tree_node from include/__tree)
struct Node {
Node* left;
Node* right;
Node* parent;
bool is_black;
V value;
};
return sizeof(Node) * size;
}
template <class T, class C, class A>
size_t EstimateMemoryUsage(const std::set<T, C, A>& set) {
using value_type = typename std::set<T, C, A>::value_type;
return EstimateTreeMemoryUsage<value_type>(set.size()) +
EstimateIterableMemoryUsage(set);
}
template <class T, class C, class A>
size_t EstimateMemoryUsage(const std::multiset<T, C, A>& set) {
using value_type = typename std::multiset<T, C, A>::value_type;
return EstimateTreeMemoryUsage<value_type>(set.size()) +
EstimateIterableMemoryUsage(set);
}
template <class K, class V, class C, class A>
size_t EstimateMemoryUsage(const std::map<K, V, C, A>& map) {
using value_type = typename std::map<K, V, C, A>::value_type;
return EstimateTreeMemoryUsage<value_type>(map.size()) +
EstimateIterableMemoryUsage(map);
}
template <class K, class V, class C, class A>
size_t EstimateMemoryUsage(const std::multimap<K, V, C, A>& map) {
using value_type = typename std::multimap<K, V, C, A>::value_type;
return EstimateTreeMemoryUsage<value_type>(map.size()) +
EstimateIterableMemoryUsage(map);
}
// HashMap containers
namespace internal {
// While the hashtable container model doesn't depend on the STL
// implementation, one detail still crept in: bucket_count. It's used in size
// estimation, but its value after inserting N items is not predictable.
// This function is specialized by unittests to return a constant value, thus
// excluding bucket_count from testing.
template <class V>
size_t HashMapBucketCountForTesting(size_t bucket_count) {
return bucket_count;
}
template <class MruCacheType>
size_t DoEstimateMemoryUsageForMruCache(const MruCacheType& mru_cache) {
return EstimateMemoryUsage(mru_cache.ordering_) +
EstimateMemoryUsage(mru_cache.index_);
}
} // namespace internal
template <class V>
size_t EstimateHashMapMemoryUsage(size_t bucket_count, size_t size) {
// Hashtable containers are modeled after libc++
// (__hash_node from include/__hash_table)
struct Node {
void* next;
size_t hash;
V value;
};
using Bucket = void*;
bucket_count = internal::HashMapBucketCountForTesting<V>(bucket_count);
return sizeof(Bucket) * bucket_count + sizeof(Node) * size;
}
template <class K, class H, class KE, class A>
size_t EstimateMemoryUsage(const std::unordered_set<K, H, KE, A>& set) {
using value_type = typename std::unordered_set<K, H, KE, A>::value_type;
return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
set.size()) +
EstimateIterableMemoryUsage(set);
}
template <class K, class H, class KE, class A>
size_t EstimateMemoryUsage(const std::unordered_multiset<K, H, KE, A>& set) {
using value_type = typename std::unordered_multiset<K, H, KE, A>::value_type;
return EstimateHashMapMemoryUsage<value_type>(set.bucket_count(),
set.size()) +
EstimateIterableMemoryUsage(set);
}
template <class K, class V, class H, class KE, class A>
size_t EstimateMemoryUsage(const std::unordered_map<K, V, H, KE, A>& map) {
using value_type = typename std::unordered_map<K, V, H, KE, A>::value_type;
return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
map.size()) +
EstimateIterableMemoryUsage(map);
}
template <class K, class V, class H, class KE, class A>
size_t EstimateMemoryUsage(const std::unordered_multimap<K, V, H, KE, A>& map) {
using value_type =
typename std::unordered_multimap<K, V, H, KE, A>::value_type;
return EstimateHashMapMemoryUsage<value_type>(map.bucket_count(),
map.size()) +
EstimateIterableMemoryUsage(map);
}
// std::deque
template <class T, class A>
size_t EstimateMemoryUsage(const std::deque<T, A>& deque) {
// Since std::deque implementations are wildly different
// (see crbug.com/674287), we can't have one "good enough"
// way to estimate.
// kBlockSize - minimum size of a block, in bytes
// kMinBlockLength - number of elements in a block
// if sizeof(T) > kBlockSize
#if defined(_LIBCPP_VERSION)
size_t kBlockSize = 4096;
size_t kMinBlockLength = 16;
#elif defined(__GLIBCXX__)
size_t kBlockSize = 512;
size_t kMinBlockLength = 1;
#elif defined(_MSC_VER)
size_t kBlockSize = 16;
size_t kMinBlockLength = 1;
#else
size_t kBlockSize = 0;
size_t kMinBlockLength = 1;
#endif
size_t block_length =
(sizeof(T) > kBlockSize) ? kMinBlockLength : kBlockSize / sizeof(T);
size_t blocks = (deque.size() + block_length - 1) / block_length;
#if defined(__GLIBCXX__)
// libstdc++: deque always has at least one block
if (!blocks)
blocks = 1;
#endif
#if defined(_LIBCPP_VERSION)
// libc++: deque keeps at most two blocks when it shrinks,
// so even if the size is zero, deque might be holding up
// to 4096 * 2 bytes. One way to know whether deque has
// ever allocated (and hence has 1 or 2 blocks) is to check
// iterator's pointer. Non-zero value means that deque has
// at least one block.
if (!blocks && deque.begin().operator->())
blocks = 1;
#endif
return (blocks * block_length * sizeof(T)) +
EstimateIterableMemoryUsage(deque);
}
// Container adapters
template <class T, class C>
size_t EstimateMemoryUsage(const std::queue<T, C>& queue) {
return EstimateMemoryUsage(GetUnderlyingContainer(queue));
}
template <class T, class C>
size_t EstimateMemoryUsage(const std::priority_queue<T, C>& queue) {
return EstimateMemoryUsage(GetUnderlyingContainer(queue));
}
template <class T, class C>
size_t EstimateMemoryUsage(const std::stack<T, C>& stack) {
return EstimateMemoryUsage(GetUnderlyingContainer(stack));
}
// base::circular_deque
template <class T>
size_t EstimateMemoryUsage(const base::circular_deque<T>& deque) {
return sizeof(T) * deque.capacity() + EstimateIterableMemoryUsage(deque);
}
// Flat containers
template <class T, class C>
size_t EstimateMemoryUsage(const base::flat_set<T, C>& set) {
using value_type = typename base::flat_set<T, C>::value_type;
return sizeof(value_type) * set.capacity() + EstimateIterableMemoryUsage(set);
}
template <class K, class V, class C>
size_t EstimateMemoryUsage(const base::flat_map<K, V, C>& map) {
using value_type = typename base::flat_map<K, V, C>::value_type;
return sizeof(value_type) * map.capacity() + EstimateIterableMemoryUsage(map);
}
template <class Key,
class Payload,
class HashOrComp,
template <typename, typename, typename> class Map>
size_t EstimateMemoryUsage(
const MRUCacheBase<Key, Payload, HashOrComp, Map>& mru_cache) {
return internal::DoEstimateMemoryUsageForMruCache(mru_cache);
}
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_MEMORY_USAGE_ESTIMATOR_H_
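
A short, hedged usage sketch of the composability described in the header comment: a class exposes a member EstimateMemoryUsage() over its dynamically allocating fields, and the free functions then make nested standard containers of that class measurable with a single call. The type and function names here are illustrative, and the results are estimates of heap usage rather than exact sizes.

#include <stddef.h>
#include <stdint.h>

#include <map>
#include <string>
#include <vector>

#include "base/trace_event/memory_usage_estimator.h"

namespace example {

struct CacheEntry {
  std::string key;
  std::vector<uint8_t> payload;

  // Member form: aggregate the estimates of the allocating fields. Trivially
  // destructible members (ints, enums, ...) would contribute nothing.
  size_t EstimateMemoryUsage() const {
    return base::trace_event::EstimateMemoryUsage(key) +
           base::trace_event::EstimateMemoryUsage(payload);
  }
};

// Composability: map -> vector -> CacheEntry::EstimateMemoryUsage().
size_t EstimateCacheUsage(
    const std::map<int, std::vector<CacheEntry>>& cache) {
  return base::trace_event::EstimateMemoryUsage(cache);
}

}  // namespace example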

View file

@ -0,0 +1,512 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/process_memory_dump.h"
#include <errno.h>
#include <vector>
#include "base/memory/ptr_util.h"
#include "base/memory/shared_memory_tracker.h"
#include "base/process/process_metrics.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_infra_background_allowlist.h"
#include "base/trace_event/trace_event_impl.h"
#include "base/trace_event/traced_value.h"
#include "base/unguessable_token.h"
#include "build/build_config.h"
#if defined(OS_IOS)
#include <mach/vm_page_size.h>
#endif
#if defined(OS_POSIX) || defined(OS_FUCHSIA)
#include <sys/mman.h>
#endif
#if defined(OS_WIN)
#include <windows.h> // Must be in front of other Windows header files
#include <Psapi.h>
#endif
namespace base {
namespace trace_event {
namespace {
const char kEdgeTypeOwnership[] = "ownership";
std::string GetSharedGlobalAllocatorDumpName(
const MemoryAllocatorDumpGuid& guid) {
return "global/" + guid.ToString();
}
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
size_t GetSystemPageCount(size_t mapped_size, size_t page_size) {
return (mapped_size + page_size - 1) / page_size;
}
#endif
UnguessableToken GetTokenForCurrentProcess() {
static UnguessableToken instance = UnguessableToken::Create();
return instance;
}
} // namespace
// static
bool ProcessMemoryDump::is_black_hole_non_fatal_for_testing_ = false;
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// static
size_t ProcessMemoryDump::GetSystemPageSize() {
#if defined(OS_IOS)
// On iOS, getpagesize() returns the user page size, but for allocating
// arrays for mincore() the kernel page size is needed. Use vm_kernel_page_size
// as recommended by Apple, https://forums.developer.apple.com/thread/47532/.
// Refer to http://crbug.com/542671 and Apple rdar://23651782
return vm_kernel_page_size;
#else
return base::GetPageSize();
#endif // defined(OS_IOS)
}
// static
size_t ProcessMemoryDump::CountResidentBytes(void* start_address,
size_t mapped_size) {
const size_t page_size = GetSystemPageSize();
const uintptr_t start_pointer = reinterpret_cast<uintptr_t>(start_address);
DCHECK_EQ(0u, start_pointer % page_size);
size_t offset = 0;
size_t total_resident_pages = 0;
bool failure = false;
// An array as large as the number of pages in the memory segment needs to be
// passed to the query function. To avoid allocating a large array, the given
// block of memory is split into chunks of size |kMaxChunkSize|.
const size_t kMaxChunkSize = 8 * 1024 * 1024;
size_t max_vec_size =
GetSystemPageCount(std::min(mapped_size, kMaxChunkSize), page_size);
#if defined(OS_WIN)
std::unique_ptr<PSAPI_WORKING_SET_EX_INFORMATION[]> vec(
new PSAPI_WORKING_SET_EX_INFORMATION[max_vec_size]);
#elif defined(OS_MACOSX)
std::unique_ptr<char[]> vec(new char[max_vec_size]);
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
std::unique_ptr<unsigned char[]> vec(new unsigned char[max_vec_size]);
#endif
while (offset < mapped_size) {
uintptr_t chunk_start = (start_pointer + offset);
const size_t chunk_size = std::min(mapped_size - offset, kMaxChunkSize);
const size_t page_count = GetSystemPageCount(chunk_size, page_size);
size_t resident_page_count = 0;
#if defined(OS_WIN)
for (size_t i = 0; i < page_count; i++) {
vec[i].VirtualAddress =
reinterpret_cast<void*>(chunk_start + i * page_size);
}
DWORD vec_size = static_cast<DWORD>(
page_count * sizeof(PSAPI_WORKING_SET_EX_INFORMATION));
failure = !QueryWorkingSetEx(GetCurrentProcess(), vec.get(), vec_size);
for (size_t i = 0; i < page_count; i++)
resident_page_count += vec[i].VirtualAttributes.Valid;
#elif defined(OS_FUCHSIA)
// TODO(fuchsia): Port, see https://crbug.com/706592.
ALLOW_UNUSED_LOCAL(chunk_start);
ALLOW_UNUSED_LOCAL(page_count);
#elif defined(OS_MACOSX)
// mincore() on macOS does not fail with EAGAIN.
failure =
!!mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
for (size_t i = 0; i < page_count; i++)
resident_page_count += vec[i] & MINCORE_INCORE ? 1 : 0;
#elif defined(OS_POSIX)
int error_counter = 0;
int result = 0;
// HANDLE_EINTR retries up to 100 times, so follow the same pattern here.
do {
result =
#if defined(OS_AIX)
mincore(reinterpret_cast<char*>(chunk_start), chunk_size,
reinterpret_cast<char*>(vec.get()));
#else
mincore(reinterpret_cast<void*>(chunk_start), chunk_size, vec.get());
#endif
} while (result == -1 && errno == EAGAIN && error_counter++ < 100);
failure = !!result;
for (size_t i = 0; i < page_count; i++)
resident_page_count += vec[i] & 1;
#endif
if (failure)
break;
total_resident_pages += resident_page_count * page_size;
offset += kMaxChunkSize;
}
DCHECK(!failure);
if (failure) {
total_resident_pages = 0;
LOG(ERROR) << "CountResidentBytes failed. The resident size is invalid";
}
return total_resident_pages;
}
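
As a hedged illustration of the chunked residency query above (a sketch, not part of this file): a caller hands in a page-aligned mapping obtained elsewhere and gets back the number of resident bytes, or 0 on failure. The wrapper name and the origin of the mapping are assumptions.

#include <stddef.h>

#include "base/trace_event/process_memory_dump.h"

#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// |mapped_start| must be page-aligned and |mapped_size| must be mapped.
size_t ReportResidentBytes(void* mapped_start, size_t mapped_size) {
  // Internally the region is queried in 8 MiB chunks, so even large mappings
  // do not require a proportionally large query buffer.
  return base::trace_event::ProcessMemoryDump::CountResidentBytes(
      mapped_start, mapped_size);
}
#endif  // defined(COUNT_RESIDENT_BYTES_SUPPORTED)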
// static
base::Optional<size_t> ProcessMemoryDump::CountResidentBytesInSharedMemory(
void* start_address,
size_t mapped_size) {
#if defined(OS_MACOSX) && !defined(OS_IOS)
// On macOS, use mach_vm_region instead of mincore for performance
// (crbug.com/742042).
mach_vm_size_t dummy_size = 0;
mach_vm_address_t address =
reinterpret_cast<mach_vm_address_t>(start_address);
vm_region_top_info_data_t info;
MachVMRegionResult result =
GetTopInfo(mach_task_self(), &dummy_size, &address, &info);
if (result == MachVMRegionResult::Error) {
LOG(ERROR) << "CountResidentBytesInSharedMemory failed. The resident size "
"is invalid";
return base::Optional<size_t>();
}
size_t resident_pages =
info.private_pages_resident + info.shared_pages_resident;
// On macOS, measurements for private memory footprint overcount by
// faulted pages in anonymous shared memory. To discount for this, we touch
// all the resident pages in anonymous shared memory here, thus making them
// faulted as well. This relies on two assumptions:
//
// 1) Consumers use shared memory from front to back. Thus, if there are
// (N) resident pages, those pages represent the first N * PAGE_SIZE bytes in
// the shared memory region.
//
// 2) This logic is run shortly before the logic that calculates
// phys_footprint, thus ensuring that the discrepancy between faulted and
// resident pages is minimal.
//
// The performance penalty is expected to be small.
//
// * Most of the time, we expect the pages to already be resident and faulted,
// thus incurring a cache penalty read hit [since we read from each resident
// page].
//
// * Rarely, we expect the pages to be resident but not faulted, resulting in
// soft faults + cache penalty.
//
// * If assumption (1) is invalid, this will potentially fault some
// previously non-resident pages, thus increasing memory usage, without fixing
// the accounting.
//
// Sanity check in case the mapped size is less than the total size of the
// region.
size_t pages_to_fault =
std::min(resident_pages, (mapped_size + PAGE_SIZE - 1) / PAGE_SIZE);
volatile char* base_address = static_cast<char*>(start_address);
for (size_t i = 0; i < pages_to_fault; ++i) {
// Reading from a volatile is a visible side-effect for the purposes of
// optimization. This guarantees that the optimizer will not kill this line.
base_address[i * PAGE_SIZE];
}
return resident_pages * PAGE_SIZE;
#else
return CountResidentBytes(start_address, mapped_size);
#endif // defined(OS_MACOSX) && !defined(OS_IOS)
}
#endif // defined(COUNT_RESIDENT_BYTES_SUPPORTED)
ProcessMemoryDump::ProcessMemoryDump(
const MemoryDumpArgs& dump_args)
: process_token_(GetTokenForCurrentProcess()),
dump_args_(dump_args) {}
ProcessMemoryDump::~ProcessMemoryDump() = default;
ProcessMemoryDump::ProcessMemoryDump(ProcessMemoryDump&& other) = default;
ProcessMemoryDump& ProcessMemoryDump::operator=(ProcessMemoryDump&& other) =
default;
MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
const std::string& absolute_name) {
return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
absolute_name, dump_args_.level_of_detail, GetDumpId(absolute_name)));
}
MemoryAllocatorDump* ProcessMemoryDump::CreateAllocatorDump(
const std::string& absolute_name,
const MemoryAllocatorDumpGuid& guid) {
return AddAllocatorDumpInternal(std::make_unique<MemoryAllocatorDump>(
absolute_name, dump_args_.level_of_detail, guid));
}
MemoryAllocatorDump* ProcessMemoryDump::AddAllocatorDumpInternal(
std::unique_ptr<MemoryAllocatorDump> mad) {
// In background mode, return the black hole dump if an invalid dump name is
// given.
if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND &&
!IsMemoryAllocatorDumpNameInAllowlist(mad->absolute_name())) {
return GetBlackHoleMad();
}
auto insertion_result = allocator_dumps_.insert(
std::make_pair(mad->absolute_name(), std::move(mad)));
MemoryAllocatorDump* inserted_mad = insertion_result.first->second.get();
DCHECK(insertion_result.second) << "Duplicate name: "
<< inserted_mad->absolute_name();
return inserted_mad;
}
MemoryAllocatorDump* ProcessMemoryDump::GetAllocatorDump(
const std::string& absolute_name) const {
auto it = allocator_dumps_.find(absolute_name);
if (it != allocator_dumps_.end())
return it->second.get();
return nullptr;
}
MemoryAllocatorDump* ProcessMemoryDump::GetOrCreateAllocatorDump(
const std::string& absolute_name) {
MemoryAllocatorDump* mad = GetAllocatorDump(absolute_name);
return mad ? mad : CreateAllocatorDump(absolute_name);
}
MemoryAllocatorDump* ProcessMemoryDump::CreateSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid) {
// A shared allocator dump can be shared within a process and the guid could
// have been created already.
MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
if (mad && mad != black_hole_mad_.get()) {
// The weak flag is cleared because this method should create a non-weak
// dump.
mad->clear_flags(MemoryAllocatorDump::Flags::WEAK);
return mad;
}
return CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
}
MemoryAllocatorDump* ProcessMemoryDump::CreateWeakSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid) {
MemoryAllocatorDump* mad = GetSharedGlobalAllocatorDump(guid);
if (mad && mad != black_hole_mad_.get())
return mad;
mad = CreateAllocatorDump(GetSharedGlobalAllocatorDumpName(guid), guid);
mad->set_flags(MemoryAllocatorDump::Flags::WEAK);
return mad;
}
MemoryAllocatorDump* ProcessMemoryDump::GetSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid) const {
return GetAllocatorDump(GetSharedGlobalAllocatorDumpName(guid));
}
void ProcessMemoryDump::DumpHeapUsage(
const std::unordered_map<base::trace_event::AllocationContext,
base::trace_event::AllocationMetrics>&
metrics_by_context,
base::trace_event::TraceEventMemoryOverhead& overhead,
const char* allocator_name) {
std::string base_name = base::StringPrintf("tracing/heap_profiler_%s",
allocator_name);
overhead.DumpInto(base_name.c_str(), this);
}
void ProcessMemoryDump::SetAllocatorDumpsForSerialization(
std::vector<std::unique_ptr<MemoryAllocatorDump>> dumps) {
DCHECK(allocator_dumps_.empty());
for (std::unique_ptr<MemoryAllocatorDump>& dump : dumps)
AddAllocatorDumpInternal(std::move(dump));
}
std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>
ProcessMemoryDump::GetAllEdgesForSerialization() const {
std::vector<MemoryAllocatorDumpEdge> edges;
edges.reserve(allocator_dumps_edges_.size());
for (const auto& it : allocator_dumps_edges_)
edges.push_back(it.second);
return edges;
}
void ProcessMemoryDump::SetAllEdgesForSerialization(
const std::vector<ProcessMemoryDump::MemoryAllocatorDumpEdge>& edges) {
DCHECK(allocator_dumps_edges_.empty());
for (const MemoryAllocatorDumpEdge& edge : edges) {
auto it_and_inserted = allocator_dumps_edges_.emplace(edge.source, edge);
DCHECK(it_and_inserted.second);
}
}
void ProcessMemoryDump::Clear() {
allocator_dumps_.clear();
allocator_dumps_edges_.clear();
}
void ProcessMemoryDump::TakeAllDumpsFrom(ProcessMemoryDump* other) {
// Moves the ownership of all MemoryAllocatorDump(s) contained in |other|
// into this ProcessMemoryDump, checking for duplicates.
for (auto& it : other->allocator_dumps_)
AddAllocatorDumpInternal(std::move(it.second));
other->allocator_dumps_.clear();
// Move all the edges.
allocator_dumps_edges_.insert(other->allocator_dumps_edges_.begin(),
other->allocator_dumps_edges_.end());
other->allocator_dumps_edges_.clear();
}
void ProcessMemoryDump::SerializeAllocatorDumpsInto(TracedValue* value) const {
if (allocator_dumps_.size() > 0) {
value->BeginDictionary("allocators");
for (const auto& allocator_dump_it : allocator_dumps_)
allocator_dump_it.second->AsValueInto(value);
value->EndDictionary();
}
value->BeginArray("allocators_graph");
for (const auto& it : allocator_dumps_edges_) {
const MemoryAllocatorDumpEdge& edge = it.second;
value->BeginDictionary();
value->SetString("source", edge.source.ToString());
value->SetString("target", edge.target.ToString());
value->SetInteger("importance", edge.importance);
value->SetString("type", kEdgeTypeOwnership);
value->EndDictionary();
}
value->EndArray();
}
void ProcessMemoryDump::AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
const MemoryAllocatorDumpGuid& target,
int importance) {
// This will either override an existing edge or create a new one.
auto it = allocator_dumps_edges_.find(source);
int max_importance = importance;
if (it != allocator_dumps_edges_.end()) {
DCHECK_EQ(target.ToUint64(), it->second.target.ToUint64());
max_importance = std::max(importance, it->second.importance);
}
allocator_dumps_edges_[source] = {source, target, max_importance,
false /* overridable */};
}
void ProcessMemoryDump::AddOwnershipEdge(
const MemoryAllocatorDumpGuid& source,
const MemoryAllocatorDumpGuid& target) {
AddOwnershipEdge(source, target, 0 /* importance */);
}
void ProcessMemoryDump::AddOverridableOwnershipEdge(
const MemoryAllocatorDumpGuid& source,
const MemoryAllocatorDumpGuid& target,
int importance) {
if (allocator_dumps_edges_.count(source) == 0) {
allocator_dumps_edges_[source] = {source, target, importance,
true /* overridable */};
} else {
// An edge between the source and target already exists. So, do nothing here
// since the new overridable edge is implicitly overridden by a strong edge
// which was created earlier.
DCHECK(!allocator_dumps_edges_[source].overridable);
}
}
void ProcessMemoryDump::CreateSharedMemoryOwnershipEdge(
const MemoryAllocatorDumpGuid& client_local_dump_guid,
const UnguessableToken& shared_memory_guid,
int importance) {
CreateSharedMemoryOwnershipEdgeInternal(client_local_dump_guid,
shared_memory_guid, importance,
false /*is_weak*/);
}
void ProcessMemoryDump::CreateWeakSharedMemoryOwnershipEdge(
const MemoryAllocatorDumpGuid& client_local_dump_guid,
const UnguessableToken& shared_memory_guid,
int importance) {
CreateSharedMemoryOwnershipEdgeInternal(
client_local_dump_guid, shared_memory_guid, importance, true /*is_weak*/);
}
void ProcessMemoryDump::CreateSharedMemoryOwnershipEdgeInternal(
const MemoryAllocatorDumpGuid& client_local_dump_guid,
const UnguessableToken& shared_memory_guid,
int importance,
bool is_weak) {
DCHECK(!shared_memory_guid.is_empty());
// New model where the global dumps created by SharedMemoryTracker are used
// for the clients.
// The guid of the local dump created by SharedMemoryTracker for the memory
// segment.
auto local_shm_guid =
GetDumpId(SharedMemoryTracker::GetDumpNameForTracing(shared_memory_guid));
// The dump guid of the global dump created by the tracker for the memory
// segment.
auto global_shm_guid =
SharedMemoryTracker::GetGlobalDumpIdForTracing(shared_memory_guid);
// Create an edge between the local dump of the client and the local dump of
// the SharedMemoryTracker. There is no need to create the dumps here since the
// tracker creates them. The importance is also required here for the case of
// single-process mode.
AddOwnershipEdge(client_local_dump_guid, local_shm_guid, importance);
// TODO(ssid): Handle the case of weak dumps here. This needs a new function
// GetOrCreateGlobalDump() in PMD since we need to change the behavior of the
// created global dump.
// Create an edge that overrides the edge created by SharedMemoryTracker.
AddOwnershipEdge(local_shm_guid, global_shm_guid, importance);
}
void ProcessMemoryDump::AddSuballocation(const MemoryAllocatorDumpGuid& source,
const std::string& target_node_name) {
// Do not create new dumps for suballocations in background mode.
if (dump_args_.level_of_detail == MemoryDumpLevelOfDetail::BACKGROUND)
return;
std::string child_mad_name = target_node_name + "/__" + source.ToString();
MemoryAllocatorDump* target_child_mad = CreateAllocatorDump(child_mad_name);
AddOwnershipEdge(source, target_child_mad->guid());
}
MemoryAllocatorDump* ProcessMemoryDump::GetBlackHoleMad() {
DCHECK(is_black_hole_non_fatal_for_testing_);
if (!black_hole_mad_) {
std::string name = "discarded";
black_hole_mad_.reset(new MemoryAllocatorDump(
name, dump_args_.level_of_detail, GetDumpId(name)));
}
return black_hole_mad_.get();
}
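// A minimal sketch of how the id below is formed (the values are illustrative,
// not taken from a real trace): a dump named "malloc/allocated_objects" in a
// process whose token prints as "1122..." derives its GUID from the string
// "1122...:malloc/allocated_objects", so equal names in different processes
// still yield distinct GUIDs.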
MemoryAllocatorDumpGuid ProcessMemoryDump::GetDumpId(
const std::string& absolute_name) {
return MemoryAllocatorDumpGuid(StringPrintf(
"%s:%s", process_token().ToString().c_str(), absolute_name.c_str()));
}
bool ProcessMemoryDump::MemoryAllocatorDumpEdge::operator==(
const MemoryAllocatorDumpEdge& other) const {
return source == other.source && target == other.target &&
importance == other.importance && overridable == other.overridable;
}
bool ProcessMemoryDump::MemoryAllocatorDumpEdge::operator!=(
const MemoryAllocatorDumpEdge& other) const {
return !(*this == other);
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,282 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_PROCESS_MEMORY_DUMP_H_
#define BASE_TRACE_EVENT_PROCESS_MEMORY_DUMP_H_
#include <stddef.h>
#include <map>
#include <unordered_map>
#include <vector>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/memory/ref_counted.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_allocator_dump_guid.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "build/build_config.h"
// Define COUNT_RESIDENT_BYTES_SUPPORTED if platform supports counting of the
// resident memory.
#if !defined(OS_NACL)
#define COUNT_RESIDENT_BYTES_SUPPORTED
#endif
namespace base {
class UnguessableToken;
namespace trace_event {
class TracedValue;
// ProcessMemoryDump is a strongly typed container which holds the dumps
// produced by the MemoryDumpProvider(s) for a specific process.
class BASE_EXPORT ProcessMemoryDump {
public:
struct BASE_EXPORT MemoryAllocatorDumpEdge {
bool operator==(const MemoryAllocatorDumpEdge&) const;
bool operator!=(const MemoryAllocatorDumpEdge&) const;
MemoryAllocatorDumpGuid source;
MemoryAllocatorDumpGuid target;
int importance = 0;
bool overridable = false;
};
// Maps allocator dumps absolute names (allocator_name/heap/subheap) to
// MemoryAllocatorDump instances.
using AllocatorDumpsMap =
std::map<std::string, std::unique_ptr<MemoryAllocatorDump>>;
// Stores allocator dump edges indexed by source allocator dump GUID.
using AllocatorDumpEdgesMap =
std::map<MemoryAllocatorDumpGuid, MemoryAllocatorDumpEdge>;
#if defined(COUNT_RESIDENT_BYTES_SUPPORTED)
// Returns the number of bytes in a kernel memory page. Some platforms may
// have kernel page sizes that differ from user page sizes; it is important to
// use the kernel page size when computing resident bytes. In most cases, the
// two are the same.
static size_t GetSystemPageSize();
// Returns the total bytes resident for a virtual address range, given its
// |start_address| and |mapped_size| (in bytes). The value returned is valid
// only if the given range is currently mmapped by the process. The
// |start_address| must be page-aligned.
static size_t CountResidentBytes(void* start_address, size_t mapped_size);
// The same as above, but the given mapped range should belong to the
// shared_memory's mapped region.
static base::Optional<size_t> CountResidentBytesInSharedMemory(
void* start_address,
size_t mapped_size);
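// Illustrative usage sketch (|mapping_start| and |mapped_size| are
// hypothetical names and must describe a currently mmapped, page-aligned
// range):
//   size_t resident_bytes =
//       ProcessMemoryDump::CountResidentBytes(mapping_start, mapped_size);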
#endif
explicit ProcessMemoryDump(const MemoryDumpArgs& dump_args);
ProcessMemoryDump(ProcessMemoryDump&&);
~ProcessMemoryDump();
ProcessMemoryDump& operator=(ProcessMemoryDump&&);
// Creates a new MemoryAllocatorDump with the given name and returns the
// empty object back to the caller.
// Arguments:
// absolute_name: a name that uniquely identifies allocator dumps produced
// by this provider. It is possible to specify nesting by using a
// path-like string (e.g., v8/isolate1/heap1, v8/isolate1/heap2).
// Leading or trailing slashes are not allowed.
// guid: an optional identifier, unique among all processes within the
// scope of a global dump. This is only relevant when using
// AddOwnershipEdge() to express memory sharing. If omitted,
// it will be automatically generated.
// ProcessMemoryDump handles the memory ownership of its MemoryAllocatorDumps.
MemoryAllocatorDump* CreateAllocatorDump(const std::string& absolute_name);
MemoryAllocatorDump* CreateAllocatorDump(const std::string& absolute_name,
const MemoryAllocatorDumpGuid& guid);
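// Illustrative usage sketch (dump name and size are made up; assumes the
// AddScalar() API and constants declared in memory_allocator_dump.h):
//   MemoryAllocatorDump* mad = pmd->CreateAllocatorDump("v8/isolate1/heap1");
//   mad->AddScalar(MemoryAllocatorDump::kNameSize,
//                  MemoryAllocatorDump::kUnitsBytes, heap_used_bytes);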
// Looks up a MemoryAllocatorDump given its allocator and heap names; returns
// nullptr if not found.
MemoryAllocatorDump* GetAllocatorDump(const std::string& absolute_name) const;
// Do NOT use this method. All dump providers should use
// CreateAllocatorDump(). Tries to create a new MemoryAllocatorDump only if it
// doesn't already exist. Creating multiple dumps with the same name using
// GetOrCreateAllocatorDump() would override the existing scalars in the MAD
// and cause misreporting. This method is used only in the rare cases where
// multiple components create allocator dumps with the same name and only one
// of them adds a size.
MemoryAllocatorDump* GetOrCreateAllocatorDump(
const std::string& absolute_name);
// Creates a shared MemoryAllocatorDump, to express cross-process sharing.
// Shared allocator dumps are allowed to have duplicate guids within the
// global scope, in order to reference the same dump from multiple processes.
// See the design doc goo.gl/keU6Bf for reference usage patterns.
MemoryAllocatorDump* CreateSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid);
// Creates a shared MemoryAllocatorDump as CreateSharedGlobalAllocatorDump,
// but with a WEAK flag. A weak dump will be discarded unless a non-weak dump
// is created using CreateSharedGlobalAllocatorDump by at least one process.
// The WEAK flag does not apply if a non-weak dump with the same GUID already
// exists or is created later. All owners and children of the discarded dump
// will also be discarded transitively.
MemoryAllocatorDump* CreateWeakSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid);
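// Illustrative sketch (the GUID and process roles are made up): a process
// that merely references a cross-process resource can emit
//   pmd->CreateWeakSharedGlobalAllocatorDump(guid);
// while the owning process emits
//   pmd->CreateSharedGlobalAllocatorDump(guid);
// If no process ever creates the non-weak dump, the weak dump and everything
// attached to it are dropped from the global dump.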
// Looks up a shared MemoryAllocatorDump given its guid.
MemoryAllocatorDump* GetSharedGlobalAllocatorDump(
const MemoryAllocatorDumpGuid& guid) const;
// Returns the map of the MemoryAllocatorDumps added to this dump.
const AllocatorDumpsMap& allocator_dumps() const { return allocator_dumps_; }
AllocatorDumpsMap* mutable_allocator_dumps_for_serialization() const {
// Mojo takes a const input argument even for move-only types that can be
// mutated while serializing (like this one). Hence the const_cast.
return const_cast<AllocatorDumpsMap*>(&allocator_dumps_);
}
void SetAllocatorDumpsForSerialization(
std::vector<std::unique_ptr<MemoryAllocatorDump>>);
// Only for mojo serialization.
std::vector<MemoryAllocatorDumpEdge> GetAllEdgesForSerialization() const;
void SetAllEdgesForSerialization(const std::vector<MemoryAllocatorDumpEdge>&);
// Dumps heap usage with |allocator_name|.
void DumpHeapUsage(
const std::unordered_map<base::trace_event::AllocationContext,
base::trace_event::AllocationMetrics>&
metrics_by_context,
base::trace_event::TraceEventMemoryOverhead& overhead,
const char* allocator_name);
// Adds an ownership relationship between two MemoryAllocatorDump(s) with the
// semantics: |source| owns |target|, and has the effect of attributing
// the memory usage of |target| to |source|. |importance| is optional and
// relevant only for the cases of co-ownership, where it acts as a z-index:
// the owner with the highest importance will be attributed |target|'s memory.
// If an edge is present, its importance will not be updated unless
// |importance| is larger.
void AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
const MemoryAllocatorDumpGuid& target,
int importance);
void AddOwnershipEdge(const MemoryAllocatorDumpGuid& source,
const MemoryAllocatorDumpGuid& target);
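// Illustrative sketch (dump names and the importance value are made up):
//   auto* cache = pmd->CreateAllocatorDump("cc/resource_cache");
//   auto* buffer = pmd->CreateAllocatorDump("gpu/buffers/buffer_42");
//   // Attribute |buffer|'s memory to |cache|; among co-owners of |buffer|,
//   // the edge with the highest importance wins.
//   pmd->AddOwnershipEdge(cache->guid(), buffer->guid(), /*importance=*/2);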
// Adds edges that can be overridden by a later or earlier call to
// AddOwnershipEdge() with the same source and target but a different
// |importance| value.
void AddOverridableOwnershipEdge(const MemoryAllocatorDumpGuid& source,
const MemoryAllocatorDumpGuid& target,
int importance);
// Creates ownership edges for shared memory. Handles the case of cross-process
// sharing and the importance of ownership, with and without the shared memory
// dump provider. This handles shared memory from both the legacy
// base::SharedMemory and the current base::SharedMemoryMapping. The weak
// version creates a weak global dump.
// |client_local_dump_guid| The guid of the local dump created by the client
// of base::SharedMemory.
// |shared_memory_guid| The ID of the shared memory that is assigned globally,
// used to create global dump edges in the new model.
// |importance| Importance of the global dump edges to say if the current
// process owns the memory segment.
void CreateSharedMemoryOwnershipEdge(
const MemoryAllocatorDumpGuid& client_local_dump_guid,
const UnguessableToken& shared_memory_guid,
int importance);
void CreateWeakSharedMemoryOwnershipEdge(
const MemoryAllocatorDumpGuid& client_local_dump_guid,
const UnguessableToken& shared_memory_guid,
int importance);
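// Illustrative sketch (assumes |client_dump| was created with
// CreateAllocatorDump() and that the shared memory region exposes its
// base::UnguessableToken, e.g. through a mapping's guid accessor):
//   pmd->CreateSharedMemoryOwnershipEdge(client_dump->guid(),
//                                        shared_memory_token,
//                                        /*importance=*/1);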
const AllocatorDumpEdgesMap& allocator_dumps_edges() const {
return allocator_dumps_edges_;
}
// Utility method to add a suballocation relationship with the following
// semantics: |source| is suballocated from |target_node_name|.
// This creates a child node of |target_node_name| and adds an ownership edge
// between |source| and the new child node. As a result, the UI will not
// account the memory of |source| in the target node.
void AddSuballocation(const MemoryAllocatorDumpGuid& source,
const std::string& target_node_name);
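// Illustrative sketch (names are made up): declaring that a glyph cache is
// carved out of the malloc heap so the UI does not double-count it:
//   pmd->AddSuballocation(glyph_cache_dump->guid(),
//                         "malloc/allocated_objects");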
// Removes all the MemoryAllocatorDump(s) contained in this instance. This
// ProcessMemoryDump can be safely reused as if it was new once this returns.
void Clear();
// Merges all MemoryAllocatorDump(s) contained in |other| inside this
// ProcessMemoryDump, transferring their ownership to this instance.
// |other| will be an empty ProcessMemoryDump after this method returns.
// This is to allow dump providers to pre-populate ProcessMemoryDump instances
// and later move their contents into the ProcessMemoryDump passed as argument
// of the MemoryDumpProvider::OnMemoryDump(ProcessMemoryDump*) callback.
void TakeAllDumpsFrom(ProcessMemoryDump* other);
// Populate the traced value with information about the memory allocator
// dumps.
void SerializeAllocatorDumpsInto(TracedValue* value) const;
const MemoryDumpArgs& dump_args() const { return dump_args_; }
private:
FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, BackgroundModeTest);
FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, SharedMemoryOwnershipTest);
FRIEND_TEST_ALL_PREFIXES(ProcessMemoryDumpTest, GuidsTest);
MemoryAllocatorDump* AddAllocatorDumpInternal(
std::unique_ptr<MemoryAllocatorDump> mad);
// A per-process token, valid throughout the lifetime of the current
// process, used to disambiguate dumps with the same name generated in
// different processes.
const UnguessableToken& process_token() const { return process_token_; }
void set_process_token_for_testing(UnguessableToken token) {
process_token_ = token;
}
// Returns the GUID of the dump for the given |absolute_name|, scoped to this
// process' token. |process_token| is used to disambiguate GUIDs derived from
// the same name in different processes.
MemoryAllocatorDumpGuid GetDumpId(const std::string& absolute_name);
void CreateSharedMemoryOwnershipEdgeInternal(
const MemoryAllocatorDumpGuid& client_local_dump_guid,
const UnguessableToken& shared_memory_guid,
int importance,
bool is_weak);
MemoryAllocatorDump* GetBlackHoleMad();
UnguessableToken process_token_;
AllocatorDumpsMap allocator_dumps_;
// Keeps track of relationships between MemoryAllocatorDump(s).
AllocatorDumpEdgesMap allocator_dumps_edges_;
// Level of detail of the current dump.
MemoryDumpArgs dump_args_;
// This allocator dump is returned when an invalid dump is created in
// background mode. The attributes of the dump are ignored and not added to
// the trace.
std::unique_ptr<MemoryAllocatorDump> black_hole_mad_;
// When set to true, the DCHECK(s) for invalid dump creations in background
// mode are disabled for testing.
static bool is_black_hole_non_fatal_for_testing_;
DISALLOW_COPY_AND_ASSIGN(ProcessMemoryDump);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_PROCESS_MEMORY_DUMP_H_

View file

@ -0,0 +1,117 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/thread_instruction_count.h"
#include "base/base_switches.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/no_destructor.h"
#include "base/threading/thread_local_storage.h"
#include "build/build_config.h"
#if defined(OS_LINUX)
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#endif // defined(OS_LINUX)
namespace base {
namespace trace_event {
namespace {
#if defined(OS_LINUX)
// Special constants used for counter FD states.
constexpr int kPerfFdDisabled = -2;
constexpr int kPerfFdOpenFailed = -1;
constexpr int kPerfFdUninitialized = 0;
ThreadLocalStorage::Slot& InstructionCounterFdSlot() {
static NoDestructor<ThreadLocalStorage::Slot> fd_slot([](void* fd_ptr) {
int fd = reinterpret_cast<intptr_t>(fd_ptr);
if (fd > kPerfFdUninitialized)
close(fd);
});
return *fd_slot;
}
// Opens a new file descriptor that emits the value of
// PERF_COUNT_HW_INSTRUCTIONS in userspace (excluding kernel and hypervisor
// instructions) for the given |thread_id|, or 0 for the calling thread.
//
// Returns kPerfFdOpenFailed if opening the file descriptor failed, or
// kPerfFdDisabled if performance counters are disabled in the calling process.
int OpenInstructionCounterFdForThread(int thread_id) {
// This switch is only propagated for processes that are unaffected by the
// BPF sandbox, such as the browser process or renderers with --no-sandbox.
const base::CommandLine& command_line =
*base::CommandLine::ForCurrentProcess();
if (!command_line.HasSwitch(switches::kEnableThreadInstructionCount))
return kPerfFdDisabled;
struct perf_event_attr pe = {0};
pe.type = PERF_TYPE_HARDWARE;
pe.size = sizeof(struct perf_event_attr);
pe.config = PERF_COUNT_HW_INSTRUCTIONS;
pe.exclude_kernel = 1;
pe.exclude_hv = 1;
int fd = syscall(__NR_perf_event_open, &pe, thread_id, /* cpu */ -1,
/* group_fd */ -1, /* flags */ 0);
if (fd < 0) {
LOG(ERROR) << "perf_event_open failed, omitting instruction counters";
return kPerfFdOpenFailed;
}
return fd;
}
// Retrieves the active perf counter FD for the current thread, performing
// lazy-initialization if necessary.
int InstructionCounterFdForCurrentThread() {
auto& slot = InstructionCounterFdSlot();
int fd = reinterpret_cast<intptr_t>(slot.Get());
if (fd == kPerfFdUninitialized) {
fd = OpenInstructionCounterFdForThread(0);
slot.Set(reinterpret_cast<void*>(fd));
}
return fd;
}
#endif // defined(OS_LINUX)
} // namespace
bool ThreadInstructionCount::IsSupported() {
#if defined(OS_LINUX)
// If we can't initialize the counter FD, mark as disabled.
int counter_fd = InstructionCounterFdForCurrentThread();
if (counter_fd <= 0)
return false;
return true;
#endif // defined(OS_LINUX)
return false;
}
ThreadInstructionCount ThreadInstructionCount::Now() {
DCHECK(IsSupported());
#if defined(OS_LINUX)
int fd = InstructionCounterFdForCurrentThread();
if (fd <= 0)
return ThreadInstructionCount();
uint64_t instructions = 0;
ssize_t bytes_read = read(fd, &instructions, sizeof(instructions));
CHECK_EQ(bytes_read, static_cast<ssize_t>(sizeof(instructions)))
<< "Short reads of small size from kernel memory is not expected. If "
"this fails, use HANDLE_EINTR.";
return ThreadInstructionCount(instructions);
#endif // defined(OS_LINUX)
return ThreadInstructionCount();
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,59 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_THREAD_INSTRUCTION_COUNT_H_
#define BASE_TRACE_EVENT_THREAD_INSTRUCTION_COUNT_H_
#include <stdint.h>
#include "base/base_export.h"
namespace base {
namespace trace_event {
// Represents the number of instructions that were retired between two samples
// of a thread's performance counters.
class BASE_EXPORT ThreadInstructionDelta {
public:
constexpr ThreadInstructionDelta() : delta_(0) {}
explicit constexpr ThreadInstructionDelta(int64_t delta) : delta_(delta) {}
constexpr int64_t ToInternalValue() const { return delta_; }
private:
int64_t delta_;
};
// Uses the system's performance counters in order to measure the number of
// instructions that have been retired on the current thread.
class BASE_EXPORT ThreadInstructionCount {
public:
// Returns true if the platform supports hardware retired instruction
// counters.
static bool IsSupported();
// Returns the number of retired instructions relative to some epoch count,
// or -1 if getting the current instruction count failed / is disabled.
static ThreadInstructionCount Now();
constexpr ThreadInstructionCount() : value_(-1) {}
explicit constexpr ThreadInstructionCount(int64_t value) : value_(value) {}
constexpr bool is_null() const { return value_ == -1; }
constexpr ThreadInstructionDelta operator-(
ThreadInstructionCount other) const {
return ThreadInstructionDelta(value_ - other.value_);
}
constexpr int64_t ToInternalValue() const { return value_; }
private:
int64_t value_;
};
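// Illustrative usage sketch (DoExpensiveWork() is a hypothetical call site):
//   if (ThreadInstructionCount::IsSupported()) {
//     ThreadInstructionCount begin = ThreadInstructionCount::Now();
//     DoExpensiveWork();
//     ThreadInstructionDelta retired = ThreadInstructionCount::Now() - begin;
//   }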
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_THREAD_INSTRUCTION_COUNT_H_

View file

@ -0,0 +1,287 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_arguments.h"
#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <cmath>
#include "base/json/string_escape.h"
#include "base/logging.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
namespace base {
namespace trace_event {
namespace {
size_t GetAllocLength(const char* str) {
return str ? strlen(str) + 1 : 0;
}
// Copies |*member| into |*buffer|, sets |*member| to point to this new
// location, and then advances |*buffer| by the amount written.
void CopyTraceEventParameter(char** buffer,
const char** member,
const char* end) {
if (*member) {
size_t written = strlcpy(*buffer, *member, end - *buffer) + 1;
DCHECK_LE(static_cast<int>(written), end - *buffer);
*member = *buffer;
*buffer += written;
}
}
// Append |val| as a JSON output value to |*out|.
void AppendDouble(double val, bool as_json, std::string* out) {
// FIXME: base/json/json_writer.cc is using the same code,
// should be made into a common method.
std::string real;
if (std::isfinite(val)) {
real = NumberToString(val);
// Ensure that the number has a .0 if there's no decimal or 'e'. This
// makes sure that when we read the JSON back, it's interpreted as a
// real rather than an int.
if (real.find('.') == std::string::npos &&
real.find('e') == std::string::npos &&
real.find('E') == std::string::npos) {
real.append(".0");
}
// The JSON spec requires that non-integer values in the range (-1,1)
// have a zero before the decimal point - ".52" is not valid, "0.52" is.
if (real[0] == '.') {
real.insert(0, "0");
} else if (real.length() > 1 && real[0] == '-' && real[1] == '.') {
// "-.1" bad "-0.1" good
real.insert(1, "0");
}
} else if (std::isnan(val)) {
// The JSON spec doesn't allow NaN and Infinity (since these are
// objects in EcmaScript). Use strings instead.
real = as_json ? "\"NaN\"" : "NaN";
} else if (val < 0) {
real = as_json ? "\"-Infinity\"" : "-Infinity";
} else {
real = as_json ? "\"Infinity\"" : "Infinity";
}
StringAppendF(out, "%s", real.c_str());
}
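// Worked examples (illustrative): 0.5 -> "0.5"; 3.0 -> "3" from
// NumberToString(), then ".0" is appended to give "3.0"; NaN -> "\"NaN\""
// when |as_json| is true and "NaN" otherwise.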
const char* TypeToString(char arg_type) {
switch (arg_type) {
case TRACE_VALUE_TYPE_INT:
return "int";
case TRACE_VALUE_TYPE_UINT:
return "uint";
case TRACE_VALUE_TYPE_DOUBLE:
return "double";
case TRACE_VALUE_TYPE_BOOL:
return "bool";
case TRACE_VALUE_TYPE_POINTER:
return "pointer";
case TRACE_VALUE_TYPE_STRING:
return "string";
case TRACE_VALUE_TYPE_COPY_STRING:
return "copy_string";
case TRACE_VALUE_TYPE_CONVERTABLE:
return "convertable";
default:
NOTREACHED();
return "UNKNOWN_TYPE";
}
}
void AppendValueDebugString(const TraceArguments& args,
size_t idx,
std::string* out) {
*out += (args.names()[idx] ? args.names()[idx] : "NULL_NAME");
*out += "=";
*out += TypeToString(args.types()[idx]);
*out += "(";
args.values()[idx].AppendAsJSON(args.types()[idx], out);
*out += ")";
}
} // namespace
void StringStorage::Reset(size_t alloc_size) {
if (!alloc_size) {
if (data_)
::free(data_);
data_ = nullptr;
} else if (!data_ || alloc_size != data_->size) {
data_ = static_cast<Data*>(::realloc(data_, sizeof(size_t) + alloc_size));
data_->size = alloc_size;
}
}
bool StringStorage::Contains(const TraceArguments& args) const {
for (size_t n = 0; n < args.size(); ++n) {
if (args.types()[n] == TRACE_VALUE_TYPE_COPY_STRING &&
!Contains(args.values()[n].as_string)) {
return false;
}
}
return true;
}
static_assert(
std::is_pod<TraceValue>::value,
"TraceValue must be plain-old-data type for performance reasons!");
void TraceValue::AppendAsJSON(unsigned char type, std::string* out) const {
Append(type, true, out);
}
void TraceValue::AppendAsString(unsigned char type, std::string* out) const {
Append(type, false, out);
}
void TraceValue::Append(unsigned char type,
bool as_json,
std::string* out) const {
switch (type) {
case TRACE_VALUE_TYPE_BOOL:
*out += this->as_bool ? "true" : "false";
break;
case TRACE_VALUE_TYPE_UINT:
StringAppendF(out, "%" PRIu64, static_cast<uint64_t>(this->as_uint));
break;
case TRACE_VALUE_TYPE_INT:
StringAppendF(out, "%" PRId64, static_cast<int64_t>(this->as_int));
break;
case TRACE_VALUE_TYPE_DOUBLE:
AppendDouble(this->as_double, as_json, out);
break;
case TRACE_VALUE_TYPE_POINTER: {
// JSON only supports double and int numbers.
// So as not to lose bits from a 64-bit pointer, output as a hex string.
// For consistency, do the same for non-JSON strings, but without the
// surrounding quotes.
const char* format_string = as_json ? "\"0x%" PRIx64 "\"" : "0x%" PRIx64;
StringAppendF(
out, format_string,
static_cast<uint64_t>(reinterpret_cast<uintptr_t>(this->as_pointer)));
} break;
case TRACE_VALUE_TYPE_STRING:
case TRACE_VALUE_TYPE_COPY_STRING:
if (as_json)
EscapeJSONString(this->as_string ? this->as_string : "NULL", true, out);
else
*out += this->as_string ? this->as_string : "NULL";
break;
case TRACE_VALUE_TYPE_CONVERTABLE:
this->as_convertable->AppendAsTraceFormat(out);
break;
default:
NOTREACHED() << "Don't know how to print this value";
break;
}
}
TraceArguments& TraceArguments::operator=(TraceArguments&& other) noexcept {
if (this != &other) {
this->~TraceArguments();
new (this) TraceArguments(std::move(other));
}
return *this;
}
TraceArguments::TraceArguments(int num_args,
const char* const* arg_names,
const unsigned char* arg_types,
const unsigned long long* arg_values) {
if (num_args > static_cast<int>(kMaxSize))
num_args = static_cast<int>(kMaxSize);
size_ = static_cast<unsigned char>(num_args);
for (size_t n = 0; n < size_; ++n) {
types_[n] = arg_types[n];
names_[n] = arg_names[n];
values_[n].as_uint = arg_values[n];
}
}
void TraceArguments::Reset() {
for (size_t n = 0; n < size_; ++n) {
if (types_[n] == TRACE_VALUE_TYPE_CONVERTABLE)
delete values_[n].as_convertable;
}
size_ = 0;
}
void TraceArguments::CopyStringsTo(StringStorage* storage,
bool copy_all_strings,
const char** extra_string1,
const char** extra_string2) {
// First, compute total allocation size.
size_t alloc_size = 0;
if (copy_all_strings) {
alloc_size +=
GetAllocLength(*extra_string1) + GetAllocLength(*extra_string2);
for (size_t n = 0; n < size_; ++n)
alloc_size += GetAllocLength(names_[n]);
}
for (size_t n = 0; n < size_; ++n) {
if (copy_all_strings && types_[n] == TRACE_VALUE_TYPE_STRING)
types_[n] = TRACE_VALUE_TYPE_COPY_STRING;
if (types_[n] == TRACE_VALUE_TYPE_COPY_STRING)
alloc_size += GetAllocLength(values_[n].as_string);
}
if (alloc_size) {
storage->Reset(alloc_size);
char* ptr = storage->data();
const char* end = ptr + alloc_size;
if (copy_all_strings) {
CopyTraceEventParameter(&ptr, extra_string1, end);
CopyTraceEventParameter(&ptr, extra_string2, end);
for (size_t n = 0; n < size_; ++n)
CopyTraceEventParameter(&ptr, &names_[n], end);
}
for (size_t n = 0; n < size_; ++n) {
if (types_[n] == TRACE_VALUE_TYPE_COPY_STRING)
CopyTraceEventParameter(&ptr, &values_[n].as_string, end);
}
#if DCHECK_IS_ON()
DCHECK_EQ(end, ptr) << "Overrun by " << ptr - end;
if (copy_all_strings) {
if (extra_string1 && *extra_string1)
DCHECK(storage->Contains(*extra_string1));
if (extra_string2 && *extra_string2)
DCHECK(storage->Contains(*extra_string2));
for (size_t n = 0; n < size_; ++n)
DCHECK(storage->Contains(names_[n]));
}
for (size_t n = 0; n < size_; ++n) {
if (types_[n] == TRACE_VALUE_TYPE_COPY_STRING)
DCHECK(storage->Contains(values_[n].as_string));
}
#endif // DCHECK_IS_ON()
} else {
storage->Reset();
}
}
void TraceArguments::AppendDebugString(std::string* out) {
*out += "TraceArguments(";
for (size_t n = 0; n < size_; ++n) {
if (n > 0)
*out += ", ";
AppendValueDebugString(*this, n, out);
}
*out += ")";
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,672 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_ARGUMENTS_H_
#define BASE_TRACE_EVENT_TRACE_ARGUMENTS_H_
#include <stdlib.h>
#include <string.h>
#include <algorithm>
#include <memory>
#include <string>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/trace_event/common/trace_event_common.h"
// A trace macro can have up to two optional arguments, each one of them
// identified by a name (a C string literal) and a value, which can be an
// integer, enum, floating point, boolean, string pointer or reference, or
// std::unique_ptr<ConvertableToTraceFormat> compatible values. Additionally,
// custom data types need to be supported, like time values or WTF::CString.
//
// TraceArguments is a helper class used to store 0 to 2 named arguments
// corresponding to an individual trace macro call, as efficiently as possible
// and with the minimal amount of generated machine code (since this affects
// every TRACE macro call). Each argument has:
//
// - A name (C string literal, e.g "dumps")
// - An 8-bit type value, corresponding to the TRACE_VALUE_TYPE_XXX macros.
// - A value, stored in a TraceValue union
//
// IMPORTANT: For a TRACE_VALUE_TYPE_CONVERTABLE types, the TraceArguments
// instance owns the pointed ConvertableToTraceFormat object, i.e. it will
// delete it automatically on destruction.
//
// TraceArguments instances should be built using one of specialized
// constructors declared below. One cannot modify an instance once it has
// been built, except for move operations, Reset() and destruction. Examples:
//
// TraceArguments args; // No arguments.
// // args.size() == 0
//
// TraceArguments("foo", 100);
// // args.size() == 1
// // args.types()[0] == TRACE_VALUE_TYPE_INT
// // args.names()[0] == "foo"
// // args.values()[0].as_int == 100
//
// TraceArguments("bar", 1ULL);
// // args.size() == 1
// // args.types()[0] == TRACE_VALUE_TYPE_UINT
// // args.names()[0] == "bar"
// // args.values()[0].as_uint == 1
//
// TraceArguments("foo", "Hello", "bar", "World");
// // args.size() == 2
// // args.types()[0] == TRACE_VALUE_TYPE_STRING
// // args.types()[1] == TRACE_VALUE_TYPE_STRING
// // args.names()[0] == "foo"
// // args.names()[1] == "bar"
// // args.values()[0].as_string == "Hello"
// // args.values()[1].as_string == "World"
//
// std::string some_string = ...;
// TraceArguments("str1", some_string);
// // args.size() == 1
// // args.types()[0] == TRACE_VALUE_TYPE_COPY_STRING
// // args.names()[0] == "str1"
// // args.values()[0].as_string == some_string.c_str()
//
// Note that TRACE_VALUE_TYPE_COPY_STRING corresponds to string pointers
// that point to temporary values that may disappear soon. The
// TraceArguments::CopyStringsTo() method can be used to copy their content
// into a StringStorage memory block, and update the |as_string| value pointers
// to it to avoid keeping any dangling pointers. This is used by TraceEvent
// to keep copies of such strings in the log after their initialization values
// have disappeared.
//
// The TraceStringWithCopy helper class can be used to initialize a value
// from a regular string pointer with TRACE_VALUE_TYPE_COPY_STRING too, as in:
//
// const char str[] = "....";
// TraceArguments("foo", str, "bar", TraceStringWithCopy(str));
// // args.size() == 2
// // args.types()[0] == TRACE_VALUE_TYPE_STRING
// // args.types()[1] == TRACE_VALUE_TYPE_COPY_STRING
// // args.names()[0] == "foo"
// // args.names()[1] == "bar"
// // args.values()[0].as_string == str
// // args.values()[1].as_string == str
//
// StringStorage storage;
// args.CopyStringsTo(&storage, false, nullptr, nullptr);
// // args.size() == 2
// // args.types()[0] == TRACE_VALUE_TYPE_STRING
// // args.types()[1] == TRACE_VALUE_TYPE_COPY_STRING
// // args.names()[0] == "foo"
// // args.names()[1] == "bar"
// // args.values()[0].as_string == str
// // args.values()[1].as_string == Address inside |storage|.
//
// Initialization from a std::unique_ptr<ConvertableToTraceFormat>
// is supported but will move ownership of the pointer objects to the
// TraceArguments instance:
//
// class MyConvertableType :
// public base::trace_event::ConvertableToTraceFormat {
// ...
// };
//
// {
// TraceArguments args("foo" , std::make_unique<MyConvertableType>(...));
// // args.size() == 1
// // args.values()[0].as_convertable == address of MyConvertable object.
// } // Calls |args| destructor, which will delete the object too.
//
// Finally, it is possible to support initialization from custom values by
// specializing the TraceValue::Helper<> template struct as described below.
//
// This is how values of custom types like WTF::CString can be passed directly
// to trace macros.
namespace base {
class Time;
class TimeTicks;
class ThreadTicks;
namespace trace_event {
class TraceEventMemoryOverhead;
// For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided
// class must implement this interface. Note that unlike other values,
// these objects will be owned by the TraceArguments instance that points
// to them.
class BASE_EXPORT ConvertableToTraceFormat {
public:
ConvertableToTraceFormat() = default;
virtual ~ConvertableToTraceFormat() = default;
// Append the class info to the provided |out| string. The appended
// data must be a valid JSON object. Strings must be properly quoted, and
// escaped. There is no processing applied to the content after it is
// appended.
virtual void AppendAsTraceFormat(std::string* out) const = 0;
// Append the class info directly into the Perfetto-defined proto
// format; this is attempted first and if this returns true,
// AppendAsTraceFormat is not called. The ProtoAppender interface
// acts as a bridge to avoid proto/Perfetto dependencies in base.
class BASE_EXPORT ProtoAppender {
public:
virtual ~ProtoAppender() = default;
virtual void AddBuffer(uint8_t* begin, uint8_t* end) = 0;
// Copy all of the previous buffers registered with AddBuffer
// into the proto, with the given |field_id|.
virtual size_t Finalize(uint32_t field_id) = 0;
};
virtual bool AppendToProto(ProtoAppender* appender);
virtual void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
private:
DISALLOW_COPY_AND_ASSIGN(ConvertableToTraceFormat);
};
const int kTraceMaxNumArgs = 2;
// A union used to hold the values of individual trace arguments.
//
// This is a POD union for performance reasons. Initialization from an
// explicit C++ trace argument should be performed with the Init()
// templated method described below.
//
// Initialization from custom types is possible by implementing a custom
// TraceValue::Helper<> instantiation as described below.
//
// IMPORTANT: Pointer storage inside a TraceUnion follows specific rules:
//
// - |as_pointer| is for raw pointers that should be treated as a simple
// address and will never be dereferenced. Associated with the
// TRACE_VALUE_TYPE_POINTER type.
//
// - |as_string| is for C-string pointers, associated with both
// TRACE_VALUE_TYPE_STRING and TRACE_VALUE_TYPE_COPY_STRING. The former
// indicates that the string pointer is persistent (e.g. a C string
// literal), while the second indicates that the pointer belongs to a
// temporary variable that may disappear soon. The TraceArguments class
// provides a CopyStringTo() method to copy these strings into a
// StringStorage instance, which is useful if the instance needs to
// survive longer than the temporaries.
//
// - |as_convertable| is equivalent to
// std::unique_ptr<ConvertableToTraceFormat>, except that it is a pointer
// to keep this union POD and avoid unnecessary declarations and potential
// code generation. This means that its ownership is passed to the
// TraceValue instance when Init(std::unique_ptr<ConvertableToTraceFormat>)
// is called, and that it will be deleted by the containing TraceArguments
// destructor, or Reset() method.
//
union BASE_EXPORT TraceValue {
bool as_bool;
unsigned long long as_uint;
long long as_int;
double as_double;
const void* as_pointer;
const char* as_string;
ConvertableToTraceFormat* as_convertable;
// There is no constructor to keep this structure POD intentionally.
// This avoids unneeded initialization when only 0 or 1 arguments are
// used to construct a TraceArguments instance. Use Init() instead to
// perform explicit initialization from a given C++ value.
// Initialize TraceValue instance from a C++ trace value.
// This relies on the proper specialization of TraceValue::Helper<>
// described below. Usage is simply:
//
// TraceValue v;
// v.Init(<value>);
//
// NOTE: For ConvertableToTraceFormat values, see the note above and
// the one for TraceValue::Helper for CONVERTABLE_TYPE below.
template <typename T>
void Init(T&& value) {
using ValueType = typename InnerType<T>::type;
Helper<ValueType>::SetValue(this, std::forward<T>(value));
}
// Static method to create a new TraceValue instance from a given
// initialization value. Note that this deduces the TRACE_VALUE_TYPE_XXX
// type but doesn't return it, use ForType<T>::value for this.
//
// Usage example:
// auto v = TraceValue::Make(100);
// auto v2 = TraceValue::Make("Some text string");
//
// IMPORTANT: Experience shows that the compiler generates worse code when
// using this method rather than calling Init() directly on an existing
// TraceValue union :-(
//
template <typename T>
static TraceValue Make(T&& value) {
TraceValue ret;
ret.Init(std::forward<T>(value));
return ret;
}
// Output current value as a JSON string. |type| must be a valid
// TRACE_VALUE_TYPE_XXX value.
void AppendAsJSON(unsigned char type, std::string* out) const;
// Output current value as a string. If the output string is to be used
// in a JSON format, use AppendAsJSON instead. |type| must be a valid
// TRACE_VALUE_TYPE_XXX value.
void AppendAsString(unsigned char type, std::string* out) const;
private:
void Append(unsigned char type, bool as_json, std::string* out) const;
// InnerType<T>::type removes references and cv-qualifications, and decays
// functions and arrays into pointers. Only used internally.
template <typename T>
struct InnerType {
using type = typename std::remove_cv<typename std::remove_reference<
typename std::decay<T>::type>::type>::type;
};
public:
// TraceValue::Helper is used to provide information about initialization
// value types and an initialization function. It is a struct that should
// provide the following for supported initialization value types:
//
// - kType: is a static TRACE_VALUE_TYPE_XXX constant.
//
// - SetValue(TraceValue*, T): is a static inline method that sets
// TraceValue value from a given T value. Second parameter type
// can also be const T& or T&& to restrict uses.
//
// IMPORTANT: The type T must be InnerType<Q>, where Q is the real C++
// argument type. I.e. you should not have to deal with reference types
// in your specialization.
//
// Specializations are defined for integers, enums, floating point, pointers,
// constant C string literals and pointers, std::string, and time values below.
//
// Specializations for custom types are possible provided that there exists
// a corresponding Helper specialization, for example:
//
// template <>
// struct base::trace_event::TraceValue::Helper<Foo> {
// static constexpr unsigned char kType = TRACE_VALUE_TYPE_COPY_STRING;
// static inline void SetValue(TraceValue* v, const Foo& value) {
// v->as_string = value.c_str();
// }
// };
//
// Will allow code like:
//
// Foo foo = ...;
// auto v = TraceValue::Make(foo);
//
// Or even:
// Foo foo = ...;
// TraceArguments args("foo_arg1", foo);
//
template <typename T, class = void>
struct Helper {};
// TraceValue::TypeFor<T>::value returns the TRACE_VALUE_TYPE_XXX
// corresponding to initialization values of type T.
template <typename T>
struct TypeFor {
using ValueType = typename InnerType<T>::type;
static const unsigned char value = Helper<ValueType>::kType;
};
// TraceValue::TypeCheck<T>::value is only defined iff T can be used to
// initialize a TraceValue instance. This is useful to restrict template
// instantiation to only the appropriate type (see TraceArguments
// constructors below).
template <typename T,
class = decltype(TraceValue::Helper<
typename TraceValue::InnerType<T>::type>::kType)>
struct TypeCheck {
static const bool value = true;
};
};
// TraceValue::Helper for integers and enums.
template <typename T>
struct TraceValue::Helper<
T,
typename std::enable_if<std::is_integral<T>::value ||
std::is_enum<T>::value>::type> {
static constexpr unsigned char kType =
std::is_signed<T>::value ? TRACE_VALUE_TYPE_INT : TRACE_VALUE_TYPE_UINT;
static inline void SetValue(TraceValue* v, T value) {
v->as_uint = static_cast<unsigned long long>(value);
}
};
// TraceValue::Helper for floating-point types
template <typename T>
struct TraceValue::
Helper<T, typename std::enable_if<std::is_floating_point<T>::value>::type> {
static constexpr unsigned char kType = TRACE_VALUE_TYPE_DOUBLE;
static inline void SetValue(TraceValue* v, T value) { v->as_double = value; }
};
// TraceValue::Helper for bool.
template <>
struct TraceValue::Helper<bool> {
static constexpr unsigned char kType = TRACE_VALUE_TYPE_BOOL;
static inline void SetValue(TraceValue* v, bool value) { v->as_bool = value; }
};
// TraceValue::Helper for generic pointer types.
template <typename T>
struct TraceValue::Helper<T*> {
static constexpr unsigned char kType = TRACE_VALUE_TYPE_POINTER;
static inline void SetValue(TraceValue* v,
const typename std::decay<T>::type* value) {
v->as_pointer = value;
}
};
// TraceValue::Helper for raw persistent C strings.
template <>
struct TraceValue::Helper<const char*> {
static constexpr unsigned char kType = TRACE_VALUE_TYPE_STRING;
static inline void SetValue(TraceValue* v, const char* value) {
v->as_string = value;
}
};
// TraceValue::Helper for std::string values.
template <>
struct TraceValue::Helper<std::string> {
static constexpr unsigned char kType = TRACE_VALUE_TYPE_COPY_STRING;
static inline void SetValue(TraceValue* v, const std::string& value) {
v->as_string = value.c_str();
}
};
// Special case for scoped pointers to convertables to trace format.
// |CONVERTABLE_TYPE| must be a type whose pointers can be converted to a
// ConvertableToTraceFormat* pointer as well (e.g. a derived class).
// IMPORTANT: This takes an std::unique_ptr<CONVERTABLE_TYPE> value, and takes
// ownership of the pointed object!
template <typename CONVERTABLE_TYPE>
struct TraceValue::Helper<std::unique_ptr<CONVERTABLE_TYPE>,
typename std::enable_if<std::is_convertible<
CONVERTABLE_TYPE*,
ConvertableToTraceFormat*>::value>::type> {
static constexpr unsigned char kType = TRACE_VALUE_TYPE_CONVERTABLE;
static inline void SetValue(TraceValue* v,
std::unique_ptr<CONVERTABLE_TYPE> value) {
v->as_convertable = value.release();
}
};
// Specialization for time-based values like base::Time, which provide a
// ToInternalValue() method.
template <typename T>
struct TraceValue::Helper<
T,
typename std::enable_if<std::is_same<T, base::Time>::value ||
std::is_same<T, base::TimeTicks>::value ||
std::is_same<T, base::ThreadTicks>::value>::type> {
static constexpr unsigned char kType = TRACE_VALUE_TYPE_INT;
static inline void SetValue(TraceValue* v, const T& value) {
v->as_int = value.ToInternalValue();
}
};
// Simple container for const char* that should be copied instead of retained.
// The goal is to indicate that the C string is copyable, unlike the default
// Init(const char*) implementation. Usage is:
//
// const char* str = ...;
// v.Init(TraceStringWithCopy(str));
//
// Which will mark the string as TRACE_VALUE_TYPE_COPY_STRING, instead of
// TRACE_VALUE_TYPE_STRING.
//
class TraceStringWithCopy {
public:
explicit TraceStringWithCopy(const char* str) : str_(str) {}
const char* str() const { return str_; }
private:
const char* str_;
};
template <>
struct TraceValue::Helper<TraceStringWithCopy> {
static constexpr unsigned char kType = TRACE_VALUE_TYPE_COPY_STRING;
static inline void SetValue(TraceValue* v, const TraceStringWithCopy& value) {
v->as_string = value.str();
}
};
class TraceArguments;
// A small class used to store a copy of all strings from a given
// TraceArguments instance (see below). When empty, this should only
// take the size of a pointer. Otherwise, this will point to a heap
// allocated block containing a size_t value followed by all characters
// in the storage area. For most cases, this is more efficient
// than using a std::unique_ptr<std::string> or an std::vector<char>.
class BASE_EXPORT StringStorage {
public:
constexpr StringStorage() = default;
explicit StringStorage(size_t alloc_size) { Reset(alloc_size); }
~StringStorage() {
if (data_)
::free(data_);
}
StringStorage(StringStorage&& other) noexcept : data_(other.data_) {
other.data_ = nullptr;
}
StringStorage& operator=(StringStorage&& other) noexcept {
if (this != &other) {
if (data_)
::free(data_);
data_ = other.data_;
other.data_ = nullptr;
}
return *this;
}
// Reset storage area to new allocation size. Existing content might not
// be preserved. If |alloc_size| is 0, this will free the storage area
// as well.
void Reset(size_t alloc_size = 0);
// Accessors.
constexpr size_t size() const { return data_ ? data_->size : 0u; }
constexpr const char* data() const { return data_ ? data_->chars : nullptr; }
constexpr char* data() { return data_ ? data_->chars : nullptr; }
constexpr const char* begin() const { return data(); }
constexpr const char* end() const { return data() + size(); }
inline char* begin() { return data(); }
inline char* end() { return data() + size(); }
// True iff storage is empty.
constexpr bool empty() const { return size() == 0; }
// Returns true if |ptr| is inside the storage area, false otherwise.
// Used during unit-testing.
constexpr bool Contains(const void* ptr) const {
const char* char_ptr = static_cast<const char*>(ptr);
return (char_ptr >= begin() && char_ptr < end());
}
// Returns true if all string pointers in |args| are contained in this
// storage area.
bool Contains(const TraceArguments& args) const;
// Return an estimate of the memory overhead of this instance. This doesn't
// count the size of |data_| itself.
constexpr size_t EstimateTraceMemoryOverhead() const {
return data_ ? sizeof(size_t) + data_->size : 0u;
}
private:
// Heap allocated data block (variable size), made of:
//
// - size: a size_t field, giving the size of the following |chars| array.
// - chars: an array of |size| characters, holding all zero-terminated
// strings referenced from a TraceArguments instance.
struct Data {
size_t size = 0;
char chars[1]; // really |size| character items in storage.
};
// This is an owning pointer. Normally, using a std::unique_ptr<> would be
// enough, but the compiler will then complain about inlined constructors
// and destructors being too complex (!), resulting in larger code for no
// good reason.
Data* data_ = nullptr;
};
// TraceArguments models an array of kMaxSize trace-related items,
// each one of them having:
// - a name, which is a constant char array literal.
// - a type, as described by TRACE_VALUE_TYPE_XXX macros.
// - a value, stored in a TraceValue union.
//
// IMPORTANT: For TRACE_VALUE_TYPE_CONVERTABLE, the value holds an owning
// pointer to a ConvertableToTraceFormat instance, which will
// be destroyed with the array (or moved out of it when passed
// to a TraceEvent instance).
//
// For TRACE_VALUE_TYPE_COPY_STRING, the value holds a const char* pointer
// whose content will be copied when creating a TraceEvent instance.
//
// IMPORTANT: Most constructors and the destructor are all inlined
// intentionally, in order to let the compiler remove unnecessary operations
// and reduce machine code.
//
class BASE_EXPORT TraceArguments {
public:
// Maximum number of arguments held by this structure.
static constexpr size_t kMaxSize = 2;
// Default constructor, no arguments.
TraceArguments() : size_(0) {}
// Constructor for a single argument.
template <typename T, class = decltype(TraceValue::TypeCheck<T>::value)>
TraceArguments(const char* arg1_name, T&& arg1_value) : size_(1) {
types_[0] = TraceValue::TypeFor<T>::value;
names_[0] = arg1_name;
values_[0].Init(std::forward<T>(arg1_value));
}
// Constructor for two arguments.
template <typename T1,
typename T2,
class = decltype(TraceValue::TypeCheck<T1>::value &&
TraceValue::TypeCheck<T2>::value)>
TraceArguments(const char* arg1_name,
T1&& arg1_value,
const char* arg2_name,
T2&& arg2_value)
: size_(2) {
types_[0] = TraceValue::TypeFor<T1>::value;
types_[1] = TraceValue::TypeFor<T2>::value;
names_[0] = arg1_name;
names_[1] = arg2_name;
values_[0].Init(std::forward<T1>(arg1_value));
values_[1].Init(std::forward<T2>(arg2_value));
}
// Constructor used to convert a legacy set of arguments when there
// are no convertable values at all.
TraceArguments(int num_args,
const char* const* arg_names,
const unsigned char* arg_types,
const unsigned long long* arg_values);
// Constructor used to convert a legacy set of arguments, where the
// convertable values are also provided by an array of CONVERTABLE_TYPE.
template <typename CONVERTABLE_TYPE>
TraceArguments(int num_args,
const char* const* arg_names,
const unsigned char* arg_types,
const unsigned long long* arg_values,
CONVERTABLE_TYPE* arg_convertables) {
static int max_args = static_cast<int>(kMaxSize);
if (num_args > max_args)
num_args = max_args;
size_ = static_cast<unsigned char>(num_args);
for (size_t n = 0; n < size_; ++n) {
types_[n] = arg_types[n];
names_[n] = arg_names[n];
if (arg_types[n] == TRACE_VALUE_TYPE_CONVERTABLE) {
values_[n].Init(
std::forward<CONVERTABLE_TYPE>(std::move(arg_convertables[n])));
} else {
values_[n].as_uint = arg_values[n];
}
}
}
// Destructor. NOTE: Intentionally inlined (see note above).
~TraceArguments() {
for (size_t n = 0; n < size_; ++n) {
if (types_[n] == TRACE_VALUE_TYPE_CONVERTABLE)
delete values_[n].as_convertable;
}
}
// Disallow copy operations.
TraceArguments(const TraceArguments&) = delete;
TraceArguments& operator=(const TraceArguments&) = delete;
// Allow move operations.
TraceArguments(TraceArguments&& other) noexcept {
::memcpy(this, &other, sizeof(*this));
// All owning pointers were copied to |this|. Setting |other.size_| to 0
// masks the pointer values still in |other|.
other.size_ = 0;
}
TraceArguments& operator=(TraceArguments&&) noexcept;
// Accessors
size_t size() const { return size_; }
const unsigned char* types() const { return types_; }
const char* const* names() const { return names_; }
const TraceValue* values() const { return values_; }
// Reset to empty arguments list.
void Reset();
// Use |storage| to copy all copyable strings.
// If |copy_all_strings| is false, then only the TRACE_VALUE_TYPE_COPY_STRING
// values will be copied into storage. If it is true, then argument names are
// also copied to storage, as well as the strings pointed to by
// |*extra_string1| and |*extra_string2|.
// NOTE: If there are no strings to copy, |*storage| is left untouched.
void CopyStringsTo(StringStorage* storage,
bool copy_all_strings,
const char** extra_string1,
const char** extra_string2);
// Append debug string representation to |*out|.
void AppendDebugString(std::string* out);
private:
unsigned char size_;
unsigned char types_[kMaxSize];
const char* names_[kMaxSize];
TraceValue values_[kMaxSize];
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_ARGUMENTS_H_

View file

@ -0,0 +1,347 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_buffer.h"
#include <memory>
#include <utility>
#include <vector>
#include "base/bind.h"
#include "base/macros.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/trace_event_impl.h"
namespace base {
namespace trace_event {
namespace {
class TraceBufferRingBuffer : public TraceBuffer {
public:
TraceBufferRingBuffer(size_t max_chunks)
: max_chunks_(max_chunks),
recyclable_chunks_queue_(new size_t[queue_capacity()]),
queue_head_(0),
queue_tail_(max_chunks),
current_iteration_index_(0),
current_chunk_seq_(1) {
chunks_.reserve(max_chunks);
for (size_t i = 0; i < max_chunks; ++i)
recyclable_chunks_queue_[i] = i;
}
std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
HEAP_PROFILER_SCOPED_IGNORE;
// Because the number of threads is much less than the number of chunks,
// the queue should never be empty.
DCHECK(!QueueIsEmpty());
*index = recyclable_chunks_queue_[queue_head_];
queue_head_ = NextQueueIndex(queue_head_);
current_iteration_index_ = queue_head_;
if (*index >= chunks_.size())
chunks_.resize(*index + 1);
TraceBufferChunk* chunk = chunks_[*index].release();
chunks_[*index] = nullptr;  // Put nullptr in the slot of an in-flight chunk.
if (chunk)
chunk->Reset(current_chunk_seq_++);
else
chunk = new TraceBufferChunk(current_chunk_seq_++);
return std::unique_ptr<TraceBufferChunk>(chunk);
}
void ReturnChunk(size_t index,
std::unique_ptr<TraceBufferChunk> chunk) override {
// When this method is called, the queue should not be full because it
// can contain all chunks including the one to be returned.
DCHECK(!QueueIsFull());
DCHECK(chunk);
DCHECK_LT(index, chunks_.size());
DCHECK(!chunks_[index]);
chunks_[index] = std::move(chunk);
recyclable_chunks_queue_[queue_tail_] = index;
queue_tail_ = NextQueueIndex(queue_tail_);
}
bool IsFull() const override { return false; }
size_t Size() const override {
// This is approximate because not all of the chunks are full.
return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
}
size_t Capacity() const override {
return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
}
TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
if (handle.chunk_index >= chunks_.size())
return nullptr;
TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
if (!chunk || chunk->seq() != handle.chunk_seq)
return nullptr;
return chunk->GetEventAt(handle.event_index);
}
const TraceBufferChunk* NextChunk() override {
if (chunks_.empty())
return nullptr;
while (current_iteration_index_ != queue_tail_) {
size_t chunk_index = recyclable_chunks_queue_[current_iteration_index_];
current_iteration_index_ = NextQueueIndex(current_iteration_index_);
if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
continue;
DCHECK(chunks_[chunk_index]);
return chunks_[chunk_index].get();
}
return nullptr;
}
void EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) override {
overhead->Add(TraceEventMemoryOverhead::kTraceBuffer, sizeof(*this));
for (size_t queue_index = queue_head_; queue_index != queue_tail_;
queue_index = NextQueueIndex(queue_index)) {
size_t chunk_index = recyclable_chunks_queue_[queue_index];
if (chunk_index >= chunks_.size()) // Skip uninitialized chunks.
continue;
chunks_[chunk_index]->EstimateTraceMemoryOverhead(overhead);
}
}
private:
bool QueueIsEmpty() const { return queue_head_ == queue_tail_; }
size_t QueueSize() const {
return queue_tail_ > queue_head_
? queue_tail_ - queue_head_
: queue_tail_ + queue_capacity() - queue_head_;
}
bool QueueIsFull() const { return QueueSize() == queue_capacity() - 1; }
size_t queue_capacity() const {
// One extra space to help distinguish full state and empty state.
return max_chunks_ + 1;
}
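// Worked example (illustrative): with max_chunks_ == 3 the queue has 4
// slots. head == tail means empty; the queue is considered full once it
// holds 3 recyclable indices (capacity - 1), so the full and empty states
// never look identical.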
size_t NextQueueIndex(size_t index) const {
index++;
if (index >= queue_capacity())
index = 0;
return index;
}
size_t max_chunks_;
std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
std::unique_ptr<size_t[]> recyclable_chunks_queue_;
size_t queue_head_;
size_t queue_tail_;
size_t current_iteration_index_;
uint32_t current_chunk_seq_;
DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
};
class TraceBufferVector : public TraceBuffer {
public:
TraceBufferVector(size_t max_chunks)
: in_flight_chunk_count_(0),
current_iteration_index_(0),
max_chunks_(max_chunks) {
chunks_.reserve(max_chunks_);
}
std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) override {
HEAP_PROFILER_SCOPED_IGNORE;
// This function may be called when adding normal events or indirectly from
// AddMetadataEventsWhileLocked(). We cannot DCHECK(!IsFull()) because we
// have to add the metadata events and flush thread-local buffers even if
// the buffer is full.
*index = chunks_.size();
// Put nullptr in the slot of an in-flight chunk.
chunks_.push_back(nullptr);
++in_flight_chunk_count_;
// + 1 because zero chunk_seq is not allowed.
return std::unique_ptr<TraceBufferChunk>(
new TraceBufferChunk(static_cast<uint32_t>(*index) + 1));
}
void ReturnChunk(size_t index,
std::unique_ptr<TraceBufferChunk> chunk) override {
DCHECK_GT(in_flight_chunk_count_, 0u);
DCHECK_LT(index, chunks_.size());
DCHECK(!chunks_[index]);
--in_flight_chunk_count_;
chunks_[index] = std::move(chunk);
}
bool IsFull() const override { return chunks_.size() >= max_chunks_; }
size_t Size() const override {
// This is approximate because not all of the chunks are full.
return chunks_.size() * TraceBufferChunk::kTraceBufferChunkSize;
}
size_t Capacity() const override {
return max_chunks_ * TraceBufferChunk::kTraceBufferChunkSize;
}
TraceEvent* GetEventByHandle(TraceEventHandle handle) override {
if (handle.chunk_index >= chunks_.size())
return nullptr;
TraceBufferChunk* chunk = chunks_[handle.chunk_index].get();
if (!chunk || chunk->seq() != handle.chunk_seq)
return nullptr;
return chunk->GetEventAt(handle.event_index);
}
const TraceBufferChunk* NextChunk() override {
while (current_iteration_index_ < chunks_.size()) {
// Skip in-flight chunks.
const TraceBufferChunk* chunk = chunks_[current_iteration_index_++].get();
if (chunk)
return chunk;
}
return nullptr;
}
void EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) override {
const size_t chunks_ptr_vector_allocated_size =
sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
const size_t chunks_ptr_vector_resident_size =
sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
overhead->Add(TraceEventMemoryOverhead::kTraceBuffer,
chunks_ptr_vector_allocated_size,
chunks_ptr_vector_resident_size);
for (size_t i = 0; i < chunks_.size(); ++i) {
TraceBufferChunk* chunk = chunks_[i].get();
// Skip the in-flight (nullptr) chunks. They will be accounted by the
// per-thread-local dumpers, see ThreadLocalEventBuffer::OnMemoryDump.
if (chunk)
chunk->EstimateTraceMemoryOverhead(overhead);
}
}
private:
size_t in_flight_chunk_count_;
size_t current_iteration_index_;
size_t max_chunks_;
std::vector<std::unique_ptr<TraceBufferChunk>> chunks_;
DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
};
} // namespace
TraceBufferChunk::TraceBufferChunk(uint32_t seq) : next_free_(0), seq_(seq) {}
TraceBufferChunk::~TraceBufferChunk() = default;
void TraceBufferChunk::Reset(uint32_t new_seq) {
for (size_t i = 0; i < next_free_; ++i)
chunk_[i].Reset();
next_free_ = 0;
seq_ = new_seq;
cached_overhead_estimate_.reset();
}
TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
DCHECK(!IsFull());
*event_index = next_free_++;
return &chunk_[*event_index];
}
void TraceBufferChunk::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
if (!cached_overhead_estimate_) {
cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);
// When estimating the size of TraceBufferChunk, exclude the array of trace
// events, as they are computed individually below.
cached_overhead_estimate_->Add(TraceEventMemoryOverhead::kTraceBufferChunk,
sizeof(*this) - sizeof(chunk_));
}
const size_t num_cached_estimated_events =
cached_overhead_estimate_->GetCount(
TraceEventMemoryOverhead::kTraceEvent);
DCHECK_LE(num_cached_estimated_events, size());
if (IsFull() && num_cached_estimated_events == size()) {
overhead->Update(*cached_overhead_estimate_);
return;
}
for (size_t i = num_cached_estimated_events; i < size(); ++i)
chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());
if (IsFull()) {
cached_overhead_estimate_->AddSelf();
} else {
// The unused TraceEvents in |chunk_| are not cached. They will keep
// changing as new TraceEvents are added to this chunk, so they are
// computed on the fly.
const size_t num_unused_trace_events = capacity() - size();
overhead->Add(TraceEventMemoryOverhead::kUnusedTraceEvent,
num_unused_trace_events * sizeof(TraceEvent));
}
overhead->Update(*cached_overhead_estimate_);
}
TraceResultBuffer::OutputCallback
TraceResultBuffer::SimpleOutput::GetCallback() {
return BindRepeating(&SimpleOutput::Append, Unretained(this));
}
void TraceResultBuffer::SimpleOutput::Append(
const std::string& json_trace_output) {
json_output += json_trace_output;
}
TraceResultBuffer::TraceResultBuffer() : append_comma_(false) {}
TraceResultBuffer::~TraceResultBuffer() = default;
void TraceResultBuffer::SetOutputCallback(OutputCallback json_chunk_callback) {
output_callback_ = std::move(json_chunk_callback);
}
void TraceResultBuffer::Start() {
append_comma_ = false;
output_callback_.Run("[");
}
void TraceResultBuffer::AddFragment(const std::string& trace_fragment) {
if (append_comma_)
output_callback_.Run(",");
append_comma_ = true;
output_callback_.Run(trace_fragment);
}
void TraceResultBuffer::Finish() {
output_callback_.Run("]");
}
TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) {
return new TraceBufferRingBuffer(max_chunks);
}
TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) {
return new TraceBufferVector(max_chunks);
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,130 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_BUFFER_H_
#define BASE_TRACE_EVENT_TRACE_BUFFER_H_
#include <stddef.h>
#include <stdint.h>
#include "base/base_export.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_impl.h"
namespace base {
namespace trace_event {
// TraceBufferChunk is the basic unit of TraceBuffer.
class BASE_EXPORT TraceBufferChunk {
public:
explicit TraceBufferChunk(uint32_t seq);
~TraceBufferChunk();
void Reset(uint32_t new_seq);
TraceEvent* AddTraceEvent(size_t* event_index);
bool IsFull() const { return next_free_ == kTraceBufferChunkSize; }
uint32_t seq() const { return seq_; }
size_t capacity() const { return kTraceBufferChunkSize; }
size_t size() const { return next_free_; }
TraceEvent* GetEventAt(size_t index) {
DCHECK(index < size());
return &chunk_[index];
}
const TraceEvent* GetEventAt(size_t index) const {
DCHECK(index < size());
return &chunk_[index];
}
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
// These values must be kept consistent with the numbers of bits of
// chunk_index and event_index fields in TraceEventHandle
// (in trace_event_impl.h).
static const size_t kMaxChunkIndex = (1u << 26) - 1;
static const size_t kTraceBufferChunkSize = 64;
private:
size_t next_free_;
std::unique_ptr<TraceEventMemoryOverhead> cached_overhead_estimate_;
TraceEvent chunk_[kTraceBufferChunkSize];
uint32_t seq_;
};
// TraceBuffer holds the events as they are collected.
class BASE_EXPORT TraceBuffer {
public:
virtual ~TraceBuffer() = default;
virtual std::unique_ptr<TraceBufferChunk> GetChunk(size_t* index) = 0;
virtual void ReturnChunk(size_t index,
std::unique_ptr<TraceBufferChunk> chunk) = 0;
virtual bool IsFull() const = 0;
virtual size_t Size() const = 0;
virtual size_t Capacity() const = 0;
virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0;
// For iteration. Each TraceBuffer can only be iterated once.
virtual const TraceBufferChunk* NextChunk() = 0;
// Computes an estimate of the size of the buffer, including all the retained
// objects.
virtual void EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) = 0;
static TraceBuffer* CreateTraceBufferRingBuffer(size_t max_chunks);
static TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks);
};
// TraceResultBuffer collects and converts trace fragments returned by TraceLog
// to JSON output.
class BASE_EXPORT TraceResultBuffer {
public:
using OutputCallback = base::RepeatingCallback<void(const std::string&)>;
// If you don't need to stream JSON chunks out efficiently, and just want to
// get a complete JSON string after calling Finish, use this struct to collect
// JSON trace output.
struct BASE_EXPORT SimpleOutput {
OutputCallback GetCallback();
void Append(const std::string& json_string);
// Do what you want with the json_output string after calling
// TraceResultBuffer::Finish.
std::string json_output;
};
TraceResultBuffer();
~TraceResultBuffer();
// Set callback. The callback will be called during Start with the initial
// JSON output and during AddFragment and Finish with following JSON output
// chunks. The callback target must live past the last calls to
// TraceResultBuffer::Start/AddFragment/Finish.
void SetOutputCallback(OutputCallback json_chunk_callback);
// Start JSON output. This resets all internal state, so you can reuse
// the TraceResultBuffer by calling Start.
void Start();
// Call AddFragment 0 or more times to add trace fragments from TraceLog.
void AddFragment(const std::string& trace_fragment);
// When all fragments have been added, call Finish to complete the JSON
// formatted output.
void Finish();
private:
OutputCallback output_callback_;
bool append_comma_;
};
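// A minimal usage sketch (variable and fragment contents are illustrative):
//   TraceResultBuffer::SimpleOutput output;
//   TraceResultBuffer buffer;
//   buffer.SetOutputCallback(output.GetCallback());
//   buffer.Start();                        // output.json_output == "["
//   buffer.AddFragment("{\"ph\":\"B\"}");  // fragments are comma-separated
//   buffer.Finish();                       // closes the JSON array with "]"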
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_BUFFER_H_

View file

@ -0,0 +1,109 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_CATEGORY_H_
#define BASE_TRACE_EVENT_TRACE_CATEGORY_H_
#include <stdint.h>
namespace base {
namespace trace_event {
// Captures the state of an individual trace category. Nothing except tracing
// internals (e.g., TraceLog) is supposed to have non-const Category pointers.
struct TraceCategory {
// The TRACE_EVENT macros should only use this value as a bool.
// These enum values are effectively a public API and third_party projects
// depend on their value. Hence, never remove or recycle existing bits, unless
// you are sure that all the third-party projects that depend on this have
// been updated.
enum StateFlags : uint8_t {
ENABLED_FOR_RECORDING = 1 << 0,
// Not used anymore.
DEPRECATED_ENABLED_FOR_MONITORING = 1 << 1,
DEPRECATED_ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
ENABLED_FOR_ETW_EXPORT = 1 << 3,
ENABLED_FOR_FILTERING = 1 << 4
};
static const TraceCategory* FromStatePtr(const uint8_t* state_ptr) {
static_assert(
offsetof(TraceCategory, state_) == 0,
"|state_| must be the first field of the TraceCategory class.");
return reinterpret_cast<const TraceCategory*>(state_ptr);
}
bool is_valid() const { return name_ != nullptr; }
void set_name(const char* name) { name_ = name; }
const char* name() const {
DCHECK(is_valid());
return name_;
}
// TODO(primiano): This is an intermediate solution to deal with the fact that
// today TRACE_EVENT* macros cache the state ptr. They should just cache the
// full TraceCategory ptr, which is immutable, and use these helper functions
// here. This will get rid of the need for this awkward ptr getter completely.
constexpr const uint8_t* state_ptr() const {
return const_cast<const uint8_t*>(&state_);
}
uint8_t state() const {
return *const_cast<volatile const uint8_t*>(&state_);
}
bool is_enabled() const { return state() != 0; }
void set_state(uint8_t state) {
*const_cast<volatile uint8_t*>(&state_) = state;
}
void clear_state_flag(StateFlags flag) { set_state(state() & (~flag)); }
void set_state_flag(StateFlags flag) { set_state(state() | flag); }
uint32_t enabled_filters() const {
return *const_cast<volatile const uint32_t*>(&enabled_filters_);
}
bool is_filter_enabled(size_t index) const {
DCHECK(index < sizeof(enabled_filters_) * 8);
return (enabled_filters() & (1 << index)) != 0;
}
void set_enabled_filters(uint32_t enabled_filters) {
*const_cast<volatile uint32_t*>(&enabled_filters_) = enabled_filters;
}
void reset_for_testing() {
set_state(0);
set_enabled_filters(0);
}
// These fields should not be accessed directly, not even by tracing code.
// The only reason these are not private is that making them private would make
// it impossible to have a global array of TraceCategory in category_registry.cc
// without creating initializers. See discussion on goo.gl/qhZN94 and
// crbug.com/{660967,660828}.
// The enabled state. TRACE_EVENT* macros will capture events if any of the
// flags here are set. Since TRACE_EVENTx macros are used in a lot of
// fast-paths, accesses to this field are non-barriered and racy by design.
// This field is mutated when starting/stopping tracing and we don't care
// about missing some events.
uint8_t state_;
// When ENABLED_FOR_FILTERING is set, this contains a bitmap of the enabled
// filters (see event_filters.h).
uint32_t enabled_filters_;
// TraceCategory group names are long lived static strings.
const char* name_;
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_CATEGORY_H_

View file

@ -0,0 +1,719 @@
// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_config.h"
#include <stddef.h>
#include <algorithm>
#include <utility>
#include "base/json/json_reader.h"
#include "base/json/json_writer.h"
#include "base/memory/ptr_util.h"
#include "base/strings/string_split.h"
#include "base/trace_event/memory_dump_manager.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace trace_event {
namespace {
// String options that can be used to initialize TraceOptions.
const char kRecordUntilFull[] = "record-until-full";
const char kRecordContinuously[] = "record-continuously";
const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
const char kTraceToConsole[] = "trace-to-console";
const char kEnableSystrace[] = "enable-systrace";
constexpr int kEnableSystraceLength = sizeof(kEnableSystrace) - 1;
const char kEnableArgumentFilter[] = "enable-argument-filter";
// String parameters that can be used to parse the trace config string.
const char kRecordModeParam[] = "record_mode";
const char kTraceBufferSizeInEvents[] = "trace_buffer_size_in_events";
const char kTraceBufferSizeInKb[] = "trace_buffer_size_in_kb";
const char kEnableSystraceParam[] = "enable_systrace";
const char kSystraceEventsParam[] = "enable_systrace_events";
const char kEnableArgumentFilterParam[] = "enable_argument_filter";
// String parameters that are used to parse the memory dump config in the trace
// config string.
const char kMemoryDumpConfigParam[] = "memory_dump_config";
const char kAllowedDumpModesParam[] = "allowed_dump_modes";
const char kTriggersParam[] = "triggers";
const char kTriggerModeParam[] = "mode";
const char kMinTimeBetweenDumps[] = "min_time_between_dumps_ms";
const char kTriggerTypeParam[] = "type";
const char kPeriodicIntervalLegacyParam[] = "periodic_interval_ms";
const char kHeapProfilerOptions[] = "heap_profiler_options";
const char kBreakdownThresholdBytes[] = "breakdown_threshold_bytes";
// String parameters used to parse category event filters.
const char kEventFiltersParam[] = "event_filters";
const char kFilterPredicateParam[] = "filter_predicate";
const char kFilterArgsParam[] = "filter_args";
// String parameter used to parse process filter.
const char kIncludedProcessesParam[] = "included_process_ids";
const char kHistogramNamesParam[] = "histogram_names";
class ConvertableTraceConfigToTraceFormat
: public base::trace_event::ConvertableToTraceFormat {
public:
explicit ConvertableTraceConfigToTraceFormat(const TraceConfig& trace_config)
: trace_config_(trace_config) {}
~ConvertableTraceConfigToTraceFormat() override = default;
void AppendAsTraceFormat(std::string* out) const override {
out->append(trace_config_.ToString());
}
private:
const TraceConfig trace_config_;
};
std::set<MemoryDumpLevelOfDetail> GetDefaultAllowedMemoryDumpModes() {
std::set<MemoryDumpLevelOfDetail> all_modes;
for (uint32_t mode = static_cast<uint32_t>(MemoryDumpLevelOfDetail::FIRST);
mode <= static_cast<uint32_t>(MemoryDumpLevelOfDetail::LAST); mode++) {
all_modes.insert(static_cast<MemoryDumpLevelOfDetail>(mode));
}
return all_modes;
}
} // namespace
TraceConfig::MemoryDumpConfig::HeapProfiler::HeapProfiler()
: breakdown_threshold_bytes(kDefaultBreakdownThresholdBytes) {}
void TraceConfig::MemoryDumpConfig::HeapProfiler::Clear() {
breakdown_threshold_bytes = kDefaultBreakdownThresholdBytes;
}
void TraceConfig::ResetMemoryDumpConfig(
const TraceConfig::MemoryDumpConfig& memory_dump_config) {
memory_dump_config_.Clear();
memory_dump_config_ = memory_dump_config;
}
TraceConfig::MemoryDumpConfig::MemoryDumpConfig() = default;
TraceConfig::MemoryDumpConfig::MemoryDumpConfig(
const MemoryDumpConfig& other) = default;
TraceConfig::MemoryDumpConfig::~MemoryDumpConfig() = default;
void TraceConfig::MemoryDumpConfig::Clear() {
allowed_dump_modes.clear();
triggers.clear();
heap_profiler_options.Clear();
}
void TraceConfig::MemoryDumpConfig::Merge(
const TraceConfig::MemoryDumpConfig& config) {
triggers.insert(triggers.end(), config.triggers.begin(),
config.triggers.end());
allowed_dump_modes.insert(config.allowed_dump_modes.begin(),
config.allowed_dump_modes.end());
heap_profiler_options.breakdown_threshold_bytes =
std::min(heap_profiler_options.breakdown_threshold_bytes,
config.heap_profiler_options.breakdown_threshold_bytes);
}
TraceConfig::ProcessFilterConfig::ProcessFilterConfig() = default;
TraceConfig::ProcessFilterConfig::ProcessFilterConfig(
const ProcessFilterConfig& other) = default;
TraceConfig::ProcessFilterConfig::ProcessFilterConfig(
const std::unordered_set<base::ProcessId>& included_process_ids)
: included_process_ids_(included_process_ids) {}
TraceConfig::ProcessFilterConfig::~ProcessFilterConfig() = default;
void TraceConfig::ProcessFilterConfig::Clear() {
included_process_ids_.clear();
}
void TraceConfig::ProcessFilterConfig::Merge(
const ProcessFilterConfig& config) {
included_process_ids_.insert(config.included_process_ids_.begin(),
config.included_process_ids_.end());
}
void TraceConfig::ProcessFilterConfig::InitializeFromConfigDict(
const Value& dict) {
included_process_ids_.clear();
const Value* value = dict.FindListKey(kIncludedProcessesParam);
if (!value)
return;
for (auto& pid_value : value->GetList()) {
if (pid_value.is_int())
included_process_ids_.insert(pid_value.GetInt());
}
}
void TraceConfig::ProcessFilterConfig::ToDict(Value* dict) const {
if (included_process_ids_.empty())
return;
Value* list = dict->SetKey(kIncludedProcessesParam, Value(Value::Type::LIST));
std::set<base::ProcessId> ordered_set(included_process_ids_.begin(),
included_process_ids_.end());
for (auto process_id : ordered_set)
list->Append(static_cast<int>(process_id));
}
bool TraceConfig::ProcessFilterConfig::IsEnabled(
base::ProcessId process_id) const {
return included_process_ids_.empty() ||
included_process_ids_.count(process_id);
}
TraceConfig::EventFilterConfig::EventFilterConfig(
const std::string& predicate_name)
: predicate_name_(predicate_name) {}
TraceConfig::EventFilterConfig::~EventFilterConfig() = default;
TraceConfig::EventFilterConfig::EventFilterConfig(const EventFilterConfig& tc) {
*this = tc;
}
TraceConfig::EventFilterConfig& TraceConfig::EventFilterConfig::operator=(
const TraceConfig::EventFilterConfig& rhs) {
if (this == &rhs)
return *this;
predicate_name_ = rhs.predicate_name_;
category_filter_ = rhs.category_filter_;
if (!rhs.args_.is_none())
args_ = rhs.args_.Clone();
return *this;
}
void TraceConfig::EventFilterConfig::InitializeFromConfigDict(
const Value& event_filter) {
category_filter_.InitializeFromConfigDict(event_filter);
const Value* args_dict = event_filter.FindDictKey(kFilterArgsParam);
if (args_dict)
args_ = args_dict->Clone();
}
void TraceConfig::EventFilterConfig::SetCategoryFilter(
const TraceConfigCategoryFilter& category_filter) {
category_filter_ = category_filter;
}
void TraceConfig::EventFilterConfig::ToDict(Value* filter_dict) const {
filter_dict->SetStringKey(kFilterPredicateParam, predicate_name());
category_filter_.ToDict(filter_dict);
if (!args_.is_none())
filter_dict->SetKey(kFilterArgsParam, args_.Clone());
}
bool TraceConfig::EventFilterConfig::GetArgAsSet(
const char* key,
std::unordered_set<std::string>* out_set) const {
const Value* list = args_.FindListPath(key);
if (!list)
return false;
for (const Value& item : list->GetList()) {
if (item.is_string())
out_set->insert(item.GetString());
}
return true;
}
bool TraceConfig::EventFilterConfig::IsCategoryGroupEnabled(
const StringPiece& category_group_name) const {
return category_filter_.IsCategoryGroupEnabled(category_group_name);
}
// static
std::string TraceConfig::TraceRecordModeToStr(TraceRecordMode record_mode) {
switch (record_mode) {
case RECORD_UNTIL_FULL:
return kRecordUntilFull;
case RECORD_CONTINUOUSLY:
return kRecordContinuously;
case RECORD_AS_MUCH_AS_POSSIBLE:
return kRecordAsMuchAsPossible;
case ECHO_TO_CONSOLE:
return kTraceToConsole;
default:
NOTREACHED();
}
return kRecordUntilFull;
}
TraceConfig::TraceConfig() {
InitializeDefault();
}
TraceConfig::TraceConfig(StringPiece category_filter_string,
StringPiece trace_options_string) {
InitializeFromStrings(category_filter_string, trace_options_string);
}
TraceConfig::TraceConfig(StringPiece category_filter_string,
TraceRecordMode record_mode) {
InitializeFromStrings(category_filter_string,
TraceConfig::TraceRecordModeToStr(record_mode));
}
TraceConfig::TraceConfig(const Value& config) {
InitializeFromConfigDict(config);
}
TraceConfig::TraceConfig(StringPiece config_string) {
if (!config_string.empty())
InitializeFromConfigString(config_string);
else
InitializeDefault();
}
TraceConfig::TraceConfig(const TraceConfig& tc) = default;
TraceConfig::~TraceConfig() = default;
TraceConfig& TraceConfig::operator=(const TraceConfig& rhs) {
if (this == &rhs)
return *this;
record_mode_ = rhs.record_mode_;
trace_buffer_size_in_events_ = rhs.trace_buffer_size_in_events_;
trace_buffer_size_in_kb_ = rhs.trace_buffer_size_in_kb_;
enable_systrace_ = rhs.enable_systrace_;
enable_argument_filter_ = rhs.enable_argument_filter_;
category_filter_ = rhs.category_filter_;
process_filter_config_ = rhs.process_filter_config_;
memory_dump_config_ = rhs.memory_dump_config_;
event_filters_ = rhs.event_filters_;
histogram_names_ = rhs.histogram_names_;
systrace_events_ = rhs.systrace_events_;
return *this;
}
std::string TraceConfig::ToString() const {
Value dict = ToValue();
std::string json;
JSONWriter::Write(dict, &json);
return json;
}
std::unique_ptr<ConvertableToTraceFormat>
TraceConfig::AsConvertableToTraceFormat() const {
return std::make_unique<ConvertableTraceConfigToTraceFormat>(*this);
}
std::string TraceConfig::ToCategoryFilterString() const {
return category_filter_.ToFilterString();
}
bool TraceConfig::IsCategoryGroupEnabled(
const StringPiece& category_group_name) const {
// TraceLog should call this method only as part of enabling/disabling
// categories.
return category_filter_.IsCategoryGroupEnabled(category_group_name);
}
void TraceConfig::Merge(const TraceConfig& config) {
if (record_mode_ != config.record_mode_
|| enable_systrace_ != config.enable_systrace_
|| enable_argument_filter_ != config.enable_argument_filter_) {
DLOG(ERROR) << "Attempting to merge trace config with a different "
<< "set of options.";
}
DCHECK_EQ(trace_buffer_size_in_events_, config.trace_buffer_size_in_events_)
<< "Cannot change trace buffer size";
category_filter_.Merge(config.category_filter_);
memory_dump_config_.Merge(config.memory_dump_config_);
process_filter_config_.Merge(config.process_filter_config_);
event_filters_.insert(event_filters_.end(), config.event_filters().begin(),
config.event_filters().end());
histogram_names_.insert(config.histogram_names().begin(),
config.histogram_names().end());
}
void TraceConfig::Clear() {
record_mode_ = RECORD_UNTIL_FULL;
trace_buffer_size_in_events_ = 0;
trace_buffer_size_in_kb_ = 0;
enable_systrace_ = false;
enable_argument_filter_ = false;
category_filter_.Clear();
memory_dump_config_.Clear();
process_filter_config_.Clear();
event_filters_.clear();
histogram_names_.clear();
systrace_events_.clear();
}
void TraceConfig::InitializeDefault() {
record_mode_ = RECORD_UNTIL_FULL;
trace_buffer_size_in_events_ = 0;
trace_buffer_size_in_kb_ = 0;
enable_systrace_ = false;
enable_argument_filter_ = false;
}
void TraceConfig::InitializeFromConfigDict(const Value& dict) {
record_mode_ = RECORD_UNTIL_FULL;
const std::string* record_mode = dict.FindStringKey(kRecordModeParam);
if (record_mode) {
if (*record_mode == kRecordUntilFull) {
record_mode_ = RECORD_UNTIL_FULL;
} else if (*record_mode == kRecordContinuously) {
record_mode_ = RECORD_CONTINUOUSLY;
} else if (*record_mode == kTraceToConsole) {
record_mode_ = ECHO_TO_CONSOLE;
} else if (*record_mode == kRecordAsMuchAsPossible) {
record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
}
}
trace_buffer_size_in_events_ =
dict.FindIntKey(kTraceBufferSizeInEvents).value_or(0);
trace_buffer_size_in_kb_ = dict.FindIntKey(kTraceBufferSizeInKb).value_or(0);
enable_systrace_ = dict.FindBoolKey(kEnableSystraceParam).value_or(false);
enable_argument_filter_ =
dict.FindBoolKey(kEnableArgumentFilterParam).value_or(false);
category_filter_.InitializeFromConfigDict(dict);
process_filter_config_.InitializeFromConfigDict(dict);
const Value* category_event_filters = dict.FindListKey(kEventFiltersParam);
if (category_event_filters)
SetEventFiltersFromConfigList(*category_event_filters);
const Value* histogram_names = dict.FindListKey(kHistogramNamesParam);
if (histogram_names)
SetHistogramNamesFromConfigList(*histogram_names);
if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
// If dump triggers are not set, the client is using the legacy format with
// just the category enabled. So, use the default periodic dump config.
const Value* memory_dump_config = dict.FindDictKey(kMemoryDumpConfigParam);
if (memory_dump_config)
SetMemoryDumpConfigFromConfigDict(*memory_dump_config);
else
SetDefaultMemoryDumpConfig();
}
systrace_events_.clear();
if (enable_systrace_) {
const Value* systrace_events = dict.FindListKey(kSystraceEventsParam);
if (systrace_events) {
for (const Value& value : systrace_events->GetList())
systrace_events_.insert(value.GetString());
}
}
}
void TraceConfig::InitializeFromConfigString(StringPiece config_string) {
base::Optional<Value> dict = JSONReader::Read(config_string);
if (dict && dict->is_dict())
InitializeFromConfigDict(*dict);
else
InitializeDefault();
}
void TraceConfig::InitializeFromStrings(StringPiece category_filter_string,
StringPiece trace_options_string) {
if (!category_filter_string.empty())
category_filter_.InitializeFromString(category_filter_string);
record_mode_ = RECORD_UNTIL_FULL;
trace_buffer_size_in_events_ = 0;
trace_buffer_size_in_kb_ = 0;
enable_systrace_ = false;
systrace_events_.clear();
enable_argument_filter_ = false;
if (!trace_options_string.empty()) {
std::vector<std::string> split =
SplitString(trace_options_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
for (const std::string& token : split) {
if (token == kRecordUntilFull) {
record_mode_ = RECORD_UNTIL_FULL;
} else if (token == kRecordContinuously) {
record_mode_ = RECORD_CONTINUOUSLY;
} else if (token == kTraceToConsole) {
record_mode_ = ECHO_TO_CONSOLE;
} else if (token == kRecordAsMuchAsPossible) {
record_mode_ = RECORD_AS_MUCH_AS_POSSIBLE;
} else if (token.find(kEnableSystrace) == 0) {
// Find optional events list.
const size_t length = token.length();
if (length == kEnableSystraceLength) {
// Use all predefined categories.
enable_systrace_ = true;
continue;
}
const auto system_events_not_trimmed =
token.substr(kEnableSystraceLength);
const auto system_events =
TrimString(system_events_not_trimmed, kWhitespaceASCII, TRIM_ALL);
if (system_events[0] != '=') {
LOG(ERROR) << "Failed to parse " << token;
continue;
}
enable_systrace_ = true;
const std::vector<std::string> split_systrace_events = SplitString(
system_events.substr(1), " ", TRIM_WHITESPACE, SPLIT_WANT_NONEMPTY);
for (const std::string& systrace_event : split_systrace_events)
systrace_events_.insert(systrace_event);
} else if (token == kEnableArgumentFilter) {
enable_argument_filter_ = true;
}
}
}
if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
SetDefaultMemoryDumpConfig();
}
}
void TraceConfig::SetMemoryDumpConfigFromConfigDict(
const Value& memory_dump_config) {
// Set allowed dump modes.
memory_dump_config_.allowed_dump_modes.clear();
const Value* allowed_modes_list =
memory_dump_config.FindListKey(kAllowedDumpModesParam);
if (allowed_modes_list) {
for (const Value& item : allowed_modes_list->GetList()) {
DCHECK(item.is_string());
memory_dump_config_.allowed_dump_modes.insert(
StringToMemoryDumpLevelOfDetail(item.GetString()));
}
} else {
// If the allowed modes param is not given, then allow all modes by default.
memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
}
// Set triggers
memory_dump_config_.triggers.clear();
const Value* trigger_list = memory_dump_config.FindListKey(kTriggersParam);
if (trigger_list) {
for (const Value& trigger : trigger_list->GetList()) {
if (!trigger.is_dict())
continue;
MemoryDumpConfig::Trigger dump_config;
base::Optional<int> interval = trigger.FindIntKey(kMinTimeBetweenDumps);
if (!interval) {
// If "min_time_between_dumps_ms" param was not given, then the trace
// config uses the old format, where only periodic dumps are supported.
interval = trigger.FindIntKey(kPeriodicIntervalLegacyParam);
dump_config.trigger_type = MemoryDumpType::PERIODIC_INTERVAL;
} else {
const std::string* trigger_type_str =
trigger.FindStringKey(kTriggerTypeParam);
DCHECK(trigger_type_str);
dump_config.trigger_type = StringToMemoryDumpType(*trigger_type_str);
}
DCHECK(interval.has_value());
DCHECK_GT(*interval, 0);
dump_config.min_time_between_dumps_ms = static_cast<uint32_t>(*interval);
const std::string* level_of_detail_str =
trigger.FindStringKey(kTriggerModeParam);
DCHECK(level_of_detail_str);
dump_config.level_of_detail =
StringToMemoryDumpLevelOfDetail(*level_of_detail_str);
memory_dump_config_.triggers.push_back(dump_config);
}
}
// Set heap profiler options
const Value* heap_profiler_options =
memory_dump_config.FindDictKey(kHeapProfilerOptions);
if (heap_profiler_options) {
base::Optional<int> min_size_bytes =
heap_profiler_options->FindIntKey(kBreakdownThresholdBytes);
if (min_size_bytes && *min_size_bytes >= 0) {
memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes =
*min_size_bytes;
} else {
memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes =
MemoryDumpConfig::HeapProfiler::kDefaultBreakdownThresholdBytes;
}
}
}
void TraceConfig::SetDefaultMemoryDumpConfig() {
memory_dump_config_.Clear();
memory_dump_config_.allowed_dump_modes = GetDefaultAllowedMemoryDumpModes();
}
void TraceConfig::SetProcessFilterConfig(const ProcessFilterConfig& config) {
process_filter_config_ = config;
}
void TraceConfig::SetHistogramNamesFromConfigList(
const Value& histogram_names) {
histogram_names_.clear();
for (const Value& value : histogram_names.GetList())
histogram_names_.insert(value.GetString());
}
void TraceConfig::SetEventFiltersFromConfigList(
const Value& category_event_filters) {
event_filters_.clear();
for (const Value& event_filter : category_event_filters.GetList()) {
if (!event_filter.is_dict())
continue;
const std::string* predicate_name =
event_filter.FindStringKey(kFilterPredicateParam);
CHECK(predicate_name) << "Invalid predicate name in category event filter.";
EventFilterConfig new_config(*predicate_name);
new_config.InitializeFromConfigDict(event_filter);
event_filters_.push_back(new_config);
}
}
Value TraceConfig::ToValue() const {
Value dict(Value::Type::DICTIONARY);
dict.SetStringKey(kRecordModeParam,
TraceConfig::TraceRecordModeToStr(record_mode_));
dict.SetBoolKey(kEnableSystraceParam, enable_systrace_);
dict.SetBoolKey(kEnableArgumentFilterParam, enable_argument_filter_);
if (trace_buffer_size_in_events_ > 0)
dict.SetIntKey(kTraceBufferSizeInEvents, trace_buffer_size_in_events_);
if (trace_buffer_size_in_kb_ > 0)
dict.SetIntKey(kTraceBufferSizeInKb, trace_buffer_size_in_kb_);
category_filter_.ToDict(&dict);
process_filter_config_.ToDict(&dict);
if (!event_filters_.empty()) {
std::vector<Value> filter_list;
for (const EventFilterConfig& filter : event_filters_) {
filter_list.emplace_back(Value::Type::DICTIONARY);
filter.ToDict(&filter_list.back());
}
dict.SetKey(kEventFiltersParam, Value(std::move(filter_list)));
}
if (category_filter_.IsCategoryEnabled(MemoryDumpManager::kTraceCategory)) {
std::vector<Value> allowed_modes;
for (auto dump_mode : memory_dump_config_.allowed_dump_modes)
allowed_modes.emplace_back(MemoryDumpLevelOfDetailToString(dump_mode));
Value memory_dump_config(Value::Type::DICTIONARY);
memory_dump_config.SetKey(kAllowedDumpModesParam,
Value(std::move(allowed_modes)));
std::vector<Value> triggers_list;
for (const auto& config : memory_dump_config_.triggers) {
triggers_list.emplace_back(Value::Type::DICTIONARY);
Value& trigger_dict = triggers_list.back();
trigger_dict.SetStringKey(kTriggerTypeParam,
MemoryDumpTypeToString(config.trigger_type));
trigger_dict.SetIntKey(
kMinTimeBetweenDumps,
static_cast<int>(config.min_time_between_dumps_ms));
trigger_dict.SetStringKey(
kTriggerModeParam,
MemoryDumpLevelOfDetailToString(config.level_of_detail));
}
// An empty triggers list is still specified explicitly, since it means that
// the periodic dumps are not enabled.
memory_dump_config.SetKey(kTriggersParam, Value(std::move(triggers_list)));
if (memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes !=
MemoryDumpConfig::HeapProfiler::kDefaultBreakdownThresholdBytes) {
Value options(Value::Type::DICTIONARY);
options.SetIntKey(
kBreakdownThresholdBytes,
memory_dump_config_.heap_profiler_options.breakdown_threshold_bytes);
memory_dump_config.SetKey(kHeapProfilerOptions, std::move(options));
}
dict.SetKey(kMemoryDumpConfigParam, std::move(memory_dump_config));
}
if (!histogram_names_.empty()) {
std::vector<Value> histogram_names;
for (const std::string& histogram_name : histogram_names_)
histogram_names.emplace_back(histogram_name);
dict.SetKey(kHistogramNamesParam, Value(std::move(histogram_names)));
}
if (enable_systrace_) {
if (!systrace_events_.empty()) {
std::vector<Value> systrace_events;
for (const std::string& systrace_event : systrace_events_)
systrace_events.emplace_back(systrace_event);
dict.SetKey(kSystraceEventsParam, Value(std::move(systrace_events)));
}
}
return dict;
}
void TraceConfig::EnableSystraceEvent(const std::string& systrace_event) {
systrace_events_.insert(systrace_event);
}
void TraceConfig::EnableHistogram(const std::string& histogram_name) {
histogram_names_.insert(histogram_name);
}
std::string TraceConfig::ToTraceOptionsString() const {
std::string ret;
switch (record_mode_) {
case RECORD_UNTIL_FULL:
ret = kRecordUntilFull;
break;
case RECORD_CONTINUOUSLY:
ret = kRecordContinuously;
break;
case RECORD_AS_MUCH_AS_POSSIBLE:
ret = kRecordAsMuchAsPossible;
break;
case ECHO_TO_CONSOLE:
ret = kTraceToConsole;
break;
default:
NOTREACHED();
}
if (enable_systrace_) {
ret += ",";
ret += kEnableSystrace;
bool first_param = true;
for (const std::string& systrace_event : systrace_events_) {
if (first_param) {
ret += "=";
first_param = false;
} else {
ret += " ";
}
ret = ret + systrace_event;
}
}
if (enable_argument_filter_) {
ret += ",";
ret += kEnableArgumentFilter;
}
return ret;
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,350 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_H_
#define BASE_TRACE_EVENT_TRACE_CONFIG_H_
#include <stdint.h>
#include <memory>
#include <set>
#include <string>
#include <unordered_set>
#include <vector>
#include "base/base_export.h"
#include "base/gtest_prod_util.h"
#include "base/strings/string_piece.h"
#include "base/trace_event/memory_dump_request_args.h"
#include "base/trace_event/trace_config_category_filter.h"
#include "base/values.h"
namespace base {
namespace trace_event {
class ConvertableToTraceFormat;
// Options that determine how the trace buffer stores data.
// A Java counterpart will be generated for this enum.
// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base
enum TraceRecordMode {
// Record until the trace buffer is full.
RECORD_UNTIL_FULL,
// Record until the user ends the trace. The trace buffer is a fixed size
// and we use it as a ring buffer during recording.
RECORD_CONTINUOUSLY,
// Record until the trace buffer is full, but with a huge buffer size.
RECORD_AS_MUCH_AS_POSSIBLE,
// Echo to console. Events are discarded.
ECHO_TO_CONSOLE,
};
class BASE_EXPORT TraceConfig {
public:
using StringList = std::vector<std::string>;
// Specifies the memory dump config for tracing.
// Used only when "memory-infra" category is enabled.
struct BASE_EXPORT MemoryDumpConfig {
MemoryDumpConfig();
MemoryDumpConfig(const MemoryDumpConfig& other);
~MemoryDumpConfig();
// Specifies the triggers in the memory dump config.
struct Trigger {
uint32_t min_time_between_dumps_ms;
MemoryDumpLevelOfDetail level_of_detail;
MemoryDumpType trigger_type;
};
// Specifies the configuration options for the heap profiler.
struct HeapProfiler {
// Default value for |breakdown_threshold_bytes|.
enum { kDefaultBreakdownThresholdBytes = 1024 };
HeapProfiler();
// Reset the options to default.
void Clear();
uint32_t breakdown_threshold_bytes;
};
// Reset the values in the config.
void Clear();
void Merge(const MemoryDumpConfig& config);
// Set of memory dump modes allowed for the tracing session. The explicitly
// triggered dumps will be successful only if the dump mode is allowed in
// the config.
std::set<MemoryDumpLevelOfDetail> allowed_dump_modes;
std::vector<Trigger> triggers;
HeapProfiler heap_profiler_options;
};
class BASE_EXPORT ProcessFilterConfig {
public:
ProcessFilterConfig();
explicit ProcessFilterConfig(
const std::unordered_set<base::ProcessId>& included_process_ids);
ProcessFilterConfig(const ProcessFilterConfig&);
~ProcessFilterConfig();
bool empty() const { return included_process_ids_.empty(); }
void Clear();
void Merge(const ProcessFilterConfig&);
void InitializeFromConfigDict(const Value&);
void ToDict(Value*) const;
bool IsEnabled(base::ProcessId) const;
const std::unordered_set<base::ProcessId>& included_process_ids() const {
return included_process_ids_;
}
bool operator==(const ProcessFilterConfig& other) const {
return included_process_ids_ == other.included_process_ids_;
}
private:
std::unordered_set<base::ProcessId> included_process_ids_;
};
class BASE_EXPORT EventFilterConfig {
public:
explicit EventFilterConfig(const std::string& predicate_name);
EventFilterConfig(const EventFilterConfig& tc);
~EventFilterConfig();
EventFilterConfig& operator=(const EventFilterConfig& rhs);
void InitializeFromConfigDict(const Value& event_filter);
void SetCategoryFilter(const TraceConfigCategoryFilter& category_filter);
void ToDict(Value* filter_dict) const;
bool GetArgAsSet(const char* key, std::unordered_set<std::string>*) const;
bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
const std::string& predicate_name() const { return predicate_name_; }
const Value& filter_args() const { return args_; }
const TraceConfigCategoryFilter& category_filter() const {
return category_filter_;
}
private:
std::string predicate_name_;
TraceConfigCategoryFilter category_filter_;
Value args_;
};
typedef std::vector<EventFilterConfig> EventFilters;
static std::string TraceRecordModeToStr(TraceRecordMode record_mode);
TraceConfig();
// Create TraceConfig object from category filter and trace options strings.
//
// |category_filter_string| is a comma-delimited list of category wildcards.
// A category can have an optional '-' prefix to make it an excluded category.
// All the same rules apply above, so for example, having both included and
// excluded categories in the same list would not be supported.
//
// |trace_options_string| is a comma-delimited list of trace options.
// Possible options are: "record-until-full", "record-continuously",
// "record-as-much-as-possible", "trace-to-console", "enable-systrace" and
// "enable-argument-filter".
// The first 4 options are trace recording modes and hence
// mutually exclusive. If more than one trace recording modes appear in the
// options_string, the last one takes precedence. If no trace recording mode
// is specified, the recording mode is RECORD_UNTIL_FULL.
//
// The trace option will first be reset to the default option
// (record_mode set to RECORD_UNTIL_FULL, enable_systrace and
// enable_argument_filter set to false) before options parsed from
// |trace_options_string| are applied to it. If |trace_options_string| is
// invalid, the final state of trace options is undefined.
//
// Example: TraceConfig("test_MyTest*", "record-until-full");
// Example: TraceConfig("test_MyTest*,test_OtherStuff",
// "record-continuously");
// Example: TraceConfig("-excluded_category1,-excluded_category2",
// "record-until-full, trace-to-console");
// would set ECHO_TO_CONSOLE as the recording mode.
// Example: TraceConfig("-*,webkit", "");
// would disable everything but webkit; and use default options.
// Example: TraceConfig("-webkit", "");
// would enable everything but webkit; and use default options.
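// Example: TraceConfig("webkit", "record-until-full,enable-systrace=sched freq");
//     would enable the webkit category, turn on systrace, and store the
//     space-separated names "sched" and "freq" (illustrative values) in
//     systrace_events().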
TraceConfig(StringPiece category_filter_string,
StringPiece trace_options_string);
TraceConfig(StringPiece category_filter_string, TraceRecordMode record_mode);
// Create TraceConfig object from the trace config string.
//
// |config_string| is a dictionary formatted as a JSON string, containing both
// category filters and trace options.
//
// Example:
// {
// "record_mode": "record-continuously",
// "enable_systrace": true,
// "enable_argument_filter": true,
// "included_categories": ["included",
// "inc_pattern*",
// "disabled-by-default-memory-infra"],
// "excluded_categories": ["excluded", "exc_pattern*"],
// "memory_dump_config": {
// "triggers": [
// {
// "mode": "detailed",
// "periodic_interval_ms": 2000
// }
// ]
// }
// }
//
// Note: memory_dump_config can be specified only if
// disabled-by-default-memory-infra category is enabled.
explicit TraceConfig(StringPiece config_string);
// Functionally identical to the above, but takes a parsed dictionary as input
// instead of its JSON serialization.
explicit TraceConfig(const Value& config);
TraceConfig(const TraceConfig& tc);
~TraceConfig();
TraceConfig& operator=(const TraceConfig& rhs);
TraceRecordMode GetTraceRecordMode() const { return record_mode_; }
size_t GetTraceBufferSizeInEvents() const {
return trace_buffer_size_in_events_;
}
size_t GetTraceBufferSizeInKb() const { return trace_buffer_size_in_kb_; }
bool IsSystraceEnabled() const { return enable_systrace_; }
bool IsArgumentFilterEnabled() const { return enable_argument_filter_; }
void SetTraceRecordMode(TraceRecordMode mode) { record_mode_ = mode; }
void SetTraceBufferSizeInEvents(size_t size) {
trace_buffer_size_in_events_ = size;
}
void SetTraceBufferSizeInKb(size_t size) { trace_buffer_size_in_kb_ = size; }
void EnableSystrace() { enable_systrace_ = true; }
void EnableSystraceEvent(const std::string& systrace_event);
void EnableArgumentFilter() { enable_argument_filter_ = true; }
void EnableHistogram(const std::string& histogram_name);
// Writes the string representation of the TraceConfig. The string is JSON
// formatted.
std::string ToString() const;
// Returns a copy of the TraceConfig wrapped in a ConvertableToTraceFormat
std::unique_ptr<ConvertableToTraceFormat> AsConvertableToTraceFormat() const;
// Write the string representation of the CategoryFilter part.
std::string ToCategoryFilterString() const;
// Write the string representation of the trace options part (record mode,
// systrace, argument filtering). Does not include category filters, event
// filters, or memory dump configs.
std::string ToTraceOptionsString() const;
// Returns true if at least one category in the list is enabled by this
// trace config. This is used to determine if the category filters are
// enabled in the TRACE_* macros.
bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
// Merges config with the current TraceConfig
void Merge(const TraceConfig& config);
void Clear();
// Clears and resets the memory dump config.
void ResetMemoryDumpConfig(const MemoryDumpConfig& memory_dump_config);
const TraceConfigCategoryFilter& category_filter() const {
return category_filter_;
}
const MemoryDumpConfig& memory_dump_config() const {
return memory_dump_config_;
}
const ProcessFilterConfig& process_filter_config() const {
return process_filter_config_;
}
void SetProcessFilterConfig(const ProcessFilterConfig&);
const EventFilters& event_filters() const { return event_filters_; }
void SetEventFilters(const EventFilters& filter_configs) {
event_filters_ = filter_configs;
}
const std::unordered_set<std::string>& systrace_events() const {
return systrace_events_;
}
const std::unordered_set<std::string>& histogram_names() const {
return histogram_names_;
}
private:
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, TraceConfigFromValidLegacyFormat);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest,
TraceConfigFromInvalidLegacyStrings);
FRIEND_TEST_ALL_PREFIXES(TraceConfigTest, SystraceEventsSerialization);
// The default trace config, used when none is provided.
// Allows all non-disabled-by-default categories through, except if they end
// in the suffix 'Debug' or 'Test'.
void InitializeDefault();
// Initialize from a config dictionary.
void InitializeFromConfigDict(const Value& dict);
// Initialize from a config string.
void InitializeFromConfigString(StringPiece config_string);
// Initialize from category filter and trace options strings
void InitializeFromStrings(StringPiece category_filter_string,
StringPiece trace_options_string);
void SetMemoryDumpConfigFromConfigDict(const Value& memory_dump_config);
void SetDefaultMemoryDumpConfig();
void SetHistogramNamesFromConfigList(const Value& histogram_names);
void SetEventFiltersFromConfigList(const Value& event_filters);
Value ToValue() const;
TraceRecordMode record_mode_;
size_t trace_buffer_size_in_events_ = 0; // 0 specifies default size
size_t trace_buffer_size_in_kb_ = 0; // 0 specifies default size
bool enable_systrace_ : 1;
bool enable_argument_filter_ : 1;
TraceConfigCategoryFilter category_filter_;
MemoryDumpConfig memory_dump_config_;
ProcessFilterConfig process_filter_config_;
EventFilters event_filters_;
std::unordered_set<std::string> histogram_names_;
std::unordered_set<std::string> systrace_events_;
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_CONFIG_H_

View file

@ -0,0 +1,236 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_config_category_filter.h"
#include "base/memory/ptr_util.h"
#include "base/strings/pattern.h"
#include "base/strings/string_split.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace trace_event {
namespace {
const char kIncludedCategoriesParam[] = "included_categories";
const char kExcludedCategoriesParam[] = "excluded_categories";
}
TraceConfigCategoryFilter::TraceConfigCategoryFilter() = default;
TraceConfigCategoryFilter::TraceConfigCategoryFilter(
const TraceConfigCategoryFilter& other) = default;
TraceConfigCategoryFilter::~TraceConfigCategoryFilter() = default;
TraceConfigCategoryFilter& TraceConfigCategoryFilter::operator=(
const TraceConfigCategoryFilter& rhs) = default;
void TraceConfigCategoryFilter::InitializeFromString(
const StringPiece& category_filter_string) {
std::vector<StringPiece> split = SplitStringPiece(
category_filter_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL);
for (const StringPiece& category : split) {
// Ignore empty categories.
if (category.empty())
continue;
if (category.front() == '-') {
// Excluded categories start with '-'.
// Remove '-' from category string.
excluded_categories_.push_back(category.substr(1).as_string());
} else if (category.starts_with(TRACE_DISABLED_BY_DEFAULT(""))) {
disabled_categories_.push_back(category.as_string());
} else {
included_categories_.push_back(category.as_string());
}
}
}
void TraceConfigCategoryFilter::InitializeFromConfigDict(const Value& dict) {
const Value* included_category_list =
dict.FindListKey(kIncludedCategoriesParam);
if (included_category_list)
SetCategoriesFromIncludedList(*included_category_list);
const Value* excluded_category_list =
dict.FindListKey(kExcludedCategoriesParam);
if (excluded_category_list)
SetCategoriesFromExcludedList(*excluded_category_list);
}
bool TraceConfigCategoryFilter::IsCategoryGroupEnabled(
const StringPiece& category_group_name) const {
bool had_enabled_by_default = false;
DCHECK(!category_group_name.empty());
CStringTokenizer category_group_tokens(category_group_name.begin(),
category_group_name.end(), ",");
while (category_group_tokens.GetNext()) {
StringPiece category_group_token = category_group_tokens.token_piece();
// Don't allow empty tokens, nor tokens with leading or trailing space.
DCHECK(IsCategoryNameAllowed(category_group_token))
<< "Disallowed category string";
if (IsCategoryEnabled(category_group_token))
return true;
if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
had_enabled_by_default = true;
}
// Do a second pass to check for explicitly disabled categories
// (those explicitly enabled have priority due to first pass).
category_group_tokens.Reset();
bool category_group_disabled = false;
while (category_group_tokens.GetNext()) {
StringPiece category_group_token = category_group_tokens.token_piece();
for (const std::string& category : excluded_categories_) {
if (MatchPattern(category_group_token, category)) {
// Current token of category_group_name is present in excluded_list.
// Flag the exclusion and proceed further to check if any of the
// remaining categories of category_group_name is not present in the
// excluded_ list.
category_group_disabled = true;
break;
}
// One of the categories of category_group_name is not present in the
// excluded_ list. So, if it's not a disabled-by-default category, it has
// to be in the included_ list. Enable the category_group_name for
// recording.
if (!MatchPattern(category_group_token, TRACE_DISABLED_BY_DEFAULT("*")))
category_group_disabled = false;
}
// One of the categories present in category_group_name is not in the
// excluded_ list, which implies this category_group_name group can be
// enabled for recording.
if (!category_group_disabled)
break;
}
// If the category group is not excluded and there are no included patterns,
// we consider this category group enabled, as long as it had categories
// other than disabled-by-default ones.
return !category_group_disabled && had_enabled_by_default &&
included_categories_.empty();
}
bool TraceConfigCategoryFilter::IsCategoryEnabled(
const StringPiece& category_name) const {
// Check the disabled- filters and the disabled-* wildcard first so that a
// "*" filter does not include the disabled.
for (const std::string& category : disabled_categories_) {
if (MatchPattern(category_name, category))
return true;
}
if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
return false;
for (const std::string& category : included_categories_) {
if (MatchPattern(category_name, category))
return true;
}
return false;
}
void TraceConfigCategoryFilter::Merge(const TraceConfigCategoryFilter& config) {
// Keep included patterns only if both filters have an included entry.
// Otherwise, one of the filters was specifying "*" and we want to honor the
// broadest filter.
if (!included_categories_.empty() && !config.included_categories_.empty()) {
included_categories_.insert(included_categories_.end(),
config.included_categories_.begin(),
config.included_categories_.end());
} else {
included_categories_.clear();
}
disabled_categories_.insert(disabled_categories_.end(),
config.disabled_categories_.begin(),
config.disabled_categories_.end());
excluded_categories_.insert(excluded_categories_.end(),
config.excluded_categories_.begin(),
config.excluded_categories_.end());
}
void TraceConfigCategoryFilter::Clear() {
included_categories_.clear();
disabled_categories_.clear();
excluded_categories_.clear();
}
void TraceConfigCategoryFilter::ToDict(Value* dict) const {
StringList categories(included_categories_);
categories.insert(categories.end(), disabled_categories_.begin(),
disabled_categories_.end());
AddCategoriesToDict(categories, kIncludedCategoriesParam, dict);
AddCategoriesToDict(excluded_categories_, kExcludedCategoriesParam, dict);
}
std::string TraceConfigCategoryFilter::ToFilterString() const {
std::string filter_string;
WriteCategoryFilterString(included_categories_, &filter_string, true);
WriteCategoryFilterString(disabled_categories_, &filter_string, true);
WriteCategoryFilterString(excluded_categories_, &filter_string, false);
return filter_string;
}
void TraceConfigCategoryFilter::SetCategoriesFromIncludedList(
const Value& included_list) {
included_categories_.clear();
for (const Value& item : included_list.GetList()) {
if (!item.is_string())
continue;
const std::string& category = item.GetString();
if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
TRACE_DISABLED_BY_DEFAULT("")) == 0) {
disabled_categories_.push_back(category);
} else {
included_categories_.push_back(category);
}
}
}
void TraceConfigCategoryFilter::SetCategoriesFromExcludedList(
const Value& excluded_list) {
excluded_categories_.clear();
for (const Value& item : excluded_list.GetList()) {
if (item.is_string())
excluded_categories_.push_back(item.GetString());
}
}
void TraceConfigCategoryFilter::AddCategoriesToDict(
const StringList& categories,
const char* param,
Value* dict) const {
if (categories.empty())
return;
std::vector<base::Value> list;
for (const std::string& category : categories)
list.emplace_back(category);
dict->SetKey(param, base::Value(std::move(list)));
}
void TraceConfigCategoryFilter::WriteCategoryFilterString(
const StringList& values,
std::string* out,
bool included) const {
bool prepend_comma = !out->empty();
int token_cnt = 0;
for (const std::string& category : values) {
if (token_cnt > 0 || prepend_comma)
StringAppendF(out, ",");
StringAppendF(out, "%s%s", (included ? "" : "-"), category.c_str());
++token_cnt;
}
}
// static
bool TraceConfigCategoryFilter::IsCategoryNameAllowed(StringPiece str) {
return !str.empty() && str.front() != ' ' && str.back() != ' ';
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,81 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
#define BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_
#include <string>
#include <vector>
#include "base/base_export.h"
#include "base/strings/string_piece.h"
#include "base/values.h"
namespace base {
namespace trace_event {
// Configuration of categories enabled and disabled in TraceConfig.
class BASE_EXPORT TraceConfigCategoryFilter {
public:
using StringList = std::vector<std::string>;
TraceConfigCategoryFilter();
TraceConfigCategoryFilter(const TraceConfigCategoryFilter& other);
~TraceConfigCategoryFilter();
TraceConfigCategoryFilter& operator=(const TraceConfigCategoryFilter& rhs);
// Initializes from category filter string. See TraceConfig constructor for
// description of how to write category filter string.
void InitializeFromString(const StringPiece& category_filter_string);
// Initializes TraceConfigCategoryFilter object from the config dictionary.
void InitializeFromConfigDict(const Value& dict);
// Merges this with category filter config.
void Merge(const TraceConfigCategoryFilter& config);
void Clear();
// Returns true if at least one category in the list is enabled by this
// trace config. This is used to determine if the category filters are
// enabled in the TRACE_* macros.
bool IsCategoryGroupEnabled(const StringPiece& category_group_name) const;
// Returns true if the category is enabled according to this trace config.
// This tells whether a category is enabled from the TraceConfig's
// perspective. Please refer to IsCategoryGroupEnabled() to determine if a
// category is enabled from the tracing runtime's perspective.
bool IsCategoryEnabled(const StringPiece& category_name) const;
void ToDict(Value* dict) const;
std::string ToFilterString() const;
// Returns true if category name is a valid string.
static bool IsCategoryNameAllowed(StringPiece str);
const StringList& included_categories() const { return included_categories_; }
const StringList& excluded_categories() const { return excluded_categories_; }
private:
void SetCategoriesFromIncludedList(const Value& included_list);
void SetCategoriesFromExcludedList(const Value& excluded_list);
void AddCategoriesToDict(const StringList& categories,
const char* param,
Value* dict) const;
void WriteCategoryFilterString(const StringList& values,
std::string* out,
bool included) const;
StringList included_categories_;
StringList disabled_categories_;
StringList excluded_categories_;
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_CONFIG_CATEGORY_FILTER_H_

View file

@ -0,0 +1,151 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
#define BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_dump_manager.h"
namespace base {
namespace trace_event {
class TraceConfigMemoryTestUtil {
public:
static std::string GetTraceConfig_LegacyPeriodicTriggers(int light_period,
int heavy_period) {
return StringPrintf(
"{"
"\"enable_argument_filter\":false,"
"\"enable_systrace\":false,"
"\"excluded_categories\":["
"\"*\""
"],"
"\"included_categories\":["
"\"%s\""
"],"
"\"memory_dump_config\":{"
"\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"heap_profiler_options\":{"
"\"breakdown_threshold_bytes\":2048"
"},"
"\"triggers\":["
"{"
"\"mode\":\"light\","
"\"periodic_interval_ms\":%d"
"},"
"{"
"\"mode\":\"detailed\","
"\"periodic_interval_ms\":%d"
"}"
"]"
"},"
"\"record_mode\":\"record-until-full\""
"}",
MemoryDumpManager::kTraceCategory, light_period, heavy_period);
}
static std::string GetTraceConfig_PeriodicTriggers(int light_period,
int heavy_period) {
return StringPrintf(
"{"
"\"enable_argument_filter\":false,"
"\"enable_systrace\":false,"
"\"excluded_categories\":["
"\"*\""
"],"
"\"included_categories\":["
"\"%s\""
"],"
"\"memory_dump_config\":{"
"\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"heap_profiler_options\":{"
"\"breakdown_threshold_bytes\":2048"
"},"
"\"triggers\":["
"{"
"\"min_time_between_dumps_ms\":%d,"
"\"mode\":\"light\","
"\"type\":\"periodic_interval\""
"},"
"{"
"\"min_time_between_dumps_ms\":%d,"
"\"mode\":\"detailed\","
"\"type\":\"periodic_interval\""
"}"
"]"
"},"
"\"record_mode\":\"record-until-full\""
"}",
MemoryDumpManager::kTraceCategory, light_period, heavy_period);
}
static std::string GetTraceConfig_EmptyTriggers() {
return StringPrintf(
"{"
"\"enable_argument_filter\":false,"
"\"enable_systrace\":false,"
"\"excluded_categories\":["
"\"*\""
"],"
"\"included_categories\":["
"\"%s\""
"],"
"\"memory_dump_config\":{"
"\"allowed_dump_modes\":[\"background\",\"light\",\"detailed\"],"
"\"triggers\":["
"]"
"},"
"\"record_mode\":\"record-until-full\""
"}",
MemoryDumpManager::kTraceCategory);
}
static std::string GetTraceConfig_NoTriggers() {
return StringPrintf(
"{"
"\"enable_argument_filter\":false,"
"\"enable_systrace\":false,"
"\"excluded_categories\":["
"\"*\""
"],"
"\"included_categories\":["
"\"%s\""
"],"
"\"record_mode\":\"record-until-full\""
"}",
MemoryDumpManager::kTraceCategory);
}
static std::string GetTraceConfig_BackgroundTrigger(int period_ms) {
return StringPrintf(
"{"
"\"enable_argument_filter\":false,"
"\"enable_systrace\":false,"
"\"excluded_categories\":["
"\"*\""
"],"
"\"included_categories\":["
"\"%s\""
"],"
"\"memory_dump_config\":{"
"\"allowed_dump_modes\":[\"background\"],"
"\"triggers\":["
"{"
"\"min_time_between_dumps_ms\":%d,"
"\"mode\":\"background\","
"\"type\":\"periodic_interval\""
"}"
"]"
"},"
"\"record_mode\":\"record-until-full\""
"}",
MemoryDumpManager::kTraceCategory, period_ms);
}
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_CONFIG_MEMORY_TEST_UTIL_H_

View file

@ -0,0 +1,837 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_EVENT_H_
#define BASE_TRACE_EVENT_TRACE_EVENT_H_
// This header file defines implementation details of how the trace macros in
// trace_event_common.h collect and store trace events. Anything not
// implementation-specific should go in trace_event_common.h instead of here.
#include <stddef.h>
#include <stdint.h>
#include <string>
#include "base/atomicops.h"
#include "base/debug/debugging_buildflags.h"
#include "base/macros.h"
#include "base/time/time.h"
#include "base/time/time_override.h"
#include "base/trace_event/builtin_categories.h"
#include "base/trace_event/common/trace_event_common.h"
#include "base/trace_event/heap_profiler.h"
#include "base/trace_event/log_message.h"
#include "base/trace_event/thread_instruction_count.h"
#include "base/trace_event/trace_arguments.h"
#include "base/trace_event/trace_category.h"
#include "base/trace_event/trace_log.h"
#include "build/build_config.h"
// By default, const char* argument values are assumed to have long-lived scope
// and will not be copied. Use this macro to force a const char* to be copied.
#define TRACE_STR_COPY(str) ::base::trace_event::TraceStringWithCopy(str)
// By default, trace IDs are eventually converted to a single 64-bit number. Use
// this macro to add a scope string. For example,
//
// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
// "network", "ResourceLoad",
// TRACE_ID_WITH_SCOPE("BlinkResourceID", resourceID));
//
// Also, it is possible to prepend the ID with another number, like the process
// ID. This is useful in creating IDs that are unique among all processes. To do
// that, pass two numbers after the scope string instead of one. For example,
//
// TRACE_EVENT_NESTABLE_ASYNC_BEGIN0(
// "network", "ResourceLoad",
// TRACE_ID_WITH_SCOPE("BlinkResourceID", pid, resourceID));
#define TRACE_ID_WITH_SCOPE(scope, ...) \
trace_event_internal::TraceID::WithScope(scope, ##__VA_ARGS__)
// Use this for ids that are unique across processes. This allows different
// processes to use the same id to refer to the same event.
#define TRACE_ID_GLOBAL(id) trace_event_internal::TraceID::GlobalId(id)
// Use this for ids that are unique within a single process. This allows
// different processes to use the same id to refer to different events.
#define TRACE_ID_LOCAL(id) trace_event_internal::TraceID::LocalId(id)
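// For example (an illustrative sketch; "navigation" and |navigation_id| are
// assumed names), an event that should be matched across processes can be
// emitted as:
//
//   TRACE_EVENT_NESTABLE_ASYNC_BEGIN0("navigation", "Navigation",
//                                     TRACE_ID_GLOBAL(navigation_id));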
#define TRACE_EVENT_API_CURRENT_THREAD_ID \
static_cast<int>(base::PlatformThread::CurrentId())
#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \
UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
(base::trace_event::TraceCategory::ENABLED_FOR_RECORDING | \
base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT))
#define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED() \
UNLIKELY(*INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \
(base::trace_event::TraceCategory::ENABLED_FOR_RECORDING | \
base::trace_event::TraceCategory::ENABLED_FOR_ETW_EXPORT | \
base::trace_event::TraceCategory::ENABLED_FOR_FILTERING))
////////////////////////////////////////////////////////////////////////////////
// Implementation specific tracing API definitions.
// Get a pointer to the enabled state of the given trace category. Only
// long-lived literal strings should be given as the category group. The
// returned pointer can be held permanently in a local static for example. If
// the unsigned char is non-zero, tracing is enabled. If tracing is enabled,
// TRACE_EVENT_API_ADD_TRACE_EVENT can be called. It's OK if tracing is disabled
// between the load of the tracing state and the call to
// TRACE_EVENT_API_ADD_TRACE_EVENT, because this flag only provides an early out
// for best performance when tracing is disabled.
// const unsigned char*
// TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(const char* category_group)
#define TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED \
base::trace_event::TraceLog::GetCategoryGroupEnabled
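// A sketch of the caching pattern this enables (illustrative only, not part
// of the original file; the "benchmark" category is an assumed example):
//
//   static const unsigned char* category_enabled =
//       TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("benchmark");
//   if (*category_enabled) {
//     // ... call TRACE_EVENT_API_ADD_TRACE_EVENT ...
//   }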
// Get the number of times traces have been recorded. This is used to implement
// the TRACE_EVENT_IS_NEW_TRACE facility.
// unsigned int TRACE_EVENT_API_GET_NUM_TRACES_RECORDED()
#define TRACE_EVENT_API_GET_NUM_TRACES_RECORDED \
trace_event_internal::GetNumTracesRecorded
// Add a trace event to the platform tracing system.
// base::trace_event::TraceEventHandle TRACE_EVENT_API_ADD_TRACE_EVENT(
// char phase,
// const unsigned char* category_group_enabled,
// const char* name,
// const char* scope,
// unsigned long long id,
// base::trace_event::TraceArguments* args,
// unsigned int flags)
#define TRACE_EVENT_API_ADD_TRACE_EVENT trace_event_internal::AddTraceEvent
// Add a trace event to the platform tracing system.
// base::trace_event::TraceEventHandle
// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_BIND_ID(
// char phase,
// const unsigned char* category_group_enabled,
// const char* name,
// const char* scope,
// unsigned long long id,
// unsigned long long bind_id,
// base::trace_event::TraceArguments* args,
// unsigned int flags)
#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_BIND_ID \
trace_event_internal::AddTraceEventWithBindId
// Add a trace event to the platform tracing system overriding the pid.
// The resulting event will have its tid and pid both set to the process_id
// passed here.
// base::trace_event::TraceEventHandle
// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID(
// char phase,
// const unsigned char* category_group_enabled,
// const char* name,
// const char* scope,
// unsigned long long id,
// int process_id,
// base::trace_event::TraceArguments* args,
// unsigned int flags)
#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_PROCESS_ID \
trace_event_internal::AddTraceEventWithProcessId
// Add a trace event to the platform tracing system.
// base::trace_event::TraceEventHandle
// TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_TIMESTAMP(
// char phase,
// const unsigned char* category_group_enabled,
// const char* name,
// const char* scope,
// unsigned long long id,
// int thread_id,
// const TimeTicks& timestamp,
// base::trace_event::TraceArguments* args,
// unsigned int flags)
#define TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP \
trace_event_internal::AddTraceEventWithThreadIdAndTimestamp
// Set the duration field of a COMPLETE trace event.
// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(
// const unsigned char* category_group_enabled,
// const char* name,
// base::trace_event::TraceEventHandle id)
#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \
trace_event_internal::UpdateTraceEventDuration
// Set the duration field of a COMPLETE trace event.
// void TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION_EXPLICIT(
// const unsigned char* category_group_enabled,
// const char* name,
// base::trace_event::TraceEventHandle id,
// int thread_id,
// bool explicit_timestamps,
// const base::TimeTicks& now,
// const base::ThreadTicks& thread_now,
// base::trace_event::ThreadInstructionCount thread_instruction_now)
#define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION_EXPLICIT \
trace_event_internal::UpdateTraceEventDurationExplicit
// Adds a metadata event to the trace log. The |AppendValueAsTraceFormat| method
// on the convertable value will be called at flush time.
// TRACE_EVENT_API_ADD_METADATA_EVENT(
// const unsigned char* category_group_enabled,
// const char* event_name,
// const char* arg_name,
// std::unique_ptr<ConvertableToTraceFormat> arg_value)
#define TRACE_EVENT_API_ADD_METADATA_EVENT \
trace_event_internal::AddMetadataEvent
// Defines atomic operations used internally by the tracing system.
#define TRACE_EVENT_API_ATOMIC_WORD base::subtle::AtomicWord
#define TRACE_EVENT_API_ATOMIC_LOAD(var) base::subtle::NoBarrier_Load(&(var))
#define TRACE_EVENT_API_ATOMIC_STORE(var, value) \
base::subtle::NoBarrier_Store(&(var), (value))
// Defines visibility for classes in trace_event.h
#define TRACE_EVENT_API_CLASS_EXPORT BASE_EXPORT
////////////////////////////////////////////////////////////////////////////////
// Implementation detail: trace event macros create temporary variables
// to keep instrumentation overhead low. These macros give each temporary
// variable a unique name based on the line number to prevent name collisions.
#define INTERNAL_TRACE_EVENT_UID3(a,b) \
trace_event_unique_##a##b
#define INTERNAL_TRACE_EVENT_UID2(a,b) \
INTERNAL_TRACE_EVENT_UID3(a,b)
#define INTERNAL_TRACE_EVENT_UID(name_prefix) \
INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__)
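// For example, INTERNAL_TRACE_EVENT_UID(catgroup) used on line 42 expands to
// trace_event_unique_catgroup42 (illustrative expansion only).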
// Implementation detail: internal macro to create static category.
// No barriers are needed, because this code is designed to operate safely
// even when the unsigned char* points to garbage data (which may be the case
// on processors without cache coherency).
#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
category_group, atomic, category_group_enabled) \
category_group_enabled = reinterpret_cast<const unsigned char*>( \
TRACE_EVENT_API_ATOMIC_LOAD(atomic)); \
if (UNLIKELY(!category_group_enabled)) { \
category_group_enabled = \
TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED(category_group); \
TRACE_EVENT_API_ATOMIC_STORE( \
atomic, reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( \
category_group_enabled)); \
}
#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_MAYBE_AT_COMPILE_TIME( \
category_group, k_category_group_enabled, category_group_enabled) \
if (k_category_group_enabled) { \
category_group_enabled = k_category_group_enabled; \
} else { \
static TRACE_EVENT_API_ATOMIC_WORD INTERNAL_TRACE_EVENT_UID(atomic) = 0; \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( \
category_group, INTERNAL_TRACE_EVENT_UID(atomic), \
category_group_enabled); \
}
#define INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group) \
static_assert( \
base::trace_event::BuiltinCategories::IsAllowedCategory(category_group), \
"Unknown tracing category is used. Please register your " \
"category in base/trace_event/builtin_categories.h"); \
constexpr const unsigned char* INTERNAL_TRACE_EVENT_UID( \
k_category_group_enabled) = \
base::trace_event::TraceLog::GetBuiltinCategoryEnabled(category_group); \
const unsigned char* INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_MAYBE_AT_COMPILE_TIME( \
category_group, INTERNAL_TRACE_EVENT_UID(k_category_group_enabled), \
INTERNAL_TRACE_EVENT_UID(category_group_enabled));
// Implementation detail: internal macro to return unoverridden
// base::TimeTicks::Now(). This is important because in headless VirtualTime can
// override base::TimeTicks::Now().
#define INTERNAL_TRACE_TIME_TICKS_NOW() \
base::subtle::TimeTicksNowIgnoringOverride()
// Implementation detail: internal macro to return unoverridden
// base::Time::Now(). This is important because in headless VirtualTime can
// override base::Time::Now().
#define INTERNAL_TRACE_TIME_NOW() base::subtle::TimeNowIgnoringOverride()
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD(phase, category_group, name, flags, ...) \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
trace_event_internal::AddTraceEvent( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
} \
} while (0)
// Implementation detail: internal macro to create static category and add begin
// event if the category is enabled. Also adds the end event when the scope
// ends.
#define INTERNAL_TRACE_EVENT_ADD_SCOPED(category_group, name, ...) \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
base::trace_event::TraceEventHandle h = \
trace_event_internal::AddTraceEvent( \
TRACE_EVENT_PHASE_COMPLETE, \
INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
TRACE_EVENT_FLAG_NONE, trace_event_internal::kNoId, \
##__VA_ARGS__); \
INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
}
#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLAGS(category_group, name, \
flags, ...) \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
base::trace_event::TraceEventHandle h = \
trace_event_internal::AddTraceEvent( \
TRACE_EVENT_PHASE_COMPLETE, \
INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
}
#define INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLOW(category_group, name, \
bind_id, flow_flags, ...) \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
trace_event_internal::ScopedTracer INTERNAL_TRACE_EVENT_UID(tracer); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
trace_event_internal::TraceID trace_event_bind_id((bind_id)); \
unsigned int trace_event_flags = \
flow_flags | trace_event_bind_id.id_flags(); \
base::trace_event::TraceEventHandle h = \
trace_event_internal::AddTraceEvent( \
TRACE_EVENT_PHASE_COMPLETE, \
INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
trace_event_flags, trace_event_bind_id.raw_id(), ##__VA_ARGS__); \
INTERNAL_TRACE_EVENT_UID(tracer).Initialize( \
INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, h); \
}
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID(phase, category_group, name, id, \
flags, ...) \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
trace_event_internal::TraceID trace_event_trace_id((id)); \
unsigned int trace_event_flags = \
flags | trace_event_trace_id.id_flags(); \
trace_event_internal::AddTraceEvent( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
trace_event_flags, trace_event_internal::kNoId, ##__VA_ARGS__); \
} \
} while (0)
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_TIMESTAMP(phase, category_group, name, \
timestamp, flags, ...) \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_internal::kGlobalScope, trace_event_internal::kNoId, \
TRACE_EVENT_API_CURRENT_THREAD_ID, timestamp, \
flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
trace_event_internal::kNoId, ##__VA_ARGS__); \
} \
} while (0)
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMP( \
phase, category_group, name, id, thread_id, timestamp, flags, ...) \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
trace_event_internal::TraceID trace_event_trace_id((id)); \
unsigned int trace_event_flags = \
flags | trace_event_trace_id.id_flags(); \
trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
phase, INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
thread_id, timestamp, \
trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
trace_event_internal::kNoId, ##__VA_ARGS__); \
} \
} while (0)
// Implementation detail: internal macro to create static category and add
// event if the category is enabled.
#define INTERNAL_TRACE_EVENT_ADD_WITH_ID_TID_AND_TIMESTAMPS( \
category_group, name, id, thread_id, begin_timestamp, end_timestamp, \
thread_end_timestamp, flags, ...) \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
trace_event_internal::TraceID trace_event_trace_id((id)); \
unsigned int trace_event_flags = \
flags | trace_event_trace_id.id_flags(); \
const unsigned char* uid_category_group_enabled = \
INTERNAL_TRACE_EVENT_UID(category_group_enabled); \
auto handle = \
trace_event_internal::AddTraceEventWithThreadIdAndTimestamp( \
TRACE_EVENT_PHASE_COMPLETE, uid_category_group_enabled, name, \
trace_event_trace_id.scope(), trace_event_trace_id.raw_id(), \
thread_id, begin_timestamp, \
trace_event_flags | TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP, \
trace_event_internal::kNoId, ##__VA_ARGS__); \
TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION_EXPLICIT( \
uid_category_group_enabled, name, handle, thread_id, \
/*explicit_timestamps=*/true, end_timestamp, thread_end_timestamp, \
base::trace_event::ThreadInstructionCount()); \
} \
} while (0)
// Implementation detail: internal macro to create static category and add
// metadata event if the category is enabled.
#define INTERNAL_TRACE_EVENT_METADATA_ADD(category_group, name, ...) \
do { \
INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \
if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED()) { \
TRACE_EVENT_API_ADD_METADATA_EVENT( \
INTERNAL_TRACE_EVENT_UID(category_group_enabled), name, \
##__VA_ARGS__); \
} \
} while (0)
#define INTERNAL_TRACE_LOG_MESSAGE(file, message, line) \
TRACE_EVENT_INSTANT1( \
"log", "LogMessage", \
TRACE_EVENT_FLAG_TYPED_PROTO_ARGS | TRACE_EVENT_SCOPE_THREAD, "message", \
std::make_unique<base::trace_event::LogMessage>(file, message, line))
#if BUILDFLAG(ENABLE_LOCATION_SOURCE)
// Implementation detail: internal macro to trace a task execution with the
// location where it was posted from.
//
// This implementation is for when location sources are available.
// TODO(ssid): The program counter of the current task should be added here.
#define INTERNAL_TRACE_TASK_EXECUTION(run_function, task) \
INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLAGS( \
"toplevel", run_function, TRACE_EVENT_FLAG_TYPED_PROTO_ARGS, "src_file", \
(task).posted_from.file_name(), "src_func", \
(task).posted_from.function_name()); \
TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION INTERNAL_TRACE_EVENT_UID( \
task_event)((task).posted_from.file_name()); \
TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER \
INTERNAL_TRACE_EVENT_UID(task_pc_event)((task).posted_from.program_counter());
#else
// TODO(http://crbug.com/760702): remove the file name and just pass the program
// counter to the heap profiler macro.
// TODO(ssid): The program counter of the current task should be added here.
#define INTERNAL_TRACE_TASK_EXECUTION(run_function, task) \
INTERNAL_TRACE_EVENT_ADD_SCOPED_WITH_FLAGS( \
"toplevel", run_function, TRACE_EVENT_FLAG_TYPED_PROTO_ARGS, "src", \
(task).posted_from.ToString()) \
TRACE_HEAP_PROFILER_API_SCOPED_TASK_EXECUTION INTERNAL_TRACE_EVENT_UID( \
task_event)((task).posted_from.file_name()); \
TRACE_HEAP_PROFILER_API_SCOPED_WITH_PROGRAM_COUNTER \
INTERNAL_TRACE_EVENT_UID(task_pc_event)((task).posted_from.program_counter());
#endif
namespace trace_event_internal {
// Specify these values when the corresponding argument of AddTraceEvent is not
// used.
const int kZeroNumArgs = 0;
const std::nullptr_t kGlobalScope = nullptr;
const unsigned long long kNoId = 0;
// TraceID encapsulates an ID that can either be an integer or pointer.
class BASE_EXPORT TraceID {
public:
// Can be combined with WithScope.
class LocalId {
public:
explicit LocalId(const void* raw_id)
: raw_id_(static_cast<unsigned long long>(
reinterpret_cast<uintptr_t>(raw_id))) {}
explicit LocalId(unsigned long long raw_id) : raw_id_(raw_id) {}
unsigned long long raw_id() const { return raw_id_; }
private:
unsigned long long raw_id_;
};
// Can be combined with WithScope.
class GlobalId {
public:
explicit GlobalId(unsigned long long raw_id) : raw_id_(raw_id) {}
unsigned long long raw_id() const { return raw_id_; }
private:
unsigned long long raw_id_;
};
class WithScope {
public:
WithScope(const char* scope, unsigned long long raw_id)
: scope_(scope), raw_id_(raw_id) {}
WithScope(const char* scope, LocalId local_id)
: scope_(scope), raw_id_(local_id.raw_id()) {
id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
}
WithScope(const char* scope, GlobalId global_id)
: scope_(scope), raw_id_(global_id.raw_id()) {
id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
}
unsigned long long raw_id() const { return raw_id_; }
const char* scope() const { return scope_; }
unsigned int id_flags() const { return id_flags_; }
private:
const char* scope_ = nullptr;
unsigned long long raw_id_;
unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
};
TraceID(const void* raw_id) : raw_id_(static_cast<unsigned long long>(
reinterpret_cast<uintptr_t>(raw_id))) {
id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
}
TraceID(unsigned long long raw_id) : raw_id_(raw_id) {}
TraceID(unsigned long raw_id) : raw_id_(raw_id) {}
TraceID(unsigned int raw_id) : raw_id_(raw_id) {}
TraceID(unsigned short raw_id) : raw_id_(raw_id) {}
TraceID(unsigned char raw_id) : raw_id_(raw_id) {}
TraceID(long long raw_id)
: raw_id_(static_cast<unsigned long long>(raw_id)) {}
TraceID(long raw_id)
: raw_id_(static_cast<unsigned long long>(raw_id)) {}
TraceID(int raw_id)
: raw_id_(static_cast<unsigned long long>(raw_id)) {}
TraceID(short raw_id)
: raw_id_(static_cast<unsigned long long>(raw_id)) {}
TraceID(signed char raw_id)
: raw_id_(static_cast<unsigned long long>(raw_id)) {}
TraceID(LocalId raw_id) : raw_id_(raw_id.raw_id()) {
id_flags_ = TRACE_EVENT_FLAG_HAS_LOCAL_ID;
}
TraceID(GlobalId raw_id) : raw_id_(raw_id.raw_id()) {
id_flags_ = TRACE_EVENT_FLAG_HAS_GLOBAL_ID;
}
TraceID(WithScope scoped_id)
: scope_(scoped_id.scope()),
raw_id_(scoped_id.raw_id()),
id_flags_(scoped_id.id_flags()) {}
unsigned long long raw_id() const { return raw_id_; }
const char* scope() const { return scope_; }
unsigned int id_flags() const { return id_flags_; }
private:
const char* scope_ = nullptr;
unsigned long long raw_id_;
unsigned int id_flags_ = TRACE_EVENT_FLAG_HAS_ID;
};
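// As an illustrative combination of the helpers above, a per-process id can
// be given an explicit scope via:
//
//   TRACE_ID_WITH_SCOPE("AssumedScope", TRACE_ID_LOCAL(object_ptr))
//
// where "AssumedScope" and |object_ptr| are hypothetical names.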
// These functions all internally call
// base::trace_event::TraceLog::GetInstance() then call the method with the same
// name on it. This is used to reduce the generated machine code at each
// TRACE_EVENTXXX macro call.
base::trace_event::TraceEventHandle BASE_EXPORT
AddTraceEvent(char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
base::trace_event::TraceArguments* args,
unsigned int flags);
base::trace_event::TraceEventHandle BASE_EXPORT
AddTraceEventWithBindId(char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
unsigned long long bind_id,
base::trace_event::TraceArguments* args,
unsigned int flags);
base::trace_event::TraceEventHandle BASE_EXPORT
AddTraceEventWithProcessId(char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
int process_id,
base::trace_event::TraceArguments* args,
unsigned int flags);
base::trace_event::TraceEventHandle BASE_EXPORT
AddTraceEventWithThreadIdAndTimestamp(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
int thread_id,
const base::TimeTicks& timestamp,
base::trace_event::TraceArguments* args,
unsigned int flags);
base::trace_event::TraceEventHandle BASE_EXPORT
AddTraceEventWithThreadIdAndTimestamp(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
unsigned long long bind_id,
int thread_id,
const base::TimeTicks& timestamp,
base::trace_event::TraceArguments* args,
unsigned int flags);
void BASE_EXPORT AddMetadataEvent(const unsigned char* category_group_enabled,
const char* name,
base::trace_event::TraceArguments* args,
unsigned int flags);
int BASE_EXPORT GetNumTracesRecorded();
void BASE_EXPORT
UpdateTraceEventDuration(const unsigned char* category_group_enabled,
const char* name,
base::trace_event::TraceEventHandle handle);
void BASE_EXPORT UpdateTraceEventDurationExplicit(
const unsigned char* category_group_enabled,
const char* name,
base::trace_event::TraceEventHandle handle,
int thread_id,
bool explicit_timestamps,
const base::TimeTicks& now,
const base::ThreadTicks& thread_now,
base::trace_event::ThreadInstructionCount thread_instruction_now);
// These AddTraceEvent and AddTraceEventWithThreadIdAndTimestamp template
// functions are defined here instead of in the macro, because the arg_values
// could be temporary objects, such as std::string. In order to store
// pointers to the internal c_str and pass through to the tracing API,
// the arg_values must live throughout these procedures.
template <class ARG1_TYPE>
static inline base::trace_event::TraceEventHandle
AddTraceEventWithThreadIdAndTimestamp(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
int thread_id,
const base::TimeTicks& timestamp,
unsigned int flags,
unsigned long long bind_id,
const char* arg1_name,
ARG1_TYPE&& arg1_val) {
base::trace_event::TraceArguments args(arg1_name,
std::forward<ARG1_TYPE>(arg1_val));
return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
phase, category_group_enabled, name, scope, id, bind_id, thread_id,
timestamp, &args, flags);
}
template <class ARG1_TYPE, class ARG2_TYPE>
static inline base::trace_event::TraceEventHandle
AddTraceEventWithThreadIdAndTimestamp(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
int thread_id,
const base::TimeTicks& timestamp,
unsigned int flags,
unsigned long long bind_id,
const char* arg1_name,
ARG1_TYPE&& arg1_val,
const char* arg2_name,
ARG2_TYPE&& arg2_val) {
base::trace_event::TraceArguments args(
arg1_name, std::forward<ARG1_TYPE>(arg1_val), arg2_name,
std::forward<ARG2_TYPE>(arg2_val));
return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
phase, category_group_enabled, name, scope, id, bind_id, thread_id,
timestamp, &args, flags);
}
static inline base::trace_event::TraceEventHandle
AddTraceEventWithThreadIdAndTimestamp(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
int thread_id,
const base::TimeTicks& timestamp,
unsigned int flags,
unsigned long long bind_id) {
return TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
phase, category_group_enabled, name, scope, id, bind_id, thread_id,
timestamp, nullptr, flags);
}
static inline base::trace_event::TraceEventHandle AddTraceEvent(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
unsigned int flags,
unsigned long long bind_id) {
const int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
const base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase, category_group_enabled, name, scope, id, thread_id, now, flags,
bind_id);
}
template <class ARG1_TYPE>
static inline base::trace_event::TraceEventHandle AddTraceEvent(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
unsigned int flags,
unsigned long long bind_id,
const char* arg1_name,
ARG1_TYPE&& arg1_val) {
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase, category_group_enabled, name, scope, id, thread_id, now, flags,
bind_id, arg1_name, std::forward<ARG1_TYPE>(arg1_val));
}
template <class ARG1_TYPE, class ARG2_TYPE>
static inline base::trace_event::TraceEventHandle AddTraceEvent(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
unsigned int flags,
unsigned long long bind_id,
const char* arg1_name,
ARG1_TYPE&& arg1_val,
const char* arg2_name,
ARG2_TYPE&& arg2_val) {
int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
base::TimeTicks now = TRACE_TIME_TICKS_NOW();
return AddTraceEventWithThreadIdAndTimestamp(
phase, category_group_enabled, name, scope, id, thread_id, now, flags,
bind_id, arg1_name, std::forward<ARG1_TYPE>(arg1_val), arg2_name,
std::forward<ARG2_TYPE>(arg2_val));
}
template <class ARG1_TYPE>
static void AddMetadataEvent(const unsigned char* category_group_enabled,
const char* event_name,
const char* arg_name,
ARG1_TYPE&& arg_val) {
base::trace_event::TraceArguments args(arg_name,
std::forward<ARG1_TYPE>(arg_val));
trace_event_internal::AddMetadataEvent(category_group_enabled, event_name,
&args, TRACE_EVENT_FLAG_NONE);
}
// Used by TRACE_EVENTx macros. Do not use directly.
class TRACE_EVENT_API_CLASS_EXPORT ScopedTracer {
public:
ScopedTracer() = default;
~ScopedTracer() {
if (category_group_enabled_ && *category_group_enabled_) {
TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
name_, event_handle_);
}
}
void Initialize(const unsigned char* category_group_enabled,
const char* name,
base::trace_event::TraceEventHandle event_handle) {
category_group_enabled_ = category_group_enabled;
name_ = name;
event_handle_ = event_handle;
}
private:
// NOTE: Only initialize the first member to reduce generated code size,
// since there is no point in initializing the other members if Initialize()
// is never called.
const unsigned char* category_group_enabled_ = nullptr;
const char* name_;
base::trace_event::TraceEventHandle event_handle_;
};
// Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly.
class TRACE_EVENT_API_CLASS_EXPORT ScopedTraceBinaryEfficient {
public:
ScopedTraceBinaryEfficient(const char* category_group, const char* name);
~ScopedTraceBinaryEfficient();
private:
const unsigned char* category_group_enabled_;
const char* name_;
base::trace_event::TraceEventHandle event_handle_;
};
// This macro generates less code than TRACE_EVENT0 but is also
// slower to execute when tracing is off. It should generally only be
// used with code that is seldom executed or conditionally executed
// when debugging.
// For now the category_group must be "gpu".
#define TRACE_EVENT_BINARY_EFFICIENT0(category_group, name) \
trace_event_internal::ScopedTraceBinaryEfficient \
INTERNAL_TRACE_EVENT_UID(scoped_trace)(category_group, name);
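// Usage sketch (illustrative only; the function name below is assumed):
//
//   void RarelyCalledGpuHelper() {
//     TRACE_EVENT_BINARY_EFFICIENT0("gpu", "RarelyCalledGpuHelper");
//     // ... seldom-executed work ...
//   }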
} // namespace trace_event_internal
namespace base {
namespace trace_event {
template <typename IDType, const char* category>
class TraceScopedTrackableObject {
public:
TraceScopedTrackableObject(const char* name, IDType id)
: name_(name), id_(id) {
TRACE_EVENT_OBJECT_CREATED_WITH_ID(category, name_, id_);
}
template <typename ArgType> void snapshot(ArgType snapshot) {
TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(category, name_, id_, snapshot);
}
~TraceScopedTrackableObject() {
TRACE_EVENT_OBJECT_DELETED_WITH_ID(category, name_, id_);
}
private:
const char* name_;
IDType id_;
DISALLOW_COPY_AND_ASSIGN(TraceScopedTrackableObject);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_EVENT_H_

View file

@ -0,0 +1,202 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_event_impl.h"
#include <fcntl.h>
#include <stddef.h>
#include <stdint.h>
#include "base/bind.h"
#include "base/format_macros.h"
#include "base/logging.h"
#include "base/posix/eintr_wrapper.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/thread.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace trace_event {
namespace {
int g_atrace_fd = -1;
const char kATraceMarkerFile[] = "/sys/kernel/debug/tracing/trace_marker";
void WriteToATrace(int fd, const char* buffer, size_t size) {
size_t total_written = 0;
while (total_written < size) {
ssize_t written = HANDLE_EINTR(write(
fd, buffer + total_written, size - total_written));
if (written <= 0)
break;
total_written += written;
}
if (total_written < size) {
PLOG(WARNING) << "Failed to write buffer '" << std::string(buffer, size)
<< "' to " << kATraceMarkerFile;
}
}
void WriteEvent(char phase,
const char* category_group,
const char* name,
unsigned long long id,
const TraceArguments& args,
unsigned int flags) {
std::string out = StringPrintf("%c|%d|%s", phase, getpid(), name);
if (flags & TRACE_EVENT_FLAG_HAS_ID)
StringAppendF(&out, "-%" PRIx64, static_cast<uint64_t>(id));
out += '|';
const char* const* arg_names = args.names();
for (size_t i = 0; i < args.size() && arg_names[i]; ++i) {
if (i)
out += ';';
out += arg_names[i];
out += '=';
std::string::size_type value_start = out.length();
args.values()[i].AppendAsJSON(args.types()[i], &out);
// Remove the quotes which may confuse the atrace script.
ReplaceSubstringsAfterOffset(&out, value_start, "\\\"", "'");
ReplaceSubstringsAfterOffset(&out, value_start, "\"", "");
// Replace chars used for separators with similar chars in the value.
std::replace(out.begin() + value_start, out.end(), ';', ',');
std::replace(out.begin() + value_start, out.end(), '|', '!');
}
out += '|';
out += category_group;
WriteToATrace(g_atrace_fd, out.c_str(), out.size());
}
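// For illustration (an assumed example, not from the original file), a begin
// event named "DoWork" with one integer argument, emitted by pid 1234 in the
// "toplevel" category, produces a marker line similar to:
//   B|1234|DoWork|count=3|toplevel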
void NoOpOutputCallback(WaitableEvent* complete_event,
const scoped_refptr<RefCountedString>&,
bool has_more_events) {
if (!has_more_events)
complete_event->Signal();
}
void EndChromeTracing(TraceLog* trace_log,
WaitableEvent* complete_event) {
trace_log->SetDisabled();
// Delete the buffered trace events as they have been sent to atrace.
trace_log->Flush(BindRepeating(&NoOpOutputCallback, complete_event));
}
} // namespace
// These functions support Android systrace.py when 'webview' category is
// traced. With the new adb_profile_chrome, we may have two phases:
// - before WebView is ready for combined tracing, we can use adb_profile_chrome
// to trace android categories other than 'webview' and chromium categories.
// In this way we can avoid the conflict between StartATrace/StopATrace and
// the intents.
// - TODO(wangxianzhu): after WebView is ready for combined tracing, remove
// StartATrace, StopATrace and SendToATrace, and perhaps send Java traces
// directly to atrace in trace_event_binding.cc.
void TraceLog::StartATrace() {
if (g_atrace_fd != -1)
return;
g_atrace_fd = HANDLE_EINTR(open(kATraceMarkerFile, O_WRONLY));
if (g_atrace_fd == -1) {
PLOG(WARNING) << "Couldn't open " << kATraceMarkerFile;
return;
}
TraceConfig trace_config;
trace_config.SetTraceRecordMode(RECORD_CONTINUOUSLY);
SetEnabled(trace_config, TraceLog::RECORDING_MODE);
}
void TraceLog::StopATrace() {
if (g_atrace_fd == -1)
return;
close(g_atrace_fd);
g_atrace_fd = -1;
// TraceLog::Flush() requires the current thread to have a message loop, but
// this thread called from Java may not have one, so flush in another thread.
Thread end_chrome_tracing_thread("end_chrome_tracing");
WaitableEvent complete_event(WaitableEvent::ResetPolicy::AUTOMATIC,
WaitableEvent::InitialState::NOT_SIGNALED);
end_chrome_tracing_thread.Start();
end_chrome_tracing_thread.task_runner()->PostTask(
FROM_HERE, base::BindOnce(&EndChromeTracing, Unretained(this),
Unretained(&complete_event)));
complete_event.Wait();
}
void TraceEvent::SendToATrace() {
if (g_atrace_fd == -1)
return;
const char* category_group =
TraceLog::GetCategoryGroupName(category_group_enabled_);
switch (phase_) {
case TRACE_EVENT_PHASE_BEGIN:
WriteEvent('B', category_group, name_, id_, args_, flags_);
break;
case TRACE_EVENT_PHASE_COMPLETE:
WriteEvent(duration_.ToInternalValue() == -1 ? 'B' : 'E', category_group,
name_, id_, args_, flags_);
break;
case TRACE_EVENT_PHASE_END:
      // Though a single 'E' is enough, also append the pid, name,
      // category_group, etc., so that unpaired events can be found easily.
WriteEvent('E', category_group, name_, id_, args_, flags_);
break;
case TRACE_EVENT_PHASE_INSTANT:
      // Simulate an instant event with a pair of begin/end events.
WriteEvent('B', category_group, name_, id_, args_, flags_);
WriteToATrace(g_atrace_fd, "E", 1);
break;
case TRACE_EVENT_PHASE_COUNTER:
for (size_t i = 0; i < arg_size() && arg_name(i); ++i) {
DCHECK(arg_type(i) == TRACE_VALUE_TYPE_INT);
std::string out =
base::StringPrintf("C|%d|%s-%s", getpid(), name_, arg_name(i));
if (flags_ & TRACE_EVENT_FLAG_HAS_ID)
StringAppendF(&out, "-%" PRIx64, static_cast<uint64_t>(id_));
StringAppendF(&out, "|%d|%s", static_cast<int>(arg_value(i).as_int),
category_group);
WriteToATrace(g_atrace_fd, out.c_str(), out.size());
}
break;
default:
// Do nothing.
break;
}
}
void TraceLog::AddClockSyncMetadataEvent() {
int atrace_fd = HANDLE_EINTR(open(kATraceMarkerFile, O_WRONLY | O_APPEND));
if (atrace_fd == -1) {
PLOG(WARNING) << "Couldn't open " << kATraceMarkerFile;
return;
}
// Android's kernel trace system has a trace_marker feature: this is a file on
// debugfs that takes the written data and pushes it onto the trace
// buffer. So, to establish clock sync, we write our monotonic clock into that
// trace buffer.
double now_in_seconds = (TRACE_TIME_TICKS_NOW() - TimeTicks()).InSecondsF();
std::string marker = StringPrintf(
"trace_event_clock_sync: parent_ts=%f\n", now_in_seconds);
WriteToATrace(atrace_fd, marker.c_str(), marker.size());
close(atrace_fd);
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,381 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_event_etw_export_win.h"
#include <stddef.h>
#include "base/at_exit.h"
#include "base/command_line.h"
#include "base/logging.h"
#include "base/memory/singleton.h"
#include "base/strings/string_tokenizer.h"
#include "base/strings/utf_string_conversions.h"
#include "base/threading/platform_thread.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_impl.h"
#include <windows.h>
// The GetProcAddress technique is borrowed from
// https://github.com/google/UIforETW/tree/master/ETWProviders
//
// EVNTAPI is used in evntprov.h which is included by chrome_events_win.h.
// We define EVNTAPI without the DECLSPEC_IMPORT specifier so that we can
// implement these functions locally instead of using the import library, and
// can therefore still run on Windows XP.
#define EVNTAPI __stdcall
// Include the event register/write/unregister macros compiled from the manifest
// file. Note that this includes evntprov.h which requires a Vista+ Windows SDK.
//
// In SHARED_INTERMEDIATE_DIR.
// Headers generated by mc.exe have a ';' at the end of extern "C" {} blocks.
#if defined(__clang__)
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wextra-semi"
#endif
#include "base/trace_event/etw_manifest/chrome_events_win.h" // NOLINT
#if defined(__clang__)
#pragma clang diagnostic pop
#endif
namespace {
// |kFilteredEventGroupNames| contains the event categories that can be
// exported individually. These categories can be enabled by passing the correct
// keyword when starting the trace. A keyword is a 64-bit flag and we attribute
// one bit per category. We can therefore enable a particular category by
// setting its corresponding bit in the keyword. For events that are not present
// in |kFilteredEventGroupNames|, we have two bits that control their
// behaviour. When bit 61 is enabled, any event that is not disabled by default
// (ie. doesn't start with disabled-by-default-) will be exported. Likewise,
// when bit 62 is enabled, any event that is disabled by default will be
// exported.
//
// Note that bit 63 (MSB) must always be set, otherwise tracing will be disabled
// by ETW. Therefore, the keyword will always be greater than
// 0x8000000000000000.
//
// Examples of passing keywords to the provider using xperf:
// # This exports "benchmark" and "cc" events
// xperf -start chrome -on Chrome:0x8000000000000009
//
// # This exports "gpu", "netlog" and all other events that are not disabled by
// # default
// xperf -start chrome -on Chrome:0xA0000000000000A0
//
// More info about starting a trace and keyword can be obtained by using the
// help section of xperf (xperf -help start). Note that xperf documentation
// refers to keywords as flags and there are two ways to enable them, using
// group names or the hex representation. We only support the latter. Also, we
// ignore the level.
const char* const kFilteredEventGroupNames[] = {
"benchmark", // 0x1
"blink", // 0x2
"browser", // 0x4
"cc", // 0x8
"evdev", // 0x10
"gpu", // 0x20
"input", // 0x40
"netlog", // 0x80
"sequence_manager", // 0x100
"toplevel", // 0x200
"v8", // 0x400
"disabled-by-default-cc.debug", // 0x800
"disabled-by-default-cc.debug.picture", // 0x1000
"disabled-by-default-toplevel.flow", // 0x2000
"startup", // 0x4000
"latency", // 0x8000
"blink.user_timing", // 0x10000
"media", // 0x20000
"loading", // 0x40000
};
const char kOtherEventsGroupName[] = "__OTHER_EVENTS"; // 0x2000000000000000
const char kDisabledOtherEventsGroupName[] =
"__DISABLED_OTHER_EVENTS"; // 0x4000000000000000
const uint64_t kOtherEventsKeywordBit = 1ULL << 61;
const uint64_t kDisabledOtherEventsKeywordBit = 1ULL << 62;
const size_t kNumberOfCategories = ARRAYSIZE(kFilteredEventGroupNames) + 2U;
static void __stdcall EtwEnableCallback(LPCGUID SourceId,
ULONG ControlCode,
UCHAR Level,
ULONGLONG MatchAnyKeyword,
ULONGLONG MatchAllKeyword,
PEVENT_FILTER_DESCRIPTOR FilterData,
PVOID CallbackContext) {
// Invoke the default callback, which updates the information inside
// CHROME_Context.
McGenControlCallbackV2(SourceId, ControlCode, Level, MatchAnyKeyword,
MatchAllKeyword, FilterData, CallbackContext);
base::trace_event::TraceEventETWExport::OnETWEnableUpdate();
}
} // namespace
namespace base {
namespace trace_event {
bool TraceEventETWExport::is_registration_complete_ = false;
TraceEventETWExport::TraceEventETWExport() : etw_match_any_keyword_(0ULL) {
// Register the ETW provider. If registration fails then the event logging
// calls will fail. We're essentially doing the same operation as
// EventRegisterChrome (which was auto generated for our provider by the
// ETW manifest compiler), but instead we're passing our own callback.
// This allows us to detect changes to enable/disable/keyword changes.
// ChromeHandle and the other parameters to EventRegister are all generated
// globals from chrome_events_win.h
DCHECK(!ChromeHandle);
EventRegister(&CHROME, &EtwEnableCallback, &CHROME_Context, &ChromeHandle);
TraceEventETWExport::is_registration_complete_ = true;
// Make sure to initialize the map with all the group names. Subsequent
// modifications will be made by the background thread and only affect the
// values of the keys (no key addition/deletion). Therefore, the map does not
// require a lock for access.
for (size_t i = 0; i < ARRAYSIZE(kFilteredEventGroupNames); i++)
categories_status_[kFilteredEventGroupNames[i]] = false;
categories_status_[kOtherEventsGroupName] = false;
categories_status_[kDisabledOtherEventsGroupName] = false;
DCHECK_EQ(kNumberOfCategories, categories_status_.size());
}
TraceEventETWExport::~TraceEventETWExport() {
EventUnregisterChrome();
is_registration_complete_ = false;
}
// static
void TraceEventETWExport::EnableETWExport() {
auto* instance = GetInstance();
if (instance) {
    // Sync the enabled categories with ETW by calling UpdateEnabledCategories(),
    // which checks the keyword. We'll stay in sync via the EtwEnableCallback
// we register in TraceEventETWExport's constructor.
instance->UpdateEnabledCategories();
}
}
// static
void TraceEventETWExport::AddEvent(char phase,
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
const TraceArguments* args) {
// We bail early in case exporting is disabled or no consumer is listening.
auto* instance = GetInstance();
if (!instance || !EventEnabledChromeEvent())
return;
const char* phase_string = nullptr;
// Space to store the phase identifier and null-terminator, when needed.
char phase_buffer[2];
switch (phase) {
case TRACE_EVENT_PHASE_BEGIN:
phase_string = "Begin";
break;
case TRACE_EVENT_PHASE_END:
phase_string = "End";
break;
case TRACE_EVENT_PHASE_COMPLETE:
phase_string = "Complete";
break;
case TRACE_EVENT_PHASE_INSTANT:
phase_string = "Instant";
break;
case TRACE_EVENT_PHASE_ASYNC_BEGIN:
phase_string = "Async Begin";
break;
case TRACE_EVENT_PHASE_ASYNC_STEP_INTO:
phase_string = "Async Step Into";
break;
case TRACE_EVENT_PHASE_ASYNC_STEP_PAST:
phase_string = "Async Step Past";
break;
case TRACE_EVENT_PHASE_ASYNC_END:
phase_string = "Async End";
break;
case TRACE_EVENT_PHASE_NESTABLE_ASYNC_BEGIN:
phase_string = "Nestable Async Begin";
break;
case TRACE_EVENT_PHASE_NESTABLE_ASYNC_END:
phase_string = "Nestable Async End";
break;
case TRACE_EVENT_PHASE_NESTABLE_ASYNC_INSTANT:
phase_string = "Nestable Async Instant";
break;
case TRACE_EVENT_PHASE_FLOW_BEGIN:
phase_string = "Phase Flow Begin";
break;
case TRACE_EVENT_PHASE_FLOW_STEP:
phase_string = "Phase Flow Step";
break;
case TRACE_EVENT_PHASE_FLOW_END:
phase_string = "Phase Flow End";
break;
case TRACE_EVENT_PHASE_METADATA:
phase_string = "Phase Metadata";
break;
case TRACE_EVENT_PHASE_COUNTER:
phase_string = "Phase Counter";
break;
case TRACE_EVENT_PHASE_SAMPLE:
phase_string = "Phase Sample";
break;
case TRACE_EVENT_PHASE_CREATE_OBJECT:
phase_string = "Phase Create Object";
break;
case TRACE_EVENT_PHASE_SNAPSHOT_OBJECT:
phase_string = "Phase Snapshot Object";
break;
case TRACE_EVENT_PHASE_DELETE_OBJECT:
phase_string = "Phase Delete Object";
break;
default:
phase_buffer[0] = phase;
phase_buffer[1] = 0;
phase_string = phase_buffer;
break;
}
std::string arg_values_string[3];
size_t num_args = args ? args->size() : 0;
for (size_t i = 0; i < num_args; i++) {
if (args->types()[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
// Temporarily do nothing here. This function consumes 1/3 to 1/2 of
// *total* process CPU time when ETW tracing, and many of the strings
// created exceed WPA's 4094 byte limit and are shown as:
// "Unable to parse data". See crbug.com/488257
} else {
args->values()[i].AppendAsString(args->types()[i], arg_values_string + i);
}
}
EventWriteChromeEvent(
name, phase_string, num_args > 0 ? args->names()[0] : "",
arg_values_string[0].c_str(), num_args > 1 ? args->names()[1] : "",
arg_values_string[1].c_str(), "", "");
}
// static
void TraceEventETWExport::AddCompleteEndEvent(const char* name) {
auto* instance = GetInstance();
if (!instance || !EventEnabledChromeEvent())
return;
EventWriteChromeEvent(name, "Complete End", "", "", "", "", "", "");
}
// static
bool TraceEventETWExport::IsCategoryGroupEnabled(
StringPiece category_group_name) {
DCHECK(!category_group_name.empty());
auto* instance = GetInstanceIfExists();
if (instance == nullptr)
return false;
if (!EventEnabledChromeEvent())
return false;
CStringTokenizer category_group_tokens(category_group_name.begin(),
category_group_name.end(), ",");
while (category_group_tokens.GetNext()) {
StringPiece category_group_token = category_group_tokens.token_piece();
if (instance->IsCategoryEnabled(category_group_token)) {
return true;
}
}
return false;
}
bool TraceEventETWExport::UpdateEnabledCategories() {
if (etw_match_any_keyword_ == CHROME_Context.MatchAnyKeyword)
return false;
// If the keyword has changed, update each category.
  // CHROME_Context.MatchAnyKeyword is set by UIforETW (or other ETW trace
// recording tools) using the ETW infrastructure. This value will be set in
// all Chrome processes that have registered their ETW provider.
etw_match_any_keyword_ = CHROME_Context.MatchAnyKeyword;
for (size_t i = 0; i < ARRAYSIZE(kFilteredEventGroupNames); i++) {
if (etw_match_any_keyword_ & (1ULL << i)) {
categories_status_[kFilteredEventGroupNames[i]] = true;
} else {
categories_status_[kFilteredEventGroupNames[i]] = false;
}
}
// Also update the two default categories.
if (etw_match_any_keyword_ & kOtherEventsKeywordBit) {
categories_status_[kOtherEventsGroupName] = true;
} else {
categories_status_[kOtherEventsGroupName] = false;
}
if (etw_match_any_keyword_ & kDisabledOtherEventsKeywordBit) {
categories_status_[kDisabledOtherEventsGroupName] = true;
} else {
categories_status_[kDisabledOtherEventsGroupName] = false;
}
DCHECK_EQ(kNumberOfCategories, categories_status_.size());
// Update the categories in TraceLog.
TraceLog::GetInstance()->UpdateETWCategoryGroupEnabledFlags();
return true;
}
bool TraceEventETWExport::IsCategoryEnabled(StringPiece category_name) const {
DCHECK_EQ(kNumberOfCategories, categories_status_.size());
// Try to find the category and return its status if found
auto it = categories_status_.find(category_name);
if (it != categories_status_.end())
return it->second;
// Otherwise return the corresponding default status by first checking if the
// category is disabled by default.
if (category_name.starts_with("disabled-by-default")) {
DCHECK(categories_status_.find(kDisabledOtherEventsGroupName) !=
categories_status_.end());
return categories_status_.find(kDisabledOtherEventsGroupName)->second;
} else {
DCHECK(categories_status_.find(kOtherEventsGroupName) !=
categories_status_.end());
return categories_status_.find(kOtherEventsGroupName)->second;
}
}
// static
void TraceEventETWExport::OnETWEnableUpdate() {
// During construction, if tracing is already enabled, we'll get
// a callback synchronously on the same thread. Calling GetInstance
// in that case will hang since we're in the process of creating the
// singleton.
if (is_registration_complete_) {
auto* instance = GetInstance();
if (instance)
instance->UpdateEnabledCategories();
}
}
// static
TraceEventETWExport* TraceEventETWExport::GetInstance() {
return Singleton<TraceEventETWExport,
StaticMemorySingletonTraits<TraceEventETWExport>>::get();
}
// static
TraceEventETWExport* TraceEventETWExport::GetInstanceIfExists() {
return Singleton<
TraceEventETWExport,
StaticMemorySingletonTraits<TraceEventETWExport>>::GetIfExists();
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,88 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// This file contains the Windows-specific exporting to ETW.
#ifndef BASE_TRACE_EVENT_TRACE_EVENT_ETW_EXPORT_WIN_H_
#define BASE_TRACE_EVENT_TRACE_EVENT_ETW_EXPORT_WIN_H_
#include <stdint.h>
#include <map>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/strings/string_piece.h"
#include "base/trace_event/trace_event_impl.h"
namespace base {
template <typename Type>
struct StaticMemorySingletonTraits;
namespace trace_event {
class BASE_EXPORT TraceEventETWExport {
public:
~TraceEventETWExport();
// Retrieves the singleton.
// Note that this may return NULL post-AtExit processing.
static TraceEventETWExport* GetInstance();
// Retrieves the singleton iff it was previously instantiated by a
// GetInstance() call. Avoids creating the instance only to check that it
// wasn't disabled. Note that, like GetInstance(), this may also return NULL
// post-AtExit processing.
static TraceEventETWExport* GetInstanceIfExists();
// Enables exporting of events to ETW. If tracing is disabled for the Chrome
// provider, AddEvent and AddCustomEvent will simply return when called.
static void EnableETWExport();
// Exports an event to ETW. This is mainly used in
// TraceLog::AddTraceEventWithThreadIdAndTimestamp to export internal events.
static void AddEvent(char phase,
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
const TraceArguments* args);
// Exports an ETW event that marks the end of a complete event.
static void AddCompleteEndEvent(const char* name);
// Returns true if any category in the group is enabled.
static bool IsCategoryGroupEnabled(StringPiece category_group_name);
// Called from the ETW EnableCallback when the state of the provider or
// keywords has changed.
static void OnETWEnableUpdate();
private:
// Ensure only the provider can construct us.
friend struct StaticMemorySingletonTraits<TraceEventETWExport>;
TraceEventETWExport();
// Updates the list of enabled categories by consulting the ETW keyword.
// Returns true if there was a change, false otherwise.
bool UpdateEnabledCategories();
// Returns true if the category is enabled.
bool IsCategoryEnabled(StringPiece category_name) const;
static bool is_registration_complete_;
// Maps category names to their status (enabled/disabled).
std::map<StringPiece, bool> categories_status_;
// Local copy of the ETW keyword.
uint64_t etw_match_any_keyword_;
DISALLOW_COPY_AND_ASSIGN(TraceEventETWExport);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_EVENT_ETW_EXPORT_WIN_H_

View file

@ -0,0 +1,17 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_event_filter.h"
namespace base {
namespace trace_event {
TraceEventFilter::TraceEventFilter() = default;
TraceEventFilter::~TraceEventFilter() = default;
void TraceEventFilter::EndEvent(const char* category_name,
const char* event_name) const {}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,51 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_
#include <memory>
#include "base/base_export.h"
#include "base/macros.h"
namespace base {
namespace trace_event {
class TraceEvent;
// TraceEventFilter is like iptables for TRACE_EVENT macros. Filters can be
// enabled on a per-category basis, hence a single filter instance can serve
// more than one TraceCategory. There are two use cases for filters:
// 1. Snooping TRACE_EVENT macros without adding them to the TraceLog. This is
// possible by setting the ENABLED_FOR_FILTERING flag on a category w/o
// ENABLED_FOR_RECORDING (see TraceConfig for user-facing configuration).
// 2. Filtering TRACE_EVENT macros before they are added to the TraceLog. This
// requires both the ENABLED_FOR_FILTERING and ENABLED_FOR_RECORDING flags
// on the category.
// More importantly, filters must be thread-safe. The FilterTraceEvent and
// EndEvent methods can be called concurrently as trace macros are hit on
// different threads.
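// A minimal illustrative filter (a sketch under assumed names; "AllocFilter"
// is hypothetical and not defined anywhere in this file):
//
//   class AllocFilter : public TraceEventFilter {
//    public:
//     bool FilterTraceEvent(const TraceEvent& trace_event) const override {
//       // Keep only events whose name starts with "Alloc".
//       return base::StartsWith(trace_event.name(), "Alloc",
//                               base::CompareCase::SENSITIVE);
//     }
//   };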
class BASE_EXPORT TraceEventFilter {
public:
TraceEventFilter();
virtual ~TraceEventFilter();
// If the category is ENABLED_FOR_RECORDING, the event is added iff all the
// filters enabled for the category return true. Returning false causes the
// event to be discarded.
virtual bool FilterTraceEvent(const TraceEvent& trace_event) const = 0;
// Notifies the end of a duration event when the RAII macro goes out of scope.
virtual void EndEvent(const char* category_name,
const char* event_name) const;
private:
DISALLOW_COPY_AND_ASSIGN(TraceEventFilter);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_H_

View file

@ -0,0 +1,61 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_event_filter_test_utils.h"
#include "base/logging.h"
namespace base {
namespace trace_event {
namespace {
TestEventFilter::HitsCounter* g_hits_counter;
} // namespace
// static
const char TestEventFilter::kName[] = "testing_predicate";
bool TestEventFilter::filter_return_value_;
// static
std::unique_ptr<TraceEventFilter> TestEventFilter::Factory(
const std::string& predicate_name) {
std::unique_ptr<TraceEventFilter> res;
if (predicate_name == kName)
res.reset(new TestEventFilter());
return res;
}
TestEventFilter::TestEventFilter() = default;
TestEventFilter::~TestEventFilter() = default;
bool TestEventFilter::FilterTraceEvent(const TraceEvent& trace_event) const {
if (g_hits_counter)
g_hits_counter->filter_trace_event_hit_count++;
return filter_return_value_;
}
void TestEventFilter::EndEvent(const char* category_name,
const char* name) const {
if (g_hits_counter)
g_hits_counter->end_event_hit_count++;
}
TestEventFilter::HitsCounter::HitsCounter() {
Reset();
DCHECK(!g_hits_counter);
g_hits_counter = this;
}
TestEventFilter::HitsCounter::~HitsCounter() {
DCHECK(g_hits_counter);
g_hits_counter = nullptr;
}
void TestEventFilter::HitsCounter::Reset() {
filter_trace_event_hit_count = 0;
end_event_hit_count = 0;
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,53 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
#define BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_
#include <memory>
#include <string>
#include "base/macros.h"
#include "base/trace_event/trace_event_filter.h"
namespace base {
namespace trace_event {
class TestEventFilter : public TraceEventFilter {
public:
struct HitsCounter {
HitsCounter();
~HitsCounter();
void Reset();
size_t filter_trace_event_hit_count;
size_t end_event_hit_count;
};
static const char kName[];
// Factory method for TraceLog::SetFilterFactoryForTesting().
static std::unique_ptr<TraceEventFilter> Factory(
const std::string& predicate_name);
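// Illustrative test setup (a sketch; the surrounding test fixture is assumed):
//   TraceLog::GetInstance()->SetFilterFactoryForTesting(
//       &TestEventFilter::Factory);
//   TestEventFilter::set_filter_return_value(true);
//   TestEventFilter::HitsCounter hits;  // Counts filter/end-event hits.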
TestEventFilter();
~TestEventFilter() override;
// TraceEventFilter implementation.
bool FilterTraceEvent(const TraceEvent& trace_event) const override;
void EndEvent(const char* category_name, const char* name) const override;
static void set_filter_return_value(bool value) {
filter_return_value_ = value;
}
private:
static bool filter_return_value_;
DISALLOW_COPY_AND_ASSIGN(TestEventFilter);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_EVENT_FILTER_TEST_UTILS_H_

View file

@ -0,0 +1,313 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_event_impl.h"
#include <stddef.h>
#include "base/format_macros.h"
#include "base/json/string_escape.h"
#include "base/memory/ptr_util.h"
#include "base/process/process_handle.h"
#include "base/stl_util.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/strings/utf_string_conversions.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_log.h"
#include "base/trace_event/traced_value.h"
namespace base {
namespace trace_event {
bool ConvertableToTraceFormat::AppendToProto(ProtoAppender* appender) {
return false;
}
// See the comment near the TraceEvent::scope_ definition.
static_assert(trace_event_internal::kGlobalScope == nullptr,
"Invalid TraceEvent::scope default initializer value");
TraceEvent::TraceEvent() = default;
TraceEvent::TraceEvent(int thread_id,
TimeTicks timestamp,
ThreadTicks thread_timestamp,
ThreadInstructionCount thread_instruction_count,
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
unsigned long long bind_id,
TraceArguments* args,
unsigned int flags)
: timestamp_(timestamp),
thread_timestamp_(thread_timestamp),
thread_instruction_count_(thread_instruction_count),
scope_(scope),
id_(id),
category_group_enabled_(category_group_enabled),
name_(name),
thread_id_(thread_id),
flags_(flags),
bind_id_(bind_id),
phase_(phase) {
InitArgs(args);
}
TraceEvent::~TraceEvent() = default;
TraceEvent::TraceEvent(TraceEvent&& other) noexcept = default;
TraceEvent& TraceEvent::operator=(TraceEvent&& other) noexcept = default;
void TraceEvent::Reset() {
// Only reset fields that won't be initialized in Reset(int, ...), or that may
// hold references to other objects.
duration_ = TimeDelta::FromInternalValue(-1);
thread_instruction_delta_ = ThreadInstructionDelta();
args_.Reset();
parameter_copy_storage_.Reset();
}
void TraceEvent::Reset(int thread_id,
TimeTicks timestamp,
ThreadTicks thread_timestamp,
ThreadInstructionCount thread_instruction_count,
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
unsigned long long bind_id,
TraceArguments* args,
unsigned int flags) {
Reset();
timestamp_ = timestamp;
thread_timestamp_ = thread_timestamp;
scope_ = scope;
id_ = id;
category_group_enabled_ = category_group_enabled;
name_ = name;
thread_id_ = thread_id;
flags_ = flags;
bind_id_ = bind_id;
thread_instruction_count_ = thread_instruction_count;
phase_ = phase;
InitArgs(args);
}
void TraceEvent::InitArgs(TraceArguments* args) {
if (args)
args_ = std::move(*args);
args_.CopyStringsTo(&parameter_copy_storage_,
!!(flags_ & TRACE_EVENT_FLAG_COPY), &name_, &scope_);
}
void TraceEvent::UpdateDuration(const TimeTicks& now,
const ThreadTicks& thread_now,
ThreadInstructionCount thread_instruction_now) {
DCHECK_EQ(duration_.ToInternalValue(), -1);
duration_ = now - timestamp_;
// |thread_timestamp_| can be empty if the thread ticks clock wasn't
// initialized when it was recorded.
if (thread_timestamp_ != ThreadTicks())
thread_duration_ = thread_now - thread_timestamp_;
if (!thread_instruction_count_.is_null()) {
thread_instruction_delta_ =
thread_instruction_now - thread_instruction_count_;
}
}
void TraceEvent::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
overhead->Add(TraceEventMemoryOverhead::kTraceEvent,
parameter_copy_storage_.EstimateTraceMemoryOverhead());
for (size_t i = 0; i < arg_size(); ++i) {
if (arg_type(i) == TRACE_VALUE_TYPE_CONVERTABLE)
arg_value(i).as_convertable->EstimateTraceMemoryOverhead(overhead);
}
}
void TraceEvent::AppendAsJSON(
std::string* out,
const ArgumentFilterPredicate& argument_filter_predicate) const {
int64_t time_int64 = timestamp_.ToInternalValue();
int process_id;
int thread_id;
if ((flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID) &&
process_id_ != kNullProcessId) {
process_id = process_id_;
thread_id = -1;
} else {
process_id = TraceLog::GetInstance()->process_id();
thread_id = thread_id_;
}
const char* category_group_name =
TraceLog::GetCategoryGroupName(category_group_enabled_);
// Category group checked at category creation time.
DCHECK(!strchr(name_, '"'));
StringAppendF(out, "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64
",\"ph\":\"%c\",\"cat\":\"%s\",\"name\":",
process_id, thread_id, time_int64, phase_, category_group_name);
EscapeJSONString(name_, true, out);
*out += ",\"args\":";
// Output argument names and values, stop at first NULL argument name.
// TODO(oysteine): The dual predicates here are a bit ugly; if the filtering
// capabilities need to grow even more precise, we should rethink this
// approach.
ArgumentNameFilterPredicate argument_name_filter_predicate;
bool strip_args =
arg_size() > 0 && arg_name(0) && !argument_filter_predicate.is_null() &&
!argument_filter_predicate.Run(category_group_name, name_,
&argument_name_filter_predicate);
if (strip_args) {
*out += "\"__stripped__\"";
} else {
*out += "{";
for (size_t i = 0; i < arg_size() && arg_name(i); ++i) {
if (i > 0)
*out += ",";
*out += "\"";
*out += arg_name(i);
*out += "\":";
if (argument_name_filter_predicate.is_null() ||
argument_name_filter_predicate.Run(arg_name(i))) {
arg_value(i).AppendAsJSON(arg_type(i), out);
} else {
*out += "\"__stripped__\"";
}
}
*out += "}";
}
if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
int64_t duration = duration_.ToInternalValue();
if (duration != -1)
StringAppendF(out, ",\"dur\":%" PRId64, duration);
if (!thread_timestamp_.is_null()) {
int64_t thread_duration = thread_duration_.ToInternalValue();
if (thread_duration != -1)
StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
}
if (!thread_instruction_count_.is_null()) {
int64_t thread_instructions = thread_instruction_delta_.ToInternalValue();
StringAppendF(out, ",\"tidelta\":%" PRId64, thread_instructions);
}
}
// Output tts if thread_timestamp is valid.
if (!thread_timestamp_.is_null()) {
int64_t thread_time_int64 = thread_timestamp_.ToInternalValue();
StringAppendF(out, ",\"tts\":%" PRId64, thread_time_int64);
}
// Output ticount if thread_instruction_count is valid.
if (!thread_instruction_count_.is_null()) {
int64_t thread_instructions = thread_instruction_count_.ToInternalValue();
StringAppendF(out, ",\"ticount\":%" PRId64, thread_instructions);
}
// Output async tts marker field if flag is set.
if (flags_ & TRACE_EVENT_FLAG_ASYNC_TTS) {
StringAppendF(out, ", \"use_async_tts\":1");
}
// If id_ is set, print it out as a hex string so we don't lose any
// bits (it might be a 64-bit pointer).
unsigned int id_flags_ = flags_ & (TRACE_EVENT_FLAG_HAS_ID |
TRACE_EVENT_FLAG_HAS_LOCAL_ID |
TRACE_EVENT_FLAG_HAS_GLOBAL_ID);
if (id_flags_) {
if (scope_ != trace_event_internal::kGlobalScope)
StringAppendF(out, ",\"scope\":\"%s\"", scope_);
switch (id_flags_) {
case TRACE_EVENT_FLAG_HAS_ID:
StringAppendF(out, ",\"id\":\"0x%" PRIx64 "\"",
static_cast<uint64_t>(id_));
break;
case TRACE_EVENT_FLAG_HAS_LOCAL_ID:
StringAppendF(out, ",\"id2\":{\"local\":\"0x%" PRIx64 "\"}",
static_cast<uint64_t>(id_));
break;
case TRACE_EVENT_FLAG_HAS_GLOBAL_ID:
StringAppendF(out, ",\"id2\":{\"global\":\"0x%" PRIx64 "\"}",
static_cast<uint64_t>(id_));
break;
default:
NOTREACHED() << "More than one of the ID flags are set";
break;
}
}
if (flags_ & TRACE_EVENT_FLAG_BIND_TO_ENCLOSING)
StringAppendF(out, ",\"bp\":\"e\"");
if ((flags_ & TRACE_EVENT_FLAG_FLOW_OUT) ||
(flags_ & TRACE_EVENT_FLAG_FLOW_IN)) {
StringAppendF(out, ",\"bind_id\":\"0x%" PRIx64 "\"",
static_cast<uint64_t>(bind_id_));
}
if (flags_ & TRACE_EVENT_FLAG_FLOW_IN)
StringAppendF(out, ",\"flow_in\":true");
if (flags_ & TRACE_EVENT_FLAG_FLOW_OUT)
StringAppendF(out, ",\"flow_out\":true");
// Instant events also output their scope.
if (phase_ == TRACE_EVENT_PHASE_INSTANT) {
char scope = '?';
switch (flags_ & TRACE_EVENT_FLAG_SCOPE_MASK) {
case TRACE_EVENT_SCOPE_GLOBAL:
scope = TRACE_EVENT_SCOPE_NAME_GLOBAL;
break;
case TRACE_EVENT_SCOPE_PROCESS:
scope = TRACE_EVENT_SCOPE_NAME_PROCESS;
break;
case TRACE_EVENT_SCOPE_THREAD:
scope = TRACE_EVENT_SCOPE_NAME_THREAD;
break;
}
StringAppendF(out, ",\"s\":\"%c\"", scope);
}
*out += "}";
}
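// Illustrative output of AppendAsJSON() for a simple complete event (the
// field values are made up; the field order follows the StringAppendF calls
// above):
//   {"pid":1234,"tid":5678,"ts":100,"ph":"X","cat":"cc","name":"Draw",
//    "args":{},"dur":25}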
void TraceEvent::AppendPrettyPrinted(std::ostringstream* out) const {
*out << name_ << "[";
*out << TraceLog::GetCategoryGroupName(category_group_enabled_);
*out << "]";
if (arg_size() > 0 && arg_name(0)) {
*out << ", {";
for (size_t i = 0; i < arg_size() && arg_name(i); ++i) {
if (i > 0)
*out << ", ";
*out << arg_name(i) << ":";
std::string value_as_text;
arg_value(i).AppendAsJSON(arg_type(i), &value_as_text);
*out << value_as_text;
}
*out << "}";
}
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,199 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
#define BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_
#include <stdint.h>
#include <memory>
#include <string>
#include <vector>
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/callback.h"
#include "base/macros.h"
#include "base/observer_list.h"
#include "base/single_thread_task_runner.h"
#include "base/strings/string_util.h"
#include "base/synchronization/condition_variable.h"
#include "base/synchronization/lock.h"
#include "base/threading/thread_local.h"
#include "base/trace_event/common/trace_event_common.h"
#include "base/trace_event/thread_instruction_count.h"
#include "base/trace_event/trace_arguments.h"
#include "base/trace_event/trace_event_memory_overhead.h"
#include "build/build_config.h"
namespace base {
namespace trace_event {
typedef base::RepeatingCallback<bool(const char* arg_name)>
ArgumentNameFilterPredicate;
typedef base::RepeatingCallback<bool(const char* category_group_name,
const char* event_name,
ArgumentNameFilterPredicate*)>
ArgumentFilterPredicate;
typedef base::RepeatingCallback<bool(const std::string& metadata_name)>
MetadataFilterPredicate;
struct TraceEventHandle {
uint32_t chunk_seq;
// These numbers of bits must be kept consistent with
// TraceBufferChunk::kMaxTrunkIndex and
// TraceBufferChunk::kTraceBufferChunkSize (in trace_buffer.h).
unsigned chunk_index : 26;
unsigned event_index : 6;
};
class BASE_EXPORT TraceEvent {
public:
// TODO(898794): Remove once all users have been updated.
using TraceValue = base::trace_event::TraceValue;
TraceEvent();
TraceEvent(int thread_id,
TimeTicks timestamp,
ThreadTicks thread_timestamp,
ThreadInstructionCount thread_instruction_count,
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
unsigned long long bind_id,
TraceArguments* args,
unsigned int flags);
~TraceEvent();
// Allow move operations.
TraceEvent(TraceEvent&&) noexcept;
TraceEvent& operator=(TraceEvent&&) noexcept;
// Reset instance to empty state.
void Reset();
// Reset instance to new state. This is equivalent but slightly more
// efficient than doing a move assignment, since it avoids creating
// temporary copies. I.e. compare these two statements:
//
// event = TraceEvent(thread_id, ....); // Create and destroy temporary.
// event.Reset(thread_id, ...); // Direct re-initialization.
//
void Reset(int thread_id,
TimeTicks timestamp,
ThreadTicks thread_timestamp,
ThreadInstructionCount thread_instruction_count,
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
unsigned long long bind_id,
TraceArguments* args,
unsigned int flags);
void UpdateDuration(const TimeTicks& now,
const ThreadTicks& thread_now,
ThreadInstructionCount thread_instruction_now);
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
// Serialize event data to JSON
void AppendAsJSON(
std::string* out,
const ArgumentFilterPredicate& argument_filter_predicate) const;
void AppendPrettyPrinted(std::ostringstream* out) const;
TimeTicks timestamp() const { return timestamp_; }
ThreadTicks thread_timestamp() const { return thread_timestamp_; }
ThreadInstructionCount thread_instruction_count() const {
return thread_instruction_count_;
}
char phase() const { return phase_; }
int thread_id() const { return thread_id_; }
int process_id() const { return process_id_; }
TimeDelta duration() const { return duration_; }
TimeDelta thread_duration() const { return thread_duration_; }
ThreadInstructionDelta thread_instruction_delta() const {
return thread_instruction_delta_;
}
const char* scope() const { return scope_; }
unsigned long long id() const { return id_; }
unsigned int flags() const { return flags_; }
unsigned long long bind_id() const { return bind_id_; }
// Exposed for unittesting:
const StringStorage& parameter_copy_storage() const {
return parameter_copy_storage_;
}
const unsigned char* category_group_enabled() const {
return category_group_enabled_;
}
const char* name() const { return name_; }
size_t arg_size() const { return args_.size(); }
unsigned char arg_type(size_t index) const { return args_.types()[index]; }
const char* arg_name(size_t index) const { return args_.names()[index]; }
const TraceValue& arg_value(size_t index) const {
return args_.values()[index];
}
ConvertableToTraceFormat* arg_convertible_value(size_t index) {
return (arg_type(index) == TRACE_VALUE_TYPE_CONVERTABLE)
? arg_value(index).as_convertable
: nullptr;
}
#if defined(OS_ANDROID)
void SendToATrace();
#endif
private:
void InitArgs(TraceArguments* args);
// Note: these are ordered by size (largest first) for optimal packing.
TimeTicks timestamp_ = TimeTicks();
ThreadTicks thread_timestamp_ = ThreadTicks();
TimeDelta duration_ = TimeDelta::FromInternalValue(-1);
TimeDelta thread_duration_ = TimeDelta();
ThreadInstructionCount thread_instruction_count_ = ThreadInstructionCount();
ThreadInstructionDelta thread_instruction_delta_ = ThreadInstructionDelta();
// scope_ and id_ can be used to store phase-specific data.
// The following should be default-initialized to the expression
// trace_event_internal::kGlobalScope, which is nullptr, but its definition
// cannot be included here due to cyclical header dependencies.
// The equivalence is checked with a static_assert() in trace_event_impl.cc.
const char* scope_ = nullptr;
unsigned long long id_ = 0u;
const unsigned char* category_group_enabled_ = nullptr;
const char* name_ = nullptr;
StringStorage parameter_copy_storage_;
TraceArguments args_;
// Depending on TRACE_EVENT_FLAG_HAS_PROCESS_ID the event will have either:
// tid: thread_id_, pid: current_process_id (default case).
// tid: -1, pid: process_id_ (when flags_ & TRACE_EVENT_FLAG_HAS_PROCESS_ID).
union {
int thread_id_ = 0;
int process_id_;
};
unsigned int flags_ = 0;
unsigned long long bind_id_ = 0;
char phase_ = TRACE_EVENT_PHASE_BEGIN;
DISALLOW_COPY_AND_ASSIGN(TraceEvent);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_EVENT_IMPL_H_

View file

@ -0,0 +1,179 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_event_memory_overhead.h"
#include <algorithm>
#include "base/bits.h"
#include "base/memory/ref_counted_memory.h"
#include "base/strings/stringprintf.h"
#include "base/trace_event/memory_allocator_dump.h"
#include "base/trace_event/memory_usage_estimator.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/values.h"
namespace base {
namespace trace_event {
namespace {
const char* ObjectTypeToString(TraceEventMemoryOverhead::ObjectType type) {
switch (type) {
case TraceEventMemoryOverhead::kOther:
return "(Other)";
case TraceEventMemoryOverhead::kTraceBuffer:
return "TraceBuffer";
case TraceEventMemoryOverhead::kTraceBufferChunk:
return "TraceBufferChunk";
case TraceEventMemoryOverhead::kTraceEvent:
return "TraceEvent";
case TraceEventMemoryOverhead::kUnusedTraceEvent:
return "TraceEvent(Unused)";
case TraceEventMemoryOverhead::kTracedValue:
return "TracedValue";
case TraceEventMemoryOverhead::kConvertableToTraceFormat:
return "ConvertableToTraceFormat";
case TraceEventMemoryOverhead::kHeapProfilerAllocationRegister:
return "AllocationRegister";
case TraceEventMemoryOverhead::kHeapProfilerTypeNameDeduplicator:
return "TypeNameDeduplicator";
case TraceEventMemoryOverhead::kHeapProfilerStackFrameDeduplicator:
return "StackFrameDeduplicator";
case TraceEventMemoryOverhead::kStdString:
return "std::string";
case TraceEventMemoryOverhead::kBaseValue:
return "base::Value";
case TraceEventMemoryOverhead::kTraceEventMemoryOverhead:
return "TraceEventMemoryOverhead";
case TraceEventMemoryOverhead::kFrameMetrics:
return "FrameMetrics";
case TraceEventMemoryOverhead::kLast:
NOTREACHED();
}
NOTREACHED();
return "BUG";
}
} // namespace
TraceEventMemoryOverhead::TraceEventMemoryOverhead() : allocated_objects_() {}
TraceEventMemoryOverhead::~TraceEventMemoryOverhead() = default;
void TraceEventMemoryOverhead::AddInternal(ObjectType object_type,
size_t count,
size_t allocated_size_in_bytes,
size_t resident_size_in_bytes) {
ObjectCountAndSize& count_and_size =
allocated_objects_[static_cast<uint32_t>(object_type)];
count_and_size.count += count;
count_and_size.allocated_size_in_bytes += allocated_size_in_bytes;
count_and_size.resident_size_in_bytes += resident_size_in_bytes;
}
void TraceEventMemoryOverhead::Add(ObjectType object_type,
size_t allocated_size_in_bytes) {
Add(object_type, allocated_size_in_bytes, allocated_size_in_bytes);
}
void TraceEventMemoryOverhead::Add(ObjectType object_type,
size_t allocated_size_in_bytes,
size_t resident_size_in_bytes) {
AddInternal(object_type, 1, allocated_size_in_bytes, resident_size_in_bytes);
}
void TraceEventMemoryOverhead::AddString(const std::string& str) {
Add(kStdString, EstimateMemoryUsage(str));
}
void TraceEventMemoryOverhead::AddRefCountedString(
const RefCountedString& str) {
Add(kOther, sizeof(RefCountedString));
AddString(str.data());
}
void TraceEventMemoryOverhead::AddValue(const Value& value) {
switch (value.type()) {
case Value::Type::NONE:
case Value::Type::BOOLEAN:
case Value::Type::INTEGER:
case Value::Type::DOUBLE:
Add(kBaseValue, sizeof(Value));
break;
case Value::Type::STRING: {
const Value* string_value = nullptr;
value.GetAsString(&string_value);
Add(kBaseValue, sizeof(Value));
AddString(string_value->GetString());
} break;
case Value::Type::BINARY: {
Add(kBaseValue, sizeof(Value) + value.GetBlob().size());
} break;
case Value::Type::DICTIONARY: {
const DictionaryValue* dictionary_value = nullptr;
value.GetAsDictionary(&dictionary_value);
Add(kBaseValue, sizeof(DictionaryValue));
for (DictionaryValue::Iterator it(*dictionary_value); !it.IsAtEnd();
it.Advance()) {
AddString(it.key());
AddValue(it.value());
}
} break;
case Value::Type::LIST: {
const ListValue* list_value = nullptr;
value.GetAsList(&list_value);
Add(kBaseValue, sizeof(ListValue));
for (const auto& v : *list_value)
AddValue(v);
} break;
default:
NOTREACHED();
}
}
void TraceEventMemoryOverhead::AddSelf() {
Add(kTraceEventMemoryOverhead, sizeof(*this));
}
size_t TraceEventMemoryOverhead::GetCount(ObjectType object_type) const {
CHECK(object_type < kLast);
return allocated_objects_[static_cast<uint32_t>(object_type)].count;
}
void TraceEventMemoryOverhead::Update(const TraceEventMemoryOverhead& other) {
for (uint32_t i = 0; i < kLast; i++) {
const ObjectCountAndSize& other_entry = other.allocated_objects_[i];
AddInternal(static_cast<ObjectType>(i), other_entry.count,
other_entry.allocated_size_in_bytes,
other_entry.resident_size_in_bytes);
}
}
void TraceEventMemoryOverhead::DumpInto(const char* base_name,
ProcessMemoryDump* pmd) const {
for (uint32_t i = 0; i < kLast; i++) {
const ObjectCountAndSize& count_and_size = allocated_objects_[i];
if (count_and_size.allocated_size_in_bytes == 0)
continue;
std::string dump_name = StringPrintf(
"%s/%s", base_name, ObjectTypeToString(static_cast<ObjectType>(i)));
MemoryAllocatorDump* mad = pmd->CreateAllocatorDump(dump_name);
mad->AddScalar(MemoryAllocatorDump::kNameSize,
MemoryAllocatorDump::kUnitsBytes,
count_and_size.allocated_size_in_bytes);
mad->AddScalar("resident_size", MemoryAllocatorDump::kUnitsBytes,
count_and_size.resident_size_in_bytes);
mad->AddScalar(MemoryAllocatorDump::kNameObjectCount,
MemoryAllocatorDump::kUnitsObjects, count_and_size.count);
}
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,96 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_OVERHEAD_H_
#define BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_OVERHEAD_H_
#include <stddef.h>
#include <stdint.h>
#include <string>
#include <unordered_map>
#include "base/base_export.h"
#include "base/macros.h"
namespace base {
class RefCountedString;
class Value;
namespace trace_event {
class ProcessMemoryDump;
// Used to estimate the memory overhead of the tracing infrastructure.
class BASE_EXPORT TraceEventMemoryOverhead {
public:
enum ObjectType : uint32_t {
kOther = 0,
kTraceBuffer,
kTraceBufferChunk,
kTraceEvent,
kUnusedTraceEvent,
kTracedValue,
kConvertableToTraceFormat,
kHeapProfilerAllocationRegister,
kHeapProfilerTypeNameDeduplicator,
kHeapProfilerStackFrameDeduplicator,
kStdString,
kBaseValue,
kTraceEventMemoryOverhead,
kFrameMetrics,
kLast
};
TraceEventMemoryOverhead();
~TraceEventMemoryOverhead();
// Use this method to account for the overhead of an object for which an
// estimate is known for both the allocated and resident memory.
void Add(ObjectType object_type,
size_t allocated_size_in_bytes,
size_t resident_size_in_bytes);
// Similar to Add() above, but assumes that
// |resident_size_in_bytes| == |allocated_size_in_bytes|.
void Add(ObjectType object_type, size_t allocated_size_in_bytes);
// Specialized profiling functions for commonly used object types.
void AddString(const std::string& str);
void AddValue(const Value& value);
void AddRefCountedString(const RefCountedString& str);
// Call this after all the Add* methods above to account for the memory used
// by this TraceEventMemoryOverhead instance itself.
void AddSelf();
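// Typical use inside an EstimateTraceMemoryOverhead() implementation (an
// illustrative sketch; |MyConvertable| and |json_| are hypothetical):
//   void MyConvertable::EstimateTraceMemoryOverhead(
//       TraceEventMemoryOverhead* overhead) {
//     overhead->Add(TraceEventMemoryOverhead::kConvertableToTraceFormat,
//                   sizeof(*this));
//     overhead->AddString(json_);
//   }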
// Retrieves the count, that is, the count of Add*(|object_type|, ...) calls.
size_t GetCount(ObjectType object_type) const;
// Adds up and merges all the values from |other| to this instance.
void Update(const TraceEventMemoryOverhead& other);
void DumpInto(const char* base_name, ProcessMemoryDump* pmd) const;
private:
struct ObjectCountAndSize {
size_t count;
size_t allocated_size_in_bytes;
size_t resident_size_in_bytes;
};
ObjectCountAndSize allocated_objects_[ObjectType::kLast];
void AddInternal(ObjectType object_type,
size_t count,
size_t allocated_size_in_bytes,
size_t resident_size_in_bytes);
DISALLOW_COPY_AND_ASSIGN(TraceEventMemoryOverhead);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_EVENT_MEMORY_OVERHEAD_H_

File diff suppressed because it is too large

View file

@ -0,0 +1,569 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACE_LOG_H_
#define BASE_TRACE_EVENT_TRACE_LOG_H_
#include <stddef.h>
#include <stdint.h>
#include <atomic>
#include <memory>
#include <string>
#include <unordered_map>
#include <vector>
#include "base/atomicops.h"
#include "base/containers/stack.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/scoped_refptr.h"
#include "base/single_thread_task_runner.h"
#include "base/time/time_override.h"
#include "base/trace_event/category_registry.h"
#include "base/trace_event/memory_dump_provider.h"
#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_event_impl.h"
#include "build/build_config.h"
namespace base {
class RefCountedString;
template <typename T>
class NoDestructor;
namespace trace_event {
struct TraceCategory;
class TraceBuffer;
class TraceBufferChunk;
class TraceEvent;
class TraceEventFilter;
class TraceEventMemoryOverhead;
struct BASE_EXPORT TraceLogStatus {
TraceLogStatus();
~TraceLogStatus();
uint32_t event_capacity;
uint32_t event_count;
};
class BASE_EXPORT TraceLog : public MemoryDumpProvider {
public:
// Argument passed to TraceLog::SetEnabled.
enum Mode : uint8_t {
// Enables normal tracing (recording trace events in the trace buffer).
RECORDING_MODE = 1 << 0,
// Trace events are enabled just for filtering but not for recording. Only
// event filters config of |trace_config| argument is used.
FILTERING_MODE = 1 << 1
};
static TraceLog* GetInstance();
// Retrieves a copy (for thread-safety) of the current TraceConfig.
TraceConfig GetCurrentTraceConfig() const;
// Initializes the thread-local event buffer, if not already initialized and
// if the current thread supports that (has a message loop).
void InitializeThreadLocalEventBufferIfSupported();
// See TraceConfig comments for details on how to control which categories
// will be traced. SetDisabled must be called distinctly for each mode that is
// enabled. If tracing has already been enabled for recording, category filter
// (enabled and disabled categories) will be merged into the current category
// filter. Enabling RECORDING_MODE does not enable filters. Trace event
// filters will be used only if FILTERING_MODE is set on |modes_to_enable|.
// Unlike RECORDING_MODE, FILTERING_MODE doesn't support upgrading,
// i.e. filters can only be enabled if not previously enabled.
void SetEnabled(const TraceConfig& trace_config, uint8_t modes_to_enable);
// TODO(ssid): Remove the default SetEnabled and IsEnabled. They should take
// Mode as argument.
// Disables tracing for all categories for the specified |modes_to_disable|
// only. Only RECORDING_MODE is taken as default |modes_to_disable|.
void SetDisabled();
void SetDisabled(uint8_t modes_to_disable);
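// Illustrative enable/disable sequence (a sketch; the category list and
// options string are assumptions):
//   TraceLog* log = TraceLog::GetInstance();
//   log->SetEnabled(TraceConfig("cc,toplevel", "record-until-full"),
//                   TraceLog::RECORDING_MODE);
//   ...
//   log->SetDisabled(TraceLog::RECORDING_MODE);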
// Returns true if TraceLog is enabled on recording mode.
// Note: Returns false even if FILTERING_MODE is enabled.
bool IsEnabled() {
AutoLock lock(lock_);
return enabled_modes_ & RECORDING_MODE;
}
// Returns a bitmap of enabled modes from TraceLog::Mode.
uint8_t enabled_modes() { return enabled_modes_; }
// The number of times we have begun recording traces. If tracing is off,
// returns -1. If tracing is on, then it returns the number of times we have
// recorded a trace. By watching for this number to increment, you can
// passively discover when a new trace has begun. This is then used to
// implement the TRACE_EVENT_IS_NEW_TRACE() primitive.
int GetNumTracesRecorded();
#if defined(OS_ANDROID)
void StartATrace();
void StopATrace();
void AddClockSyncMetadataEvent();
#endif
// Enabled state listeners give a callback when tracing is enabled or
// disabled. This can be used to tie into other libraries' tracing systems
// on-demand.
class BASE_EXPORT EnabledStateObserver {
public:
virtual ~EnabledStateObserver() = default;
// Called just after the tracing system becomes enabled, outside of the
// |lock_|. TraceLog::IsEnabled() is true at this point.
virtual void OnTraceLogEnabled() = 0;
// Called just after the tracing system disables, outside of the |lock_|.
// TraceLog::IsEnabled() is false at this point.
virtual void OnTraceLogDisabled() = 0;
};
// Adds an observer. Cannot be called from within the observer callback.
void AddEnabledStateObserver(EnabledStateObserver* listener);
// Removes an observer. Cannot be called from within the observer callback.
void RemoveEnabledStateObserver(EnabledStateObserver* listener);
// Adds an observer that is owned by TraceLog. This is useful for agents that
// implement tracing feature that needs to stay alive as long as TraceLog
// does.
void AddOwnedEnabledStateObserver(
std::unique_ptr<EnabledStateObserver> listener);
bool HasEnabledStateObserver(EnabledStateObserver* listener) const;
// Asynchronous enabled state listeners. When tracing is enabled or disabled,
// for each observer, a task for invoking its appropriate callback is posted
// to the thread from which AddAsyncEnabledStateObserver() was called. This
// allows the observer to be safely destroyed, provided that it happens on the
// same thread that invoked AddAsyncEnabledStateObserver().
class BASE_EXPORT AsyncEnabledStateObserver {
public:
virtual ~AsyncEnabledStateObserver() = default;
// Posted just after the tracing system becomes enabled, outside |lock_|.
// TraceLog::IsEnabled() is true at this point.
virtual void OnTraceLogEnabled() = 0;
// Posted just after the tracing system becomes disabled, outside |lock_|.
// TraceLog::IsEnabled() is false at this point.
virtual void OnTraceLogDisabled() = 0;
};
// TODO(oysteine): This API originally needed to use WeakPtrs as the observer
// list was copied under the global trace lock, but iterated over outside of
// that lock so that observers could add tracing. The list is now protected by
// its own lock, so this can be changed to a raw ptr.
void AddAsyncEnabledStateObserver(
WeakPtr<AsyncEnabledStateObserver> listener);
void RemoveAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener);
bool HasAsyncEnabledStateObserver(AsyncEnabledStateObserver* listener) const;
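// Illustrative registration (a sketch; |observer| is assumed to derive from
// AsyncEnabledStateObserver and base::SupportsWeakPtr):
//   TraceLog::GetInstance()->AddAsyncEnabledStateObserver(
//       observer->AsWeakPtr());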
TraceLogStatus GetStatus() const;
bool BufferIsFull() const;
// Computes an estimate of the size of the TraceLog including all the retained
// objects.
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead);
void SetArgumentFilterPredicate(
const ArgumentFilterPredicate& argument_filter_predicate);
ArgumentFilterPredicate GetArgumentFilterPredicate() const;
void SetMetadataFilterPredicate(
const MetadataFilterPredicate& metadata_filter_predicate);
MetadataFilterPredicate GetMetadataFilterPredicate() const;
// Flush all collected events to the given output callback. The callback will
// be called one or more times either synchronously or asynchronously from
// the current thread with IPC-bite-size chunks. The string format is
// undefined. Use TraceResultBuffer to convert one or more trace strings to
// JSON. The callback can be null if the caller doesn't want any data.
// Due to the implementation of thread-local buffers, flush can't be
// done when tracing is enabled. If called when tracing is enabled, the
// callback will be called directly with (empty_string, false) to indicate
// the end of this unsuccessful flush. Flush does the serialization
// on the same thread if the caller doesn't set use_worker_thread explicitly.
using OutputCallback =
base::RepeatingCallback<void(const scoped_refptr<base::RefCountedString>&,
bool has_more_events)>;
void Flush(const OutputCallback& cb, bool use_worker_thread = false);
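// Illustrative flush (a sketch; the lambda body is an assumption):
//   TraceLog::GetInstance()->Flush(base::BindRepeating(
//       [](const scoped_refptr<base::RefCountedString>& chunk,
//          bool has_more_events) {
//         // Each |chunk| holds an IPC-bite-size piece of the serialized
//         // trace; use TraceResultBuffer to assemble JSON.
//       }));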
// Cancels tracing and discards collected data.
void CancelTracing(const OutputCallback& cb);
using AddTraceEventOverrideFunction = void (*)(TraceEvent*,
bool thread_will_flush,
TraceEventHandle* handle);
using OnFlushFunction = void (*)();
using UpdateDurationFunction =
void (*)(const unsigned char* category_group_enabled,
const char* name,
TraceEventHandle handle,
int thread_id,
bool explicit_timestamps,
const TimeTicks& now,
const ThreadTicks& thread_now,
ThreadInstructionCount thread_instruction_now);
// The callbacks will be called up until the point where the flush is
// finished, i.e. must be callable until OutputCallback is called with
// has_more_events==false.
void SetAddTraceEventOverrides(
const AddTraceEventOverrideFunction& add_event_override,
const OnFlushFunction& on_flush_callback,
const UpdateDurationFunction& update_duration_callback);
// Called by TRACE_EVENT* macros, don't call this directly.
// The name parameter is a category group for example:
// TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent")
static const unsigned char* GetCategoryGroupEnabled(const char* name);
static const char* GetCategoryGroupName(
const unsigned char* category_group_enabled);
static constexpr const unsigned char* GetBuiltinCategoryEnabled(
const char* name) {
TraceCategory* builtin_category =
CategoryRegistry::GetBuiltinCategoryByName(name);
if (builtin_category)
return builtin_category->state_ptr();
return nullptr;
}
// Called by TRACE_EVENT* macros, don't call this directly.
// If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied
// into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above.
bool ShouldAddAfterUpdatingState(char phase,
const unsigned char* category_group_enabled,
const char* name,
unsigned long long id,
int thread_id,
TraceArguments* args);
TraceEventHandle AddTraceEvent(char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
TraceArguments* args,
unsigned int flags);
TraceEventHandle AddTraceEventWithBindId(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
unsigned long long bind_id,
TraceArguments* args,
unsigned int flags);
TraceEventHandle AddTraceEventWithProcessId(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
int process_id,
TraceArguments* args,
unsigned int flags);
TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
int thread_id,
const TimeTicks& timestamp,
TraceArguments* args,
unsigned int flags);
TraceEventHandle AddTraceEventWithThreadIdAndTimestamp(
char phase,
const unsigned char* category_group_enabled,
const char* name,
const char* scope,
unsigned long long id,
unsigned long long bind_id,
int thread_id,
const TimeTicks& timestamp,
TraceArguments* args,
unsigned int flags);
// Adds a metadata event that will be written when the trace log is flushed.
void AddMetadataEvent(const unsigned char* category_group_enabled,
const char* name,
TraceArguments* args,
unsigned int flags);
void UpdateTraceEventDuration(const unsigned char* category_group_enabled,
const char* name,
TraceEventHandle handle);
void UpdateTraceEventDurationExplicit(
const unsigned char* category_group_enabled,
const char* name,
TraceEventHandle handle,
int thread_id,
bool explicit_timestamps,
const TimeTicks& now,
const ThreadTicks& thread_now,
ThreadInstructionCount thread_instruction_now);
void EndFilteredEvent(const unsigned char* category_group_enabled,
const char* name,
TraceEventHandle handle);
int process_id() const { return process_id_; }
const std::string& process_name() const { return process_name_; }
uint64_t MangleEventId(uint64_t id);
// Exposed for unittesting:
// Testing factory for TraceEventFilter.
typedef std::unique_ptr<TraceEventFilter> (*FilterFactoryForTesting)(
const std::string& /* predicate_name */);
void SetFilterFactoryForTesting(FilterFactoryForTesting factory) {
filter_factory_for_testing_ = factory;
}
// Allows clearing up our singleton instance.
static void ResetForTesting();
// Allow tests to inspect TraceEvents.
TraceEvent* GetEventByHandle(TraceEventHandle handle);
void SetProcessID(int process_id);
// Process sort indices, if set, override the order in which a process will
// appear relative to other processes in the trace viewer. Processes are
// sorted first on their sort index, ascending, then by their name, and then
// by tid.
void SetProcessSortIndex(int sort_index);
// Sets the name of the process.
void set_process_name(const std::string& process_name) {
AutoLock lock(lock_);
process_name_ = process_name;
}
bool IsProcessNameEmpty() const { return process_name_.empty(); }
// Processes can have labels in addition to their names. Use labels, for
// instance, to list out the web page titles that a process is handling.
void UpdateProcessLabel(int label_id, const std::string& current_label);
void RemoveProcessLabel(int label_id);
// Thread sort indices, if set, override the order in which a thread will
// appear within its process in the trace viewer. Threads are sorted first on
// their sort index, ascending, then by their name, and then by tid.
void SetThreadSortIndex(PlatformThreadId thread_id, int sort_index);
// Allow setting an offset between the current TimeTicks time and the time
// that should be reported.
void SetTimeOffset(TimeDelta offset);
size_t GetObserverCountForTest() const;
// Call this method if the current thread may block the message loop, to
// prevent the thread from using the thread-local buffer: the thread may not
// handle the flush request in time, causing loss of unflushed events.
#if defined(OS_WIN)
// This function is called by the ETW exporting module whenever the ETW
// keyword (flags) changes. This keyword indicates which categories should be
// exported, so whenever it changes, we adjust accordingly.
void UpdateETWCategoryGroupEnabledFlags();
#endif
// Replaces |logged_events_| with a new TraceBuffer for testing.
void SetTraceBufferForTesting(std::unique_ptr<TraceBuffer> trace_buffer);
private:
typedef unsigned int InternalTraceOptions;
FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
TraceBufferRingBufferGetReturnChunk);
FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
TraceBufferRingBufferHalfIteration);
FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
TraceBufferRingBufferFullIteration);
FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, TraceBufferVectorReportFull);
FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
ConvertTraceConfigToInternalOptions);
FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture,
TraceRecordAsMuchAsPossibleMode);
FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, ConfigTraceBufferLimit);
friend class base::NoDestructor<TraceLog>;
// MemoryDumpProvider implementation.
bool OnMemoryDump(const MemoryDumpArgs& args,
ProcessMemoryDump* pmd) override;
// Enable/disable each category group based on the current mode_,
// category_filter_ and event_filters_enabled_.
// Enable the category group in recording mode if category_filter_ is not
// null and matches the category group. Enable the category for filtering if
// any filter in event_filters_enabled_ enables it.
void UpdateCategoryRegistry();
void UpdateCategoryState(TraceCategory* category);
void CreateFiltersForTraceConfig();
InternalTraceOptions GetInternalOptionsFromTraceConfig(
const TraceConfig& config);
class ThreadLocalEventBuffer;
class OptionalAutoLock;
struct RegisteredAsyncObserver;
TraceLog();
~TraceLog() override;
void AddMetadataEventsWhileLocked();
template <typename T>
void AddMetadataEventWhileLocked(int thread_id,
const char* metadata_name,
const char* arg_name,
const T& value);
InternalTraceOptions trace_options() const {
return static_cast<InternalTraceOptions>(
subtle::NoBarrier_Load(&trace_options_));
}
TraceBuffer* trace_buffer() const { return logged_events_.get(); }
TraceBuffer* CreateTraceBuffer();
std::string EventToConsoleMessage(unsigned char phase,
const TimeTicks& timestamp,
TraceEvent* trace_event);
TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle,
bool check_buffer_is_full);
void CheckIfBufferIsFullWhileLocked();
void SetDisabledWhileLocked(uint8_t modes);
TraceEvent* GetEventByHandleInternal(TraceEventHandle handle,
OptionalAutoLock* lock);
void FlushInternal(const OutputCallback& cb,
bool use_worker_thread,
bool discard_events);
// |generation| is used in the following callbacks to check if the callback
// is called for the flush of the current |logged_events_|.
void FlushCurrentThread(int generation, bool discard_events);
// Usually it runs on a different thread.
static void ConvertTraceEventsToTraceFormat(
std::unique_ptr<TraceBuffer> logged_events,
const TraceLog::OutputCallback& flush_output_callback,
const ArgumentFilterPredicate& argument_filter_predicate);
void FinishFlush(int generation, bool discard_events);
void OnFlushTimeout(int generation, bool discard_events);
int generation() const {
return static_cast<int>(subtle::NoBarrier_Load(&generation_));
}
bool CheckGeneration(int generation) const {
return generation == this->generation();
}
void UseNextTraceBuffer();
TimeTicks OffsetNow() const {
// This should be TRACE_TIME_TICKS_NOW but include order makes that hard.
return OffsetTimestamp(base::subtle::TimeTicksNowIgnoringOverride());
}
TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const {
return timestamp - time_offset_;
}
// Internal representation of trace options since we store the currently used
// trace option as an AtomicWord.
static const InternalTraceOptions kInternalNone;
static const InternalTraceOptions kInternalRecordUntilFull;
static const InternalTraceOptions kInternalRecordContinuously;
static const InternalTraceOptions kInternalEchoToConsole;
static const InternalTraceOptions kInternalRecordAsMuchAsPossible;
static const InternalTraceOptions kInternalEnableArgumentFilter;
// This lock protects TraceLog member accesses (except for members protected
// by thread_info_lock_) from arbitrary threads.
mutable Lock lock_;
// This lock protects accesses to thread_names_, thread_event_start_times_
// and thread_colors_.
Lock thread_info_lock_;
uint8_t enabled_modes_; // See TraceLog::Mode.
int num_traces_recorded_;
std::unique_ptr<TraceBuffer> logged_events_;
std::vector<std::unique_ptr<TraceEvent>> metadata_events_;
// The lock protects observers access.
mutable Lock observers_lock_;
bool dispatching_to_observers_ = false;
std::vector<EnabledStateObserver*> enabled_state_observers_;
std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver>
async_observers_;
// Manages ownership of the owned observers. The owned observers will also be
// added to |enabled_state_observers_|.
std::vector<std::unique_ptr<EnabledStateObserver>>
owned_enabled_state_observer_copy_;
std::string process_name_;
std::unordered_map<int, std::string> process_labels_;
int process_sort_index_;
std::unordered_map<int, int> thread_sort_indices_;
std::unordered_map<int, std::string> thread_names_;
base::Time process_creation_time_;
// The following two maps are used only when ECHO_TO_CONSOLE.
std::unordered_map<int, base::stack<TimeTicks>> thread_event_start_times_;
std::unordered_map<std::string, int> thread_colors_;
TimeTicks buffer_limit_reached_timestamp_;
// XORed with TraceID to make it unlikely to collide with other processes.
unsigned long long process_id_hash_;
int process_id_;
TimeDelta time_offset_;
subtle::AtomicWord /* Options */ trace_options_;
TraceConfig trace_config_;
TraceConfig::EventFilters enabled_event_filters_;
ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_;
ThreadLocalBoolean thread_blocks_message_loop_;
ThreadLocalBoolean thread_is_in_trace_event_;
// Contains task runners for the threads that have had at least one event
// added into the local event buffer.
std::unordered_map<int, scoped_refptr<SingleThreadTaskRunner>>
thread_task_runners_;
// For events which can't be added into the thread local buffer, e.g. events
// from threads without a message loop.
std::unique_ptr<TraceBufferChunk> thread_shared_chunk_;
size_t thread_shared_chunk_index_;
// Set when asynchronous Flush is in progress.
OutputCallback flush_output_callback_;
scoped_refptr<SequencedTaskRunner> flush_task_runner_;
ArgumentFilterPredicate argument_filter_predicate_;
MetadataFilterPredicate metadata_filter_predicate_;
subtle::AtomicWord generation_;
bool use_worker_thread_;
std::atomic<AddTraceEventOverrideFunction> add_trace_event_override_{nullptr};
std::atomic<OnFlushFunction> on_flush_override_{nullptr};
std::atomic<UpdateDurationFunction> update_duration_override_{nullptr};
FilterFactoryForTesting filter_factory_for_testing_;
DISALLOW_COPY_AND_ASSIGN(TraceLog);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACE_LOG_H_

View file

@ -0,0 +1,26 @@
// Copyright (c) 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/trace_log.h"
namespace base {
namespace trace_event {
// Constants used by TraceLog's internal implementation of trace options.
const TraceLog::InternalTraceOptions
TraceLog::kInternalNone = 0;
const TraceLog::InternalTraceOptions
TraceLog::kInternalRecordUntilFull = 1 << 0;
const TraceLog::InternalTraceOptions
TraceLog::kInternalRecordContinuously = 1 << 1;
// 1 << 2 is reserved for the DEPRECATED kInternalEnableSampling. DO NOT USE.
const TraceLog::InternalTraceOptions
TraceLog::kInternalEchoToConsole = 1 << 3;
const TraceLog::InternalTraceOptions
TraceLog::kInternalRecordAsMuchAsPossible = 1 << 4;
const TraceLog::InternalTraceOptions
TraceLog::kInternalEnableArgumentFilter = 1 << 5;
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,638 @@
// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/traced_value.h"
#include <stdint.h>
#include <atomic>
#include <utility>
#include "base/bits.h"
#include "base/containers/circular_deque.h"
#include "base/json/json_writer.h"
#include "base/json/string_escape.h"
#include "base/memory/ptr_util.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_impl.h"
#include "base/trace_event/trace_event_memory_overhead.h"
#include "base/trace_event/trace_log.h"
#include "base/values.h"
namespace base {
namespace trace_event {
namespace {
const char kTypeStartDict = '{';
const char kTypeEndDict = '}';
const char kTypeStartArray = '[';
const char kTypeEndArray = ']';
const char kTypeBool = 'b';
const char kTypeInt = 'i';
const char kTypeDouble = 'd';
const char kTypeString = 's';
const char kTypeCStr = '*'; // only used for key names
std::atomic<TracedValue::WriterFactoryCallback> g_writer_factory_callback;
#ifndef NDEBUG
const bool kStackTypeDict = false;
const bool kStackTypeArray = true;
#define DCHECK_CURRENT_CONTAINER_IS(x) DCHECK_EQ(x, nesting_stack_.back())
#define DCHECK_CONTAINER_STACK_DEPTH_EQ(x) DCHECK_EQ(x, nesting_stack_.size())
#define DEBUG_PUSH_CONTAINER(x) nesting_stack_.push_back(x)
#define DEBUG_POP_CONTAINER() nesting_stack_.pop_back()
#else
#define DCHECK_CURRENT_CONTAINER_IS(x) \
do { \
} while (0)
#define DCHECK_CONTAINER_STACK_DEPTH_EQ(x) \
do { \
} while (0)
#define DEBUG_PUSH_CONTAINER(x) \
do { \
} while (0)
#define DEBUG_POP_CONTAINER() \
do { \
} while (0)
#endif
inline void WriteKeyNameAsRawPtr(Pickle& pickle, const char* ptr) {
pickle.WriteBytes(&kTypeCStr, 1);
pickle.WriteUInt64(static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr)));
}
inline void WriteKeyNameWithCopy(Pickle& pickle, base::StringPiece str) {
pickle.WriteBytes(&kTypeString, 1);
pickle.WriteString(str);
}
std::string ReadKeyName(PickleIterator& pickle_iterator) {
const char* type = nullptr;
bool res = pickle_iterator.ReadBytes(&type, 1);
std::string key_name;
if (res && *type == kTypeCStr) {
uint64_t ptr_value = 0;
res = pickle_iterator.ReadUInt64(&ptr_value);
key_name = reinterpret_cast<const char*>(static_cast<uintptr_t>(ptr_value));
} else if (res && *type == kTypeString) {
res = pickle_iterator.ReadString(&key_name);
}
DCHECK(res);
return key_name;
}
class PickleWriter final : public TracedValue::Writer {
public:
explicit PickleWriter(size_t capacity) {
if (capacity) {
pickle_.Reserve(capacity);
}
}
bool IsPickleWriter() const override { return true; }
bool IsProtoWriter() const override { return false; }
void SetInteger(const char* name, int value) override {
pickle_.WriteBytes(&kTypeInt, 1);
pickle_.WriteInt(value);
WriteKeyNameAsRawPtr(pickle_, name);
}
void SetIntegerWithCopiedName(base::StringPiece name, int value) override {
pickle_.WriteBytes(&kTypeInt, 1);
pickle_.WriteInt(value);
WriteKeyNameWithCopy(pickle_, name);
}
void SetDouble(const char* name, double value) override {
pickle_.WriteBytes(&kTypeDouble, 1);
pickle_.WriteDouble(value);
WriteKeyNameAsRawPtr(pickle_, name);
}
void SetDoubleWithCopiedName(base::StringPiece name, double value) override {
pickle_.WriteBytes(&kTypeDouble, 1);
pickle_.WriteDouble(value);
WriteKeyNameWithCopy(pickle_, name);
}
void SetBoolean(const char* name, bool value) override {
pickle_.WriteBytes(&kTypeBool, 1);
pickle_.WriteBool(value);
WriteKeyNameAsRawPtr(pickle_, name);
}
void SetBooleanWithCopiedName(base::StringPiece name, bool value) override {
pickle_.WriteBytes(&kTypeBool, 1);
pickle_.WriteBool(value);
WriteKeyNameWithCopy(pickle_, name);
}
void SetString(const char* name, base::StringPiece value) override {
pickle_.WriteBytes(&kTypeString, 1);
pickle_.WriteString(value);
WriteKeyNameAsRawPtr(pickle_, name);
}
void SetStringWithCopiedName(base::StringPiece name,
base::StringPiece value) override {
pickle_.WriteBytes(&kTypeString, 1);
pickle_.WriteString(value);
WriteKeyNameWithCopy(pickle_, name);
}
void SetValue(const char* name, Writer* value) override {
DCHECK(value->IsPickleWriter());
const PickleWriter* pickle_writer = static_cast<const PickleWriter*>(value);
BeginDictionary(name);
pickle_.WriteBytes(pickle_writer->pickle_.payload(),
static_cast<int>(pickle_writer->pickle_.payload_size()));
EndDictionary();
}
void SetValueWithCopiedName(base::StringPiece name, Writer* value) override {
DCHECK(value->IsPickleWriter());
const PickleWriter* pickle_writer = static_cast<const PickleWriter*>(value);
BeginDictionaryWithCopiedName(name);
pickle_.WriteBytes(pickle_writer->pickle_.payload(),
static_cast<int>(pickle_writer->pickle_.payload_size()));
EndDictionary();
}
void BeginArray() override { pickle_.WriteBytes(&kTypeStartArray, 1); }
void BeginDictionary() override { pickle_.WriteBytes(&kTypeStartDict, 1); }
void BeginDictionary(const char* name) override {
pickle_.WriteBytes(&kTypeStartDict, 1);
WriteKeyNameAsRawPtr(pickle_, name);
}
void BeginDictionaryWithCopiedName(base::StringPiece name) override {
pickle_.WriteBytes(&kTypeStartDict, 1);
WriteKeyNameWithCopy(pickle_, name);
}
void BeginArray(const char* name) override {
pickle_.WriteBytes(&kTypeStartArray, 1);
WriteKeyNameAsRawPtr(pickle_, name);
}
void BeginArrayWithCopiedName(base::StringPiece name) override {
pickle_.WriteBytes(&kTypeStartArray, 1);
WriteKeyNameWithCopy(pickle_, name);
}
void EndDictionary() override { pickle_.WriteBytes(&kTypeEndDict, 1); }
void EndArray() override { pickle_.WriteBytes(&kTypeEndArray, 1); }
void AppendInteger(int value) override {
pickle_.WriteBytes(&kTypeInt, 1);
pickle_.WriteInt(value);
}
void AppendDouble(double value) override {
pickle_.WriteBytes(&kTypeDouble, 1);
pickle_.WriteDouble(value);
}
void AppendBoolean(bool value) override {
pickle_.WriteBytes(&kTypeBool, 1);
pickle_.WriteBool(value);
}
void AppendString(base::StringPiece value) override {
pickle_.WriteBytes(&kTypeString, 1);
pickle_.WriteString(value);
}
void AppendAsTraceFormat(std::string* out) const override {
struct State {
enum Type { kTypeDict, kTypeArray };
Type type;
bool needs_comma;
};
auto maybe_append_key_name = [](State current_state, PickleIterator* it,
std::string* out) {
if (current_state.type == State::kTypeDict) {
EscapeJSONString(ReadKeyName(*it), true, out);
out->append(":");
}
};
base::circular_deque<State> state_stack;
out->append("{");
state_stack.push_back({State::kTypeDict});
PickleIterator it(pickle_);
for (const char* type; it.ReadBytes(&type, 1);) {
switch (*type) {
case kTypeEndDict:
out->append("}");
state_stack.pop_back();
continue;
case kTypeEndArray:
out->append("]");
state_stack.pop_back();
continue;
}
// Use an index so it will stay valid across resizes.
size_t current_state_index = state_stack.size() - 1;
if (state_stack[current_state_index].needs_comma) {
out->append(",");
}
switch (*type) {
case kTypeStartDict: {
maybe_append_key_name(state_stack[current_state_index], &it, out);
out->append("{");
state_stack.push_back({State::kTypeDict});
break;
}
case kTypeStartArray: {
maybe_append_key_name(state_stack[current_state_index], &it, out);
out->append("[");
state_stack.push_back({State::kTypeArray});
break;
}
case kTypeBool: {
TraceEvent::TraceValue json_value;
CHECK(it.ReadBool(&json_value.as_bool));
maybe_append_key_name(state_stack[current_state_index], &it, out);
json_value.AppendAsJSON(TRACE_VALUE_TYPE_BOOL, out);
break;
}
case kTypeInt: {
int value;
CHECK(it.ReadInt(&value));
maybe_append_key_name(state_stack[current_state_index], &it, out);
TraceEvent::TraceValue json_value;
json_value.as_int = value;
json_value.AppendAsJSON(TRACE_VALUE_TYPE_INT, out);
break;
}
case kTypeDouble: {
TraceEvent::TraceValue json_value;
CHECK(it.ReadDouble(&json_value.as_double));
maybe_append_key_name(state_stack[current_state_index], &it, out);
json_value.AppendAsJSON(TRACE_VALUE_TYPE_DOUBLE, out);
break;
}
case kTypeString: {
std::string value;
CHECK(it.ReadString(&value));
maybe_append_key_name(state_stack[current_state_index], &it, out);
TraceEvent::TraceValue json_value;
json_value.as_string = value.c_str();
json_value.AppendAsJSON(TRACE_VALUE_TYPE_STRING, out);
break;
}
default:
NOTREACHED();
}
state_stack[current_state_index].needs_comma = true;
}
out->append("}");
state_stack.pop_back();
DCHECK(state_stack.empty());
}
void EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) override {
overhead->Add(TraceEventMemoryOverhead::kTracedValue,
/* allocated size */
pickle_.GetTotalAllocatedSize(),
/* resident size */
pickle_.size());
}
std::unique_ptr<base::Value> ToBaseValue() const {
base::Value root(base::Value::Type::DICTIONARY);
Value* cur_dict = &root;
Value* cur_list = nullptr;
std::vector<Value*> stack;
PickleIterator it(pickle_);
const char* type;
while (it.ReadBytes(&type, 1)) {
DCHECK((cur_dict && !cur_list) || (cur_list && !cur_dict));
switch (*type) {
case kTypeStartDict: {
base::Value new_dict(base::Value::Type::DICTIONARY);
if (cur_dict) {
stack.push_back(cur_dict);
cur_dict = cur_dict->SetKey(ReadKeyName(it), std::move(new_dict));
} else {
cur_list->Append(std::move(new_dict));
// |new_dict| is invalidated at this point, so |cur_dict| needs to
// be reset.
cur_dict = &cur_list->GetList().back();
stack.push_back(cur_list);
cur_list = nullptr;
}
} break;
case kTypeEndArray:
case kTypeEndDict: {
if (stack.back()->is_dict()) {
cur_dict = stack.back();
cur_list = nullptr;
} else if (stack.back()->is_list()) {
cur_list = stack.back();
cur_dict = nullptr;
}
stack.pop_back();
} break;
case kTypeStartArray: {
base::Value new_list(base::Value::Type::LIST);
if (cur_dict) {
stack.push_back(cur_dict);
cur_list = cur_dict->SetKey(ReadKeyName(it), std::move(new_list));
cur_dict = nullptr;
} else {
cur_list->Append(std::move(new_list));
stack.push_back(cur_list);
// |cur_list| is invalidated at this point by the Append, so it
// needs to be reset.
cur_list = &cur_list->GetList().back();
}
} break;
case kTypeBool: {
bool value;
CHECK(it.ReadBool(&value));
if (cur_dict) {
cur_dict->SetBoolKey(ReadKeyName(it), value);
} else {
cur_list->Append(value);
}
} break;
case kTypeInt: {
int value;
CHECK(it.ReadInt(&value));
if (cur_dict) {
cur_dict->SetIntKey(ReadKeyName(it), value);
} else {
cur_list->Append(value);
}
} break;
case kTypeDouble: {
TraceEvent::TraceValue trace_value;
CHECK(it.ReadDouble(&trace_value.as_double));
Value base_value;
if (!std::isfinite(trace_value.as_double)) {
// base::Value doesn't support nan and infinity values. Use strings
// for them instead. This follows the same convention in
// AppendAsTraceFormat(), supported by TraceValue::Append*().
std::string value_string;
trace_value.AppendAsString(TRACE_VALUE_TYPE_DOUBLE, &value_string);
base_value = Value(value_string);
} else {
base_value = Value(trace_value.as_double);
}
if (cur_dict) {
cur_dict->SetKey(ReadKeyName(it), std::move(base_value));
} else {
cur_list->Append(std::move(base_value));
}
} break;
case kTypeString: {
std::string value;
CHECK(it.ReadString(&value));
if (cur_dict) {
cur_dict->SetStringKey(ReadKeyName(it), std::move(value));
} else {
cur_list->Append(std::move(value));
}
} break;
default:
NOTREACHED();
}
}
DCHECK(stack.empty());
return base::Value::ToUniquePtrValue(std::move(root));
}
private:
Pickle pickle_;
};
std::unique_ptr<TracedValue::Writer> CreateWriter(size_t capacity) {
TracedValue::WriterFactoryCallback callback =
g_writer_factory_callback.load(std::memory_order_relaxed);
if (callback) {
return callback(capacity);
}
return std::make_unique<PickleWriter>(capacity);
}
} // namespace
bool TracedValue::Writer::AppendToProto(ProtoAppender* appender) {
return false;
}
// static
void TracedValue::SetWriterFactoryCallback(WriterFactoryCallback callback) {
g_writer_factory_callback.store(callback);
}
TracedValue::TracedValue(size_t capacity)
: TracedValue(capacity, /*forced_json*/ false) {}
TracedValue::TracedValue(size_t capacity, bool forced_json) {
DEBUG_PUSH_CONTAINER(kStackTypeDict);
writer_ = forced_json ? std::make_unique<PickleWriter>(capacity)
: CreateWriter(capacity);
}
TracedValue::~TracedValue() {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
DEBUG_POP_CONTAINER();
DCHECK_CONTAINER_STACK_DEPTH_EQ(0u);
}
void TracedValue::SetInteger(const char* name, int value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
writer_->SetInteger(name, value);
}
void TracedValue::SetIntegerWithCopiedName(base::StringPiece name, int value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
writer_->SetIntegerWithCopiedName(name, value);
}
void TracedValue::SetDouble(const char* name, double value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
writer_->SetDouble(name, value);
}
void TracedValue::SetDoubleWithCopiedName(base::StringPiece name,
double value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
writer_->SetDoubleWithCopiedName(name, value);
}
void TracedValue::SetBoolean(const char* name, bool value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
writer_->SetBoolean(name, value);
}
void TracedValue::SetBooleanWithCopiedName(base::StringPiece name, bool value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
writer_->SetBooleanWithCopiedName(name, value);
}
void TracedValue::SetString(const char* name, base::StringPiece value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
writer_->SetString(name, value);
}
void TracedValue::SetStringWithCopiedName(base::StringPiece name,
base::StringPiece value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
writer_->SetStringWithCopiedName(name, value);
}
void TracedValue::SetValue(const char* name, TracedValue* value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
writer_->SetValue(name, value->writer_.get());
}
void TracedValue::SetValueWithCopiedName(base::StringPiece name,
TracedValue* value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
writer_->SetValueWithCopiedName(name, value->writer_.get());
}
void TracedValue::BeginDictionary(const char* name) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
DEBUG_PUSH_CONTAINER(kStackTypeDict);
writer_->BeginDictionary(name);
}
void TracedValue::BeginDictionaryWithCopiedName(base::StringPiece name) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
DEBUG_PUSH_CONTAINER(kStackTypeDict);
writer_->BeginDictionaryWithCopiedName(name);
}
void TracedValue::BeginArray(const char* name) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
DEBUG_PUSH_CONTAINER(kStackTypeArray);
writer_->BeginArray(name);
}
void TracedValue::BeginArrayWithCopiedName(base::StringPiece name) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
DEBUG_PUSH_CONTAINER(kStackTypeArray);
writer_->BeginArrayWithCopiedName(name);
}
void TracedValue::AppendInteger(int value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
writer_->AppendInteger(value);
}
void TracedValue::AppendDouble(double value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
writer_->AppendDouble(value);
}
void TracedValue::AppendBoolean(bool value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
writer_->AppendBoolean(value);
}
void TracedValue::AppendString(base::StringPiece value) {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
writer_->AppendString(value);
}
void TracedValue::BeginArray() {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
DEBUG_PUSH_CONTAINER(kStackTypeArray);
writer_->BeginArray();
}
void TracedValue::BeginDictionary() {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
DEBUG_PUSH_CONTAINER(kStackTypeDict);
writer_->BeginDictionary();
}
void TracedValue::EndArray() {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeArray);
DEBUG_POP_CONTAINER();
writer_->EndArray();
}
void TracedValue::EndDictionary() {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
DEBUG_POP_CONTAINER();
writer_->EndDictionary();
}
std::unique_ptr<base::Value> TracedValue::ToBaseValue() const {
DCHECK(writer_->IsPickleWriter());
return static_cast<const PickleWriter*>(writer_.get())->ToBaseValue();
}
void TracedValue::AppendAsTraceFormat(std::string* out) const {
DCHECK_CURRENT_CONTAINER_IS(kStackTypeDict);
DCHECK_CONTAINER_STACK_DEPTH_EQ(1u);
writer_->AppendAsTraceFormat(out);
}
bool TracedValue::AppendToProto(ProtoAppender* appender) {
return writer_->AppendToProto(appender);
}
void TracedValue::EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) {
writer_->EstimateTraceMemoryOverhead(overhead);
}
std::string TracedValueJSON::ToJSON() const {
std::string result;
AppendAsTraceFormat(&result);
return result;
}
std::string TracedValueJSON::ToFormattedJSON() const {
std::string str;
base::JSONWriter::WriteWithOptions(
*ToBaseValue(),
base::JSONWriter::OPTIONS_OMIT_DOUBLE_TYPE_PRESERVATION |
base::JSONWriter::OPTIONS_PRETTY_PRINT,
&str);
return str;
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,169 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACED_VALUE_H_
#define BASE_TRACE_EVENT_TRACED_VALUE_H_
#include <stddef.h>
#include <memory>
#include <string>
#include <vector>
#include "base/macros.h"
#include "base/pickle.h"
#include "base/strings/string_piece.h"
#include "base/trace_event/trace_event_impl.h"
namespace base {
class Value;
namespace trace_event {
class BASE_EXPORT TracedValue : public ConvertableToTraceFormat {
public:
// TODO(oysteine): |capacity| is not used in any production code. Consider
// removing it.
explicit TracedValue(size_t capacity = 0);
~TracedValue() override;
void EndDictionary();
void EndArray();
// These methods assume that |name| is a long lived "quoted" string.
void SetInteger(const char* name, int value);
void SetDouble(const char* name, double value);
void SetBoolean(const char* name, bool value);
void SetString(const char* name, base::StringPiece value);
void SetValue(const char* name, TracedValue* value);
void BeginDictionary(const char* name);
void BeginArray(const char* name);
// These, instead, can be safely passed a temporary string.
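// Illustrative usage (a hedged sketch; BuildKey() is a hypothetical helper
// that returns a std::string):
//
//   TracedValue value;
//   value.SetInteger("count", 42);              // Literal name: OK.
//   std::string key = BuildKey();
//   value.SetIntegerWithCopiedName(key, 42);    // Copied name: OK.
//   // value.SetInteger(key.c_str(), 42);       // Unsafe: |key| may be freed
//   //                                          // before the trace is flushed.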
void SetIntegerWithCopiedName(base::StringPiece name, int value);
void SetDoubleWithCopiedName(base::StringPiece name, double value);
void SetBooleanWithCopiedName(base::StringPiece name, bool value);
void SetStringWithCopiedName(base::StringPiece name, base::StringPiece value);
void SetValueWithCopiedName(base::StringPiece name, TracedValue* value);
void BeginDictionaryWithCopiedName(base::StringPiece name);
void BeginArrayWithCopiedName(base::StringPiece name);
void AppendInteger(int);
void AppendDouble(double);
void AppendBoolean(bool);
void AppendString(base::StringPiece);
void BeginArray();
void BeginDictionary();
// ConvertableToTraceFormat implementation.
void AppendAsTraceFormat(std::string* out) const override;
bool AppendToProto(ProtoAppender* appender) override;
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) override;
// A custom serialization class can be supplied by implementing the
// Writer interface and passing a factory callback to SetWriterFactoryCallback.
// Primarily used by Perfetto to write TracedValues directly into its proto
// format, which lets us do a direct memcpy() in AppendToProto() rather than
// a JSON serialization step in AppendAsTraceFormat().
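//
// A minimal sketch of registering a custom writer (MyProtoWriter is a
// hypothetical Writer subclass, not part of this file):
//
//   std::unique_ptr<TracedValue::Writer> CreateProtoWriter(size_t capacity) {
//     return std::make_unique<MyProtoWriter>(capacity);
//   }
//   ...
//   TracedValue::SetWriterFactoryCallback(&CreateProtoWriter);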
class BASE_EXPORT Writer {
public:
virtual ~Writer() = default;
virtual void BeginArray() = 0;
virtual void BeginDictionary() = 0;
virtual void EndDictionary() = 0;
virtual void EndArray() = 0;
// These methods assume that |name| is a long lived "quoted" string.
virtual void SetInteger(const char* name, int value) = 0;
virtual void SetDouble(const char* name, double value) = 0;
virtual void SetBoolean(const char* name, bool value) = 0;
virtual void SetString(const char* name, base::StringPiece value) = 0;
virtual void SetValue(const char* name, Writer* value) = 0;
virtual void BeginDictionary(const char* name) = 0;
virtual void BeginArray(const char* name) = 0;
// These, instead, can be safely passed a temporary string.
virtual void SetIntegerWithCopiedName(base::StringPiece name,
int value) = 0;
virtual void SetDoubleWithCopiedName(base::StringPiece name,
double value) = 0;
virtual void SetBooleanWithCopiedName(base::StringPiece name,
bool value) = 0;
virtual void SetStringWithCopiedName(base::StringPiece name,
base::StringPiece value) = 0;
virtual void SetValueWithCopiedName(base::StringPiece name,
Writer* value) = 0;
virtual void BeginDictionaryWithCopiedName(base::StringPiece name) = 0;
virtual void BeginArrayWithCopiedName(base::StringPiece name) = 0;
virtual void AppendInteger(int) = 0;
virtual void AppendDouble(double) = 0;
virtual void AppendBoolean(bool) = 0;
virtual void AppendString(base::StringPiece) = 0;
virtual void AppendAsTraceFormat(std::string* out) const = 0;
virtual bool AppendToProto(ProtoAppender* appender);
virtual void EstimateTraceMemoryOverhead(
TraceEventMemoryOverhead* overhead) = 0;
virtual bool IsPickleWriter() const = 0;
virtual bool IsProtoWriter() const = 0;
};
typedef std::unique_ptr<Writer> (*WriterFactoryCallback)(size_t capacity);
static void SetWriterFactoryCallback(WriterFactoryCallback callback);
protected:
TracedValue(size_t capacity, bool forced_json);
std::unique_ptr<base::Value> ToBaseValue() const;
private:
std::unique_ptr<Writer> writer_;
#ifndef NDEBUG
// In debug builds, checks the pairings of {Begin,End}{Dictionary,Array}.
std::vector<bool> nesting_stack_;
#endif
DISALLOW_COPY_AND_ASSIGN(TracedValue);
};
// TracedValue that is convertible to JSON format. This has lower performance
// than the default TracedValue in production code, so it should be used only
// for testing and debugging and avoided in tracing. It is intended for
// testing/debugging code that calls a value-dumping function designed for
// tracing, like the following:
//
// TracedValueJSON value;
// AsValueInto(&value); // which is designed for tracing.
// return value.ToJSON();
//
// If the code is merely for testing/debugging, base::Value should be used
// instead.
class BASE_EXPORT TracedValueJSON : public TracedValue {
public:
explicit TracedValueJSON(size_t capacity = 0)
: TracedValue(capacity, /*forced_json*/ true) {}
using TracedValue::ToBaseValue;
// Converts the value into a JSON string without formatting. Suitable for
// printing a simple value or printing a value in a single line context.
std::string ToJSON() const;
// Converts the value into a formatted JSON string, with indentation, spaces
// and new lines for better human readability of complex values.
std::string ToFormattedJSON() const;
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACED_VALUE_H_

View file

@ -0,0 +1,24 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/trace_event/tracing_agent.h"
namespace base {
namespace trace_event {
TracingAgent::~TracingAgent() = default;
bool TracingAgent::SupportsExplicitClockSync() {
return false;
}
void TracingAgent::RecordClockSyncMarker(
const std::string& sync_id,
RecordClockSyncMarkerCallback callback) {
DCHECK(SupportsExplicitClockSync());
}
} // namespace trace_event
} // namespace base

View file

@ -0,0 +1,96 @@
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_TRACE_EVENT_TRACING_AGENT_H_
#define BASE_TRACE_EVENT_TRACING_AGENT_H_
#include "base/base_export.h"
#include "base/callback.h"
#include "base/memory/ref_counted_memory.h"
#include "base/values.h"
namespace base {
class TimeTicks;
namespace trace_event {
class TraceConfig;
// A tracing agent is an entity that records its own sort of trace. Each
// tracing method that produces its own trace log should implement this
// interface. All tracing agents must only be controlled by TracingController.
// Some existing examples include TracingControllerImpl for Chrome trace events,
// DebugDaemonClient for CrOS system tracing, and EtwTracingAgent for Windows
// system tracing.
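//
// An illustrative sketch of a minimal agent (all names below are hypothetical
// and not part of this interface):
//
//   class PowerMonitorAgent : public base::trace_event::TracingAgent {
//    public:
//     std::string GetTracingAgentName() override { return "powerMonitor"; }
//     std::string GetTraceEventLabel() override { return "powerTraceEvents"; }
//     void StartAgentTracing(const TraceConfig& trace_config,
//                            StartAgentTracingCallback callback) override {
//       // Begin recording, then report success back to the controller.
//       std::move(callback).Run(GetTracingAgentName(), true);
//     }
//     void StopAgentTracing(StopAgentTracingCallback callback) override {
//       // |events_| is a hypothetical scoped_refptr<RefCountedString>
//       // holding the recorded data.
//       std::move(callback).Run(GetTracingAgentName(), GetTraceEventLabel(),
//                               events_);
//     }
//   };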
class BASE_EXPORT TracingAgent {
public:
using StartAgentTracingCallback =
base::OnceCallback<void(const std::string& agent_name, bool success)>;
// Passing a null or empty events_str_ptr indicates that no trace data is
// available for the specified agent.
using StopAgentTracingCallback = base::OnceCallback<void(
const std::string& agent_name,
const std::string& events_label,
const scoped_refptr<base::RefCountedString>& events_str_ptr)>;
using RecordClockSyncMarkerCallback =
base::OnceCallback<void(const std::string& sync_id,
const TimeTicks& issue_ts,
const TimeTicks& issue_end_ts)>;
virtual ~TracingAgent();
// Gets the name of the tracing agent. Each tracing agent's name should be
// unique.
virtual std::string GetTracingAgentName() = 0;
// Gets the trace event label of this tracing agent. The label will be used to
// label this agent's trace when all traces from different tracing agents are
// combined. Multiple tracing agents could have the same label, but agents
// that share a label must not run at the same time. For
// example, ETW on Windows and CrOS system tracing both use
// "systemTraceEvents" as the label. Those two agents never run at the same
// time because they are for different platforms.
virtual std::string GetTraceEventLabel() = 0;
// Starts tracing on the tracing agent with the trace configuration.
virtual void StartAgentTracing(const TraceConfig& trace_config,
StartAgentTracingCallback callback) = 0;
// Stops tracing on the tracing agent. The trace data will be passed back to
// the TracingController via the callback.
virtual void StopAgentTracing(StopAgentTracingCallback callback) = 0;
// Checks if the tracing agent supports explicit clock synchronization.
virtual bool SupportsExplicitClockSync();
// Records a clock sync marker issued by another tracing agent. This is only
// used if the tracing agent supports explicit clock synchronization.
//
// Two things need to be done:
// 1. The issuer asks the receiver to record the clock sync marker.
// 2. The issuer records how long the receiver takes to do the recording.
//
// In Chrome, the receiver thread also runs in Chrome and it will talk to the
// real receiver entity, e.g., power monitor or Android device system, via
// different communication methods, e.g., through USB or file reading/writing.
// The 2nd task measures that communication latency.
//
// Having a reliable timing measurement for the 2nd task requires synchronous
// function call without any cross-thread or cross-process activity. However,
// tracing agents in Chrome run in their own threads. Therefore, the issuer
// needs to dedicate the 2nd task to the receiver to take time measurements
// in the receiver thread, and the receiver thread needs to pass them back to
// the issuer in the callback.
//
// The assumption is that the receiver thread knows the issuer's clock, which
// is true in Chrome because all agent threads use the Chrome clock.
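//
// A hedged sketch of an override in an agent that supports explicit clock
// sync (WriteSyncMarkerToDevice() is a hypothetical helper):
//
//   void MyAgent::RecordClockSyncMarker(const std::string& sync_id,
//                                       RecordClockSyncMarkerCallback cb) {
//     TimeTicks issue_ts = TimeTicks::Now();
//     WriteSyncMarkerToDevice(sync_id);  // Record the marker on the device.
//     TimeTicks issue_end_ts = TimeTicks::Now();
//     // Report how long the recording took back to the issuing agent.
//     std::move(cb).Run(sync_id, issue_ts, issue_end_ts);
//   }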
virtual void RecordClockSyncMarker(const std::string& sync_id,
RecordClockSyncMarkerCallback callback);
};
} // namespace trace_event
} // namespace base
#endif // BASE_TRACE_EVENT_TRACING_AGENT_H_