Repo created

Fr4nz D13trich 2025-11-22 14:04:28 +01:00
parent 81b91f4139
commit f8c34fa5ee
22732 changed files with 4815320 additions and 2 deletions


@@ -0,0 +1,9 @@
asvitkine@chromium.org
bcwhite@chromium.org
holte@chromium.org
isherman@chromium.org
jwd@chromium.org
mpearson@chromium.org
rkaplow@chromium.org
# COMPONENT: Internals>Metrics


@@ -0,0 +1,53 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/bucket_ranges.h"
#include <cmath>
#include "base/logging.h"
#include "base/metrics/crc32.h"
namespace base {
BucketRanges::BucketRanges(size_t num_ranges)
: ranges_(num_ranges, 0),
checksum_(0) {}
BucketRanges::~BucketRanges() = default;
uint32_t BucketRanges::CalculateChecksum() const {
// Crc of empty ranges_ happens to be 0. This early exit prevents trying to
// take the address of ranges_[0] which will fail for an empty vector even
// if that address is never used.
const size_t ranges_size = ranges_.size();
if (ranges_size == 0)
return 0;
// Checksum is seeded with the ranges "size".
return Crc32(static_cast<uint32_t>(ranges_size), &ranges_[0],
sizeof(ranges_[0]) * ranges_size);
}
bool BucketRanges::HasValidChecksum() const {
return CalculateChecksum() == checksum_;
}
void BucketRanges::ResetChecksum() {
checksum_ = CalculateChecksum();
}
bool BucketRanges::Equals(const BucketRanges* other) const {
if (checksum_ != other->checksum_)
return false;
if (ranges_.size() != other->ranges_.size())
return false;
for (size_t index = 0; index < ranges_.size(); ++index) {
if (ranges_[index] != other->ranges_[index])
return false;
}
return true;
}
} // namespace base


@@ -0,0 +1,101 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
//
// BucketRanges stores the vector of ranges that delimit what samples are
// tallied in the corresponding buckets of a histogram. Histograms that have
// same ranges for all their corresponding buckets should share the same
// BucketRanges object.
//
// E.g. A 5 buckets LinearHistogram with 1 as minimal value and 4 as maximal
// value will need a BucketRanges with 6 ranges:
// 0, 1, 2, 3, 4, INT_MAX
//
// TODO(kaiwang): Currently we keep all negative values in 0~1 bucket. Consider
// changing 0 to INT_MIN.
#ifndef BASE_METRICS_BUCKET_RANGES_H_
#define BASE_METRICS_BUCKET_RANGES_H_
#include <stddef.h>
#include <stdint.h>
#include <vector>
#include <limits.h>
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
namespace base {
class BASE_EXPORT BucketRanges {
public:
typedef std::vector<HistogramBase::Sample> Ranges;
explicit BucketRanges(size_t num_ranges);
~BucketRanges();
size_t size() const { return ranges_.size(); }
HistogramBase::Sample range(size_t i) const { return ranges_[i]; }
void set_range(size_t i, HistogramBase::Sample value) {
DCHECK_LT(i, ranges_.size());
DCHECK_GE(value, 0);
ranges_[i] = value;
}
uint32_t checksum() const { return checksum_; }
void set_checksum(uint32_t checksum) { checksum_ = checksum; }
// A bucket is defined by a consecutive pair of entries in |ranges|, so there
// is one fewer bucket than there are ranges. For example, if |ranges| is
// [0, 1, 3, 7, INT_MAX], then the buckets in this histogram are
// [0, 1), [1, 3), [3, 7), and [7, INT_MAX).
size_t bucket_count() const { return ranges_.size() - 1; }
// Checksum methods to verify whether the ranges are corrupted (e.g. bad
// memory access).
uint32_t CalculateChecksum() const;
bool HasValidChecksum() const;
void ResetChecksum();
// Return true iff |other| has the same ranges_ as |this| object.
bool Equals(const BucketRanges* other) const;
// Set and get a reference into persistent memory where this bucket data
// can be found (and re-used). These calls are internally atomic but offer
// no protection against overwriting an existing value; having multiple
// identical persistent records is wasteful but still safe.
void set_persistent_reference(uint32_t ref) const {
subtle::Release_Store(&persistent_reference_, ref);
}
uint32_t persistent_reference() const {
return subtle::Acquire_Load(&persistent_reference_);
}
private:
// A monotonically increasing list of values which determine which bucket to
// put a sample into. For each index, the entry is the smallest sample that
// can be added to the corresponding bucket.
Ranges ranges_;
// Checksum for the contents of ranges_. Used to detect random over-writes
// of our data, and to quickly see if some other BucketRanges instance is
// possibly Equal() to this instance.
// TODO(kaiwang): Consider changing this to uint64_t, because we see a lot of
// noise on the UMA dashboard.
uint32_t checksum_;
// A reference into a global PersistentMemoryAllocator where the ranges
// information is stored. This allows for the record to be created once and
// re-used simply by having all histograms with the same ranges use the
// same reference.
mutable subtle::Atomic32 persistent_reference_ = 0;
DISALLOW_COPY_AND_ASSIGN(BucketRanges);
};
} // namespace base
#endif // BASE_METRICS_BUCKET_RANGES_H_
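The header comment above sketches a 5-bucket LinearHistogram whose BucketRanges holds the six values 0, 1, 2, 3, 4, INT_MAX. Below is a minimal usage sketch of that layout (illustrative only, not part of this commit; it assumes it is compiled inside Chromium's base target):

#include <limits.h>

#include "base/logging.h"
#include "base/metrics/bucket_ranges.h"

// Builds the six range values from the LinearHistogram example above,
// then seeds and verifies the CRC-32 checksum over them.
void BuildExampleBucketRanges() {
  base::BucketRanges ranges(6);  // Six range values delimit five buckets.
  const base::HistogramBase::Sample values[] = {0, 1, 2, 3, 4, INT_MAX};
  for (size_t i = 0; i < ranges.size(); ++i)
    ranges.set_range(i, values[i]);
  ranges.ResetChecksum();             // checksum_ = CalculateChecksum().
  DCHECK(ranges.HasValidChecksum());  // Detects later memory corruption.
  DCHECK_EQ(5u, ranges.bucket_count());
}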


@@ -0,0 +1,81 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/crc32.h"
namespace base {
// Static table of checksums for all possible 8 bit bytes.
const uint32_t kCrcTable[256] = {
0x0, 0x77073096L, 0xee0e612cL, 0x990951baL, 0x76dc419L,
0x706af48fL, 0xe963a535L, 0x9e6495a3L, 0xedb8832L, 0x79dcb8a4L,
0xe0d5e91eL, 0x97d2d988L, 0x9b64c2bL, 0x7eb17cbdL, 0xe7b82d07L,
0x90bf1d91L, 0x1db71064L, 0x6ab020f2L, 0xf3b97148L, 0x84be41deL,
0x1adad47dL, 0x6ddde4ebL, 0xf4d4b551L, 0x83d385c7L, 0x136c9856L,
0x646ba8c0L, 0xfd62f97aL, 0x8a65c9ecL, 0x14015c4fL, 0x63066cd9L,
0xfa0f3d63L, 0x8d080df5L, 0x3b6e20c8L, 0x4c69105eL, 0xd56041e4L,
0xa2677172L, 0x3c03e4d1L, 0x4b04d447L, 0xd20d85fdL, 0xa50ab56bL,
0x35b5a8faL, 0x42b2986cL, 0xdbbbc9d6L, 0xacbcf940L, 0x32d86ce3L,
0x45df5c75L, 0xdcd60dcfL, 0xabd13d59L, 0x26d930acL, 0x51de003aL,
0xc8d75180L, 0xbfd06116L, 0x21b4f4b5L, 0x56b3c423L, 0xcfba9599L,
0xb8bda50fL, 0x2802b89eL, 0x5f058808L, 0xc60cd9b2L, 0xb10be924L,
0x2f6f7c87L, 0x58684c11L, 0xc1611dabL, 0xb6662d3dL, 0x76dc4190L,
0x1db7106L, 0x98d220bcL, 0xefd5102aL, 0x71b18589L, 0x6b6b51fL,
0x9fbfe4a5L, 0xe8b8d433L, 0x7807c9a2L, 0xf00f934L, 0x9609a88eL,
0xe10e9818L, 0x7f6a0dbbL, 0x86d3d2dL, 0x91646c97L, 0xe6635c01L,
0x6b6b51f4L, 0x1c6c6162L, 0x856530d8L, 0xf262004eL, 0x6c0695edL,
0x1b01a57bL, 0x8208f4c1L, 0xf50fc457L, 0x65b0d9c6L, 0x12b7e950L,
0x8bbeb8eaL, 0xfcb9887cL, 0x62dd1ddfL, 0x15da2d49L, 0x8cd37cf3L,
0xfbd44c65L, 0x4db26158L, 0x3ab551ceL, 0xa3bc0074L, 0xd4bb30e2L,
0x4adfa541L, 0x3dd895d7L, 0xa4d1c46dL, 0xd3d6f4fbL, 0x4369e96aL,
0x346ed9fcL, 0xad678846L, 0xda60b8d0L, 0x44042d73L, 0x33031de5L,
0xaa0a4c5fL, 0xdd0d7cc9L, 0x5005713cL, 0x270241aaL, 0xbe0b1010L,
0xc90c2086L, 0x5768b525L, 0x206f85b3L, 0xb966d409L, 0xce61e49fL,
0x5edef90eL, 0x29d9c998L, 0xb0d09822L, 0xc7d7a8b4L, 0x59b33d17L,
0x2eb40d81L, 0xb7bd5c3bL, 0xc0ba6cadL, 0xedb88320L, 0x9abfb3b6L,
0x3b6e20cL, 0x74b1d29aL, 0xead54739L, 0x9dd277afL, 0x4db2615L,
0x73dc1683L, 0xe3630b12L, 0x94643b84L, 0xd6d6a3eL, 0x7a6a5aa8L,
0xe40ecf0bL, 0x9309ff9dL, 0xa00ae27L, 0x7d079eb1L, 0xf00f9344L,
0x8708a3d2L, 0x1e01f268L, 0x6906c2feL, 0xf762575dL, 0x806567cbL,
0x196c3671L, 0x6e6b06e7L, 0xfed41b76L, 0x89d32be0L, 0x10da7a5aL,
0x67dd4accL, 0xf9b9df6fL, 0x8ebeeff9L, 0x17b7be43L, 0x60b08ed5L,
0xd6d6a3e8L, 0xa1d1937eL, 0x38d8c2c4L, 0x4fdff252L, 0xd1bb67f1L,
0xa6bc5767L, 0x3fb506ddL, 0x48b2364bL, 0xd80d2bdaL, 0xaf0a1b4cL,
0x36034af6L, 0x41047a60L, 0xdf60efc3L, 0xa867df55L, 0x316e8eefL,
0x4669be79L, 0xcb61b38cL, 0xbc66831aL, 0x256fd2a0L, 0x5268e236L,
0xcc0c7795L, 0xbb0b4703L, 0x220216b9L, 0x5505262fL, 0xc5ba3bbeL,
0xb2bd0b28L, 0x2bb45a92L, 0x5cb36a04L, 0xc2d7ffa7L, 0xb5d0cf31L,
0x2cd99e8bL, 0x5bdeae1dL, 0x9b64c2b0L, 0xec63f226L, 0x756aa39cL,
0x26d930aL, 0x9c0906a9L, 0xeb0e363fL, 0x72076785L, 0x5005713L,
0x95bf4a82L, 0xe2b87a14L, 0x7bb12baeL, 0xcb61b38L, 0x92d28e9bL,
0xe5d5be0dL, 0x7cdcefb7L, 0xbdbdf21L, 0x86d3d2d4L, 0xf1d4e242L,
0x68ddb3f8L, 0x1fda836eL, 0x81be16cdL, 0xf6b9265bL, 0x6fb077e1L,
0x18b74777L, 0x88085ae6L, 0xff0f6a70L, 0x66063bcaL, 0x11010b5cL,
0x8f659effL, 0xf862ae69L, 0x616bffd3L, 0x166ccf45L, 0xa00ae278L,
0xd70dd2eeL, 0x4e048354L, 0x3903b3c2L, 0xa7672661L, 0xd06016f7L,
0x4969474dL, 0x3e6e77dbL, 0xaed16a4aL, 0xd9d65adcL, 0x40df0b66L,
0x37d83bf0L, 0xa9bcae53L, 0xdebb9ec5L, 0x47b2cf7fL, 0x30b5ffe9L,
0xbdbdf21cL, 0xcabac28aL, 0x53b39330L, 0x24b4a3a6L, 0xbad03605L,
0xcdd70693L, 0x54de5729L, 0x23d967bfL, 0xb3667a2eL, 0xc4614ab8L,
0x5d681b02L, 0x2a6f2b94L, 0xb40bbe37L, 0xc30c8ea1L, 0x5a05df1bL,
0x2d02ef8dL,
};
// We generate the CRC-32 using the low order bits to select whether to XOR in
// the reversed polynomial 0xEDB88320. This is nice and simple, and allows us
// to keep the quotient in a uint32_t. Since we're not concerned about the
// nature of corruptions (i.e., we don't care about bit sequencing, since we are
// handling memory changes, which are more grotesque), we don't bother to get
// the CRC correct for big-endian vs little-endian calculations. All we need is
// a nice hash that tends to depend on all the bits of the sample, with very
// little chance of changes in one place impacting changes in another place.
uint32_t Crc32(uint32_t sum, const void* data, size_t size) {
const unsigned char* bytes = reinterpret_cast<const unsigned char*>(data);
for (size_t i = 0; i < size; ++i) {
sum = kCrcTable[(sum & 0x000000FF) ^ bytes[i]] ^ (sum >> 8);
}
return sum;
}
} // namespace base
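The comment above describes a reflected, table-driven CRC-32 with polynomial 0xEDB88320. As an illustration of where kCrcTable comes from (a sketch, not part of the original file), the same 256 entries can be regenerated bit by bit:

#include <stdint.h>

// Recomputes a table equivalent to kCrcTable: for each byte value, shift
// right eight times, XORing in the reversed polynomial whenever the low
// bit is set. E.g. the entry for byte 1 comes out as 0x77073096.
void GenerateCrcTable(uint32_t table[256]) {
  for (uint32_t byte = 0; byte < 256; ++byte) {
    uint32_t crc = byte;
    for (int bit = 0; bit < 8; ++bit)
      crc = (crc & 1) ? (crc >> 1) ^ 0xEDB88320u : (crc >> 1);
    table[byte] = crc;
  }
}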


@@ -0,0 +1,24 @@
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_CRC32_H_
#define BASE_METRICS_CRC32_H_
#include <stddef.h>
#include <stdint.h>
#include "base/base_export.h"
namespace base {
BASE_EXPORT extern const uint32_t kCrcTable[256];
// This provides a simple, fast CRC-32 calculation that can be used for checking
// the integrity of data. It is not a "secure" calculation! |sum| can start
// with any seed or be used to continue an operation begun with previous data.
BASE_EXPORT uint32_t Crc32(uint32_t sum, const void* data, size_t size);
} // namespace base
#endif // BASE_METRICS_CRC32_H_
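A small sketch of the seeding/continuation behaviour noted above: because Crc32() consumes one byte at a time, feeding the running sum back in as the seed gives the same result as a single call over the whole buffer (illustrative only):

#include "base/logging.h"
#include "base/metrics/crc32.h"

void ChecksumInTwoChunks() {
  const char data[] = "hello world";
  // One-shot over the whole buffer, seeded with 0.
  const uint32_t whole = base::Crc32(0, data, sizeof(data));
  // Same bytes in two pieces; the intermediate sum becomes the next seed.
  uint32_t part = base::Crc32(0, data, 5);
  part = base::Crc32(part, data + 5, sizeof(data) - 5);
  DCHECK_EQ(whole, part);
}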


@@ -0,0 +1,102 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/dummy_histogram.h"
#include <memory>
#include "base/logging.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/metrics_hashes.h"
namespace base {
namespace {
// Helper classes for DummyHistogram.
class DummySampleCountIterator : public SampleCountIterator {
public:
DummySampleCountIterator() {}
~DummySampleCountIterator() override {}
// SampleCountIterator:
bool Done() const override { return true; }
void Next() override { NOTREACHED(); }
void Get(HistogramBase::Sample* min,
int64_t* max,
HistogramBase::Count* count) const override {
NOTREACHED();
}
private:
DISALLOW_COPY_AND_ASSIGN(DummySampleCountIterator);
};
class DummyHistogramSamples : public HistogramSamples {
public:
explicit DummyHistogramSamples() : HistogramSamples(0, new LocalMetadata()) {}
~DummyHistogramSamples() override {
delete static_cast<LocalMetadata*>(meta());
}
// HistogramSamples:
void Accumulate(HistogramBase::Sample value,
HistogramBase::Count count) override {}
HistogramBase::Count GetCount(HistogramBase::Sample value) const override {
return HistogramBase::Count();
}
HistogramBase::Count TotalCount() const override {
return HistogramBase::Count();
}
std::unique_ptr<SampleCountIterator> Iterator() const override {
return std::make_unique<DummySampleCountIterator>();
}
bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override {
return true;
}
private:
DISALLOW_COPY_AND_ASSIGN(DummyHistogramSamples);
};
} // namespace
// static
DummyHistogram* DummyHistogram::GetInstance() {
static base::NoDestructor<DummyHistogram> dummy_histogram;
return dummy_histogram.get();
}
uint64_t DummyHistogram::name_hash() const {
return HashMetricName(histogram_name());
}
HistogramType DummyHistogram::GetHistogramType() const {
return DUMMY_HISTOGRAM;
}
bool DummyHistogram::HasConstructionArguments(
Sample expected_minimum,
Sample expected_maximum,
uint32_t expected_bucket_count) const {
return true;
}
bool DummyHistogram::AddSamplesFromPickle(PickleIterator* iter) {
return true;
}
std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotSamples() const {
return std::make_unique<DummyHistogramSamples>();
}
std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotDelta() {
return std::make_unique<DummyHistogramSamples>();
}
std::unique_ptr<HistogramSamples> DummyHistogram::SnapshotFinalDelta() const {
return std::make_unique<DummyHistogramSamples>();
}
} // namespace base


@@ -0,0 +1,61 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_DUMMY_HISTOGRAM_H_
#define BASE_METRICS_DUMMY_HISTOGRAM_H_
#include <stdint.h>
#include <memory>
#include <string>
#include "base/base_export.h"
#include "base/metrics/histogram_base.h"
#include "base/no_destructor.h"
namespace base {
// DummyHistogram is used for mocking histogram objects for histograms that
// shouldn't be recorded. It doesn't do any actual processing.
class BASE_EXPORT DummyHistogram : public HistogramBase {
public:
static DummyHistogram* GetInstance();
// HistogramBase:
void CheckName(const StringPiece& name) const override {}
uint64_t name_hash() const override;
HistogramType GetHistogramType() const override;
bool HasConstructionArguments(Sample expected_minimum,
Sample expected_maximum,
uint32_t expected_bucket_count) const override;
void Add(Sample value) override {}
void AddCount(Sample value, int count) override {}
void AddSamples(const HistogramSamples& samples) override {}
bool AddSamplesFromPickle(PickleIterator* iter) override;
std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
std::unique_ptr<HistogramSamples> SnapshotDelta() override;
std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
void WriteHTMLGraph(std::string* output) const override {}
void WriteAscii(std::string* output) const override {}
protected:
// HistogramBase:
void SerializeInfoImpl(Pickle* pickle) const override {}
void GetParameters(DictionaryValue* params) const override {}
void GetCountAndBucketData(Count* count,
int64_t* sum,
ListValue* buckets) const override {}
private:
friend class NoDestructor<DummyHistogram>;
DummyHistogram() : HistogramBase("dummy_histogram") {}
~DummyHistogram() override {}
DISALLOW_COPY_AND_ASSIGN(DummyHistogram);
};
} // namespace base
#endif // BASE_METRICS_DUMMY_HISTOGRAM_H_
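A hedged sketch of how DummyHistogram is typically consumed: a lookup routine hands back the shared dummy instance when a histogram should not be recorded, so callers still get a valid pointer to call Add() on. IsRecordingAllowed() and LookUpRealHistogram() are hypothetical helpers invented for this illustration:

#include <string>

#include "base/metrics/dummy_histogram.h"
#include "base/metrics/histogram_base.h"

// Hypothetical helpers, assumed for illustration only.
bool IsRecordingAllowed(const std::string& name);
base::HistogramBase* LookUpRealHistogram(const std::string& name);

// When recording is suppressed, return the shared dummy so that Add(),
// AddCount(), etc. on the result are harmless no-ops.
base::HistogramBase* HistogramForName(const std::string& name) {
  if (!IsRecordingAllowed(name))
    return base::DummyHistogram::GetInstance();
  return LookUpRealHistogram(name);
}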

File diff suppressed because it is too large.


@@ -0,0 +1,811 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// FieldTrial is a class for handling details of statistical experiments
// performed by actual users in the field (i.e., in a shipped or beta product).
// All code is called exclusively on the UI thread currently.
//
// The simplest example is an experiment to see whether one of two options
// produces "better" results across our user population. In that scenario, UMA
// data is uploaded to aggregate the test results, and this FieldTrial class
// manages the state of each such experiment (state == which option was
// pseudo-randomly selected).
//
// States are typically generated randomly, either based on a one time
// randomization (which will yield the same results, in terms of selecting
// the client for a field trial or not, for every run of the program on a
// given machine), or by a session randomization (generated each time the
// application starts up, but held constant during the duration of the
// process).
//------------------------------------------------------------------------------
// Example: Suppose we have an experiment involving memory, such as determining
// the impact of some pruning algorithm.
// We assume that we already have a histogram of memory usage, such as:
// UMA_HISTOGRAM_COUNTS_1M("Memory.RendererTotal", count);
// Somewhere in main thread initialization code, we'd probably define an
// instance of a FieldTrial, with code such as:
// // FieldTrials are reference counted, and persist automagically until
// // process teardown, courtesy of their automatic registration in
// // FieldTrialList.
// // Note: This field trial will run in Chrome instances compiled through
// // 8 July, 2015, and after that all instances will be in "StandardMem".
// scoped_refptr<base::FieldTrial> trial(
// base::FieldTrialList::FactoryGetFieldTrial(
// "MemoryExperiment", 1000, "StandardMem",
// base::FieldTrial::ONE_TIME_RANDOMIZED, nullptr));
//
// const int high_mem_group =
// trial->AppendGroup("HighMem", 20); // 2% in HighMem group.
// const int low_mem_group =
// trial->AppendGroup("LowMem", 20); // 2% in LowMem group.
// // Take action depending on which group we randomly land in.
// if (trial->group() == high_mem_group)
// SetPruningAlgorithm(kType1); // Sample setting of browser state.
// else if (trial->group() == low_mem_group)
// SetPruningAlgorithm(kType2); // Sample alternate setting.
//------------------------------------------------------------------------------
#ifndef BASE_METRICS_FIELD_TRIAL_H_
#define BASE_METRICS_FIELD_TRIAL_H_
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <set>
#include <string>
#include <vector>
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/command_line.h"
#include "base/feature_list.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/read_only_shared_memory_region.h"
#include "base/memory/ref_counted.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/metrics/persistent_memory_allocator.h"
#include "base/observer_list_threadsafe.h"
#include "base/pickle.h"
#include "base/process/launch.h"
#include "base/strings/string_piece.h"
#include "base/synchronization/lock.h"
#include "build/build_config.h"
#if defined(OS_MACOSX) && !defined(OS_IOS)
#include "base/mac/mach_port_rendezvous.h"
#endif
namespace base {
class FieldTrialList;
class BASE_EXPORT FieldTrial : public RefCounted<FieldTrial> {
public:
typedef int Probability; // Probability type for being selected in a trial.
// Specifies the persistence of the field trial group choice.
enum RandomizationType {
// One time randomized trials will persist the group choice between
// restarts, which is recommended for most trials, especially those that
// change user visible behavior.
ONE_TIME_RANDOMIZED,
// Session randomized trials will roll the dice to select a group on every
// process restart.
SESSION_RANDOMIZED,
};
// EntropyProvider is an interface for providing entropy for one-time
// randomized (persistent) field trials.
class BASE_EXPORT EntropyProvider {
public:
virtual ~EntropyProvider();
// Returns a double in the range of [0, 1) to be used for the dice roll for
// the specified field trial. If |randomization_seed| is not 0, it will be
// used in preference to |trial_name| for generating the entropy by entropy
// providers that support it. A given instance should always return the same
// value given the same input |trial_name| and |randomization_seed| values.
virtual double GetEntropyForTrial(const std::string& trial_name,
uint32_t randomization_seed) const = 0;
};
// A pair representing a Field Trial and its selected group.
struct ActiveGroup {
std::string trial_name;
std::string group_name;
};
// A triplet representing a FieldTrial, its selected group and whether it's
// active. String members are pointers to the underlying strings owned by the
// FieldTrial object. Does not use StringPiece to avoid conversions back to
// std::string.
struct BASE_EXPORT State {
const std::string* trial_name = nullptr;
const std::string* group_name = nullptr;
bool activated = false;
State();
State(const State& other);
~State();
};
// We create one FieldTrialEntry per field trial in shared memory, via
// AddToAllocatorWhileLocked. The FieldTrialEntry is followed by a
// base::Pickle object that we unpickle and read from.
struct BASE_EXPORT FieldTrialEntry {
// SHA1(FieldTrialEntry): Increment this if structure changes!
static constexpr uint32_t kPersistentTypeId = 0xABA17E13 + 2;
// Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize = 8;
// Whether or not this field trial is activated. This is really just a
// boolean but using a 32 bit value for portability reasons. It should be
// accessed via NoBarrier_Load()/NoBarrier_Store() to prevent the compiler
// from doing unexpected optimizations because it thinks that only one
// thread is accessing the memory location.
subtle::Atomic32 activated;
// Size of the pickled structure, NOT the total size of this entry.
uint32_t pickle_size;
// Calling this is only valid when the entry is initialized. That is, it
// resides in shared memory and has a pickle containing the trial name and
// group name following it.
bool GetTrialAndGroupName(StringPiece* trial_name,
StringPiece* group_name) const;
// Calling this is only valid when the entry is initialized as well. Reads
// the parameters following the trial and group name and stores them as
// key-value mappings in |params|.
bool GetParams(std::map<std::string, std::string>* params) const;
private:
// Returns an iterator over the data containing names and params.
PickleIterator GetPickleIterator() const;
// Takes the iterator and writes out the first two items into |trial_name|
// and |group_name|.
bool ReadStringPair(PickleIterator* iter,
StringPiece* trial_name,
StringPiece* group_name) const;
};
typedef std::vector<ActiveGroup> ActiveGroups;
// A return value to indicate that a given instance has not yet had a group
// assignment (and hence is not yet participating in the trial).
static const int kNotFinalized;
// Disables this trial, meaning it always determines the default group
// has been selected. May be called immediately after construction, or
// at any time after initialization (should not be interleaved with
// AppendGroup calls). Once disabled, there is no way to re-enable a
// trial.
// TODO(mad): http://code.google.com/p/chromium/issues/detail?id=121446
// This doesn't properly reset to Default when a group was forced.
void Disable();
// Establish the name and probability of the next group in this trial.
// Sometimes, based on construction randomization, this call may cause the
// provided group to be *THE* group selected for use in this instance.
// The return value is the group number of the new group.
int AppendGroup(const std::string& name, Probability group_probability);
// Return the name of the FieldTrial (excluding the group name).
const std::string& trial_name() const { return trial_name_; }
// Return the randomly selected group number that was assigned, and notify
// any/all observers that this finalized group number has presumably been used
// (queried), and will never change. Note that this will force an instance to
// participate, and make it illegal to attempt to probabilistically add any
// other groups to the trial.
int group();
// If the group's name is empty, a string version containing the group number
// is used as the group name. This causes a winner to be chosen if none was.
const std::string& group_name();
// Finalizes the group choice and returns the chosen group, but does not mark
// the trial as active - so its state will not be reported until group_name()
// or similar is called.
const std::string& GetGroupNameWithoutActivation();
// Sets the field trial as forced, meaning that it was set up earlier than
// the hard-coded registration of the field trial to override it.
// This allows the code that was hard-coded to register the field trial to
// still succeed even though the field trial has already been registered.
// This must be called after appending all the groups, since we will make
// the group choice here. Note that this is a NOOP for already forced trials.
// And, like the rest of the FieldTrial code, this is not thread safe and must
// be done from the UI thread.
void SetForced();
// Enabling benchmarking sets field trials to a common setting.
static void EnableBenchmarking();
// Creates a FieldTrial object with the specified parameters, to be used for
// simulation of group assignment without actually affecting global field
// trial state in the running process. Group assignment will be done based on
// |entropy_value|, which must have a range of [0, 1).
//
// Note: Using this function will not register the field trial globally in the
// running process - for that, use FieldTrialList::FactoryGetFieldTrial().
//
// The ownership of the returned FieldTrial is transferred to the caller, which
// is responsible for deref'ing it (e.g. by using scoped_refptr<FieldTrial>).
static FieldTrial* CreateSimulatedFieldTrial(
const std::string& trial_name,
Probability total_probability,
const std::string& default_group_name,
double entropy_value);
private:
// Allow tests to access our innards for testing purposes.
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, Registration);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, AbsoluteProbabilities);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, RemainingProbability);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FiftyFiftyProbability);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, MiddleProbabilities);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, OneWinner);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DisableProbability);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, ActiveGroups);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, AllGroups);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, ActiveGroupsNotFinalized);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, Save);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SaveAll);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DuplicateRestore);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedTurnFeatureOff);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedTurnFeatureOn);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_Default);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, SetForcedChangeDefault_NonDefault);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, FloatBoundariesGiveEqualGroupSizes);
FRIEND_TEST_ALL_PREFIXES(FieldTrialTest, DoesNotSurpassTotalProbability);
FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
DoNotAddSimulatedFieldTrialsToAllocator);
FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
friend class base::FieldTrialList;
friend class RefCounted<FieldTrial>;
using FieldTrialRef = PersistentMemoryAllocator::Reference;
// This is the group number of the 'default' group when a choice wasn't forced
// by a call to FieldTrialList::CreateFieldTrial. It is kept private so that
// consumers don't use it by mistake in cases where the group was forced.
static const int kDefaultGroupNumber;
// Creates a field trial with the specified parameters. Group assignment will
// be done based on |entropy_value|, which must have a range of [0, 1).
FieldTrial(const std::string& trial_name,
Probability total_probability,
const std::string& default_group_name,
double entropy_value);
virtual ~FieldTrial();
// Return the default group name of the FieldTrial.
std::string default_group_name() const { return default_group_name_; }
// Marks this trial as having been registered with the FieldTrialList. Must be
// called no more than once and before any |group()| calls have occurred.
void SetTrialRegistered();
// Sets the chosen group name and number.
void SetGroupChoice(const std::string& group_name, int number);
// Ensures that a group is chosen, if it hasn't yet been. The field trial
// might yet be disabled, so this call will *not* notify observers of the
// status.
void FinalizeGroupChoice();
// Implements FinalizeGroupChoice() with the added flexibility of being
// deadlock-free if |is_locked| is true and the caller is holding a lock.
void FinalizeGroupChoiceImpl(bool is_locked);
// Returns the trial name and selected group name for this field trial via
// the output parameter |active_group|, but only if the group has already
// been chosen and has been externally observed via |group()| and the trial
// has not been disabled. In that case, true is returned and |active_group|
// is filled in; otherwise, the result is false and |active_group| is left
// untouched.
bool GetActiveGroup(ActiveGroup* active_group) const;
// Returns the trial name and selected group name for this field trial via
// the output parameter |field_trial_state|. When |include_disabled| is true,
// this is done for all trials. When |include_disabled| is false, it is done
// only if the trial has not been disabled: in that case true is returned and
// |field_trial_state| is filled in; otherwise the result is false and
// |field_trial_state| is left untouched.
bool GetStateWhileLocked(State* field_trial_state, bool include_disabled);
// Returns the group_name. A winner need not have been chosen.
std::string group_name_internal() const { return group_name_; }
// The name of the field trial, as can be found via the FieldTrialList.
const std::string trial_name_;
// The maximum sum of all probabilities supplied, which corresponds to 100%.
// This is the scaling factor used to adjust supplied probabilities.
const Probability divisor_;
// The name of the default group.
const std::string default_group_name_;
// The randomly selected probability that is used to select a group (or have
// the instance not participate). It is the product of divisor_ and a random
// number between [0, 1).
Probability random_;
// Sum of the probabilities of all appended groups.
Probability accumulated_group_probability_;
// The number that will be returned by the next AppendGroup() call.
int next_group_number_;
// The pseudo-randomly assigned group number.
// This is kNotFinalized if no group has been assigned.
int group_;
// A textual name for the randomly selected group. Valid after |group()|
// has been called.
std::string group_name_;
// When enable_field_trial_ is false, field trial reverts to the 'default'
// group.
bool enable_field_trial_;
// When forced_ is true, we return the chosen group from AppendGroup when
// appropriate.
bool forced_;
// Specifies whether the group choice has been reported to observers.
bool group_reported_;
// Whether this trial is registered with the global FieldTrialList and thus
// should notify it when its group is queried.
bool trial_registered_;
// Reference to related field trial struct and data in shared memory.
FieldTrialRef ref_;
// When benchmarking is enabled, field trials all revert to the 'default'
// group.
static bool enable_benchmarking_;
DISALLOW_COPY_AND_ASSIGN(FieldTrial);
};
//------------------------------------------------------------------------------
// Class with a list of all active field trials. A trial is active if it has
// been registered, which includes evaluating its state based on its probability.
// Only one instance of this class exists and, outside of testing, it will live
// for the entire lifetime of the process.
class BASE_EXPORT FieldTrialList {
public:
using FieldTrialAllocator = PersistentMemoryAllocator;
// Type for function pointer passed to |AllParamsToString| used to escape
// special characters from |input|.
typedef std::string (*EscapeDataFunc)(const std::string& input);
// Observer is notified when a FieldTrial's group is selected.
class BASE_EXPORT Observer {
public:
// Notify observers when a FieldTrial's group is selected.
virtual void OnFieldTrialGroupFinalized(const std::string& trial_name,
const std::string& group_name) = 0;
protected:
virtual ~Observer();
};
// This singleton holds the global list of registered FieldTrials.
//
// To support one-time randomized field trials, specify a non-null
// |entropy_provider| which should be a source of uniformly distributed
// entropy values. If one time randomization is not desired, pass in null for
// |entropy_provider|.
explicit FieldTrialList(
std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider);
// Destructor Release()'s references to all registered FieldTrial instances.
~FieldTrialList();
// Get a FieldTrial instance from the factory.
//
// |name| is used to register the instance with the FieldTrialList class,
// and can be used to find the trial (only one trial can be present for each
// name). |default_group_name| is the name of the default group which will
// be chosen if none of the subsequent appended groups get to be chosen.
// |default_group_number| can receive the group number of the default group as
// AppendGroup returns the number of the subsequent groups. |trial_name| and
// |default_group_name| may not be empty, but |default_group_number| can be
// null if the value is not needed.
//
// Group probabilities that are later supplied must sum to less than or equal
// to the |total_probability|.
//
// Use this static method to get a startup-randomized FieldTrial or a
// previously created forced FieldTrial.
static FieldTrial* FactoryGetFieldTrial(
const std::string& trial_name,
FieldTrial::Probability total_probability,
const std::string& default_group_name,
FieldTrial::RandomizationType randomization_type,
int* default_group_number);
// Same as FactoryGetFieldTrial(), but allows specifying a custom seed to be
// used on one-time randomized field trials (instead of a hash of the trial
// name, which is used otherwise or if |randomization_seed| has value 0). The
// |randomization_seed| value (other than 0) should never be the same for two
// trials, else this would result in correlated group assignments. Note:
// Using a custom randomization seed is only supported by the
// NormalizedMurmurHashEntropyProvider, which is used when UMA is not enabled
// (and is always used in Android WebView, where UMA is enabled
// asynchronously). If |override_entropy_provider| is not null, then it will be
// used for randomization instead of the provider given when the
// FieldTrialList was instantiated.
static FieldTrial* FactoryGetFieldTrialWithRandomizationSeed(
const std::string& trial_name,
FieldTrial::Probability total_probability,
const std::string& default_group_name,
FieldTrial::RandomizationType randomization_type,
uint32_t randomization_seed,
int* default_group_number,
const FieldTrial::EntropyProvider* override_entropy_provider);
// The Find() method can be used to test to see if a named trial was already
// registered, or to retrieve a pointer to it from the global map.
static FieldTrial* Find(const std::string& trial_name);
// Returns the group number chosen for the named trial, or
// FieldTrial::kNotFinalized if the trial does not exist.
static int FindValue(const std::string& trial_name);
// Returns the group name chosen for the named trial, or the empty string if
// the trial does not exist. The first call of this function on a given field
// trial will mark it as active, so that its state will be reported with usage
// metrics, crashes, etc.
// Note: Direct use of this function and related FieldTrial functions is
// generally discouraged - instead please use base::Feature when possible.
static std::string FindFullName(const std::string& trial_name);
// Returns true if the named trial has been registered.
static bool TrialExists(const std::string& trial_name);
// Returns true if the named trial exists and has been activated.
static bool IsTrialActive(const std::string& trial_name);
// Creates a persistent representation of active FieldTrial instances for
// resurrection in another process. This allows randomization to be done in
// one process, and secondary processes can be synchronized on the result.
// The resulting string contains the name and group name pairs of all
// registered FieldTrials for which the group has been chosen and externally
// observed (via |group()|) and which have not been disabled, with "/" used
// to separate all names and to terminate the string. This string is parsed
// by |CreateTrialsFromString()|.
static void StatesToString(std::string* output);
// Creates a persistent representation of all FieldTrial instances for
// resurrection in another process. This allows randomization to be done in
// one process, and secondary processes can be synchronized on the result.
// The resulting string contains the name and group name pairs of all
// registered FieldTrials including disabled based on |include_disabled|,
// with "/" used to separate all names and to terminate the string. All
// activated trials have their name prefixed with "*". This string is parsed
// by |CreateTrialsFromString()|.
static void AllStatesToString(std::string* output, bool include_disabled);
// Creates a persistent representation of all FieldTrial params for
// resurrection in another process. The returned string contains the trial
// name and group name pairs of all registered FieldTrials including disabled
// based on |include_disabled| separated by '.'. The pair is followed by ':'
// separator and a list of param names and values separated by '/'. It also
// takes an |encode_data_func| function pointer for encoding special characters.
// This string is parsed by |AssociateParamsFromString()|.
static std::string AllParamsToString(bool include_disabled,
EscapeDataFunc encode_data_func);
// Fills in the supplied vector |active_groups| (which must be empty when
// called) with a snapshot of all registered FieldTrials for which the group
// has been chosen and externally observed (via |group()|) and which have
// not been disabled.
static void GetActiveFieldTrialGroups(
FieldTrial::ActiveGroups* active_groups);
// Returns the field trials that are marked active in |trials_string|.
static void GetActiveFieldTrialGroupsFromString(
const std::string& trials_string,
FieldTrial::ActiveGroups* active_groups);
// Returns the field trials that were active when the process was
// created. Either parses the field trial string or the shared memory
// holding field trial information.
// Must be called only after a call to CreateTrialsFromCommandLine().
static void GetInitiallyActiveFieldTrials(
const CommandLine& command_line,
FieldTrial::ActiveGroups* active_groups);
// Use a state string (re: StatesToString()) to augment the current list of
// field trials to include the supplied trials, and using a 100% probability
// for each trial, force them to have the same group string. This is commonly
// used in a non-browser process, to carry randomly selected state in a
// browser process into this non-browser process, but could also be invoked
// through a command line argument to the browser process. Created field
// trials will be marked "used" for the purposes of active trial reporting
// if they are prefixed with |kActivationMarker|. Trial names in
// |ignored_trial_names| are ignored when parsing |trials_string|.
static bool CreateTrialsFromString(
const std::string& trials_string,
const std::set<std::string>& ignored_trial_names);
// Achieves the same thing as CreateTrialsFromString, except wraps the logic
// by taking in the trials from the command line, either via shared memory
// handle or command line argument. A bit of a misnomer since on POSIX we
// simply get the trials from opening |fd_key| if using shared memory. On
// Windows, we expect the |cmd_line| switch for |field_trial_handle_switch| to
// contain the shared memory handle that contains the field trial allocator.
// We need the |field_trial_handle_switch| and |fd_key| arguments to be passed
// in since base/ can't depend on content/.
static void CreateTrialsFromCommandLine(const CommandLine& cmd_line,
const char* field_trial_handle_switch,
int fd_key);
// Creates base::Feature overrides from the command line by first trying to
// use shared memory and then falling back to the command line if it fails.
static void CreateFeaturesFromCommandLine(const CommandLine& command_line,
const char* enable_features_switch,
const char* disable_features_switch,
FeatureList* feature_list);
#if defined(OS_WIN)
// On Windows, we need to explicitly pass down any handles to be inherited.
// This function adds the shared memory handle to field trial state to the
// list of handles to be inherited.
static void AppendFieldTrialHandleIfNeeded(HandlesToInheritVector* handles);
#elif defined(OS_FUCHSIA)
// TODO(fuchsia): Implement shared-memory configuration (crbug.com/752368).
#elif defined(OS_MACOSX) && !defined(OS_IOS)
// On Mac, the field trial shared memory is accessed via a Mach server, which
// the child looks up directly.
static void InsertFieldTrialHandleIfNeeded(
MachPortsForRendezvous* rendezvous_ports);
#elif defined(OS_POSIX) && !defined(OS_NACL)
// On POSIX, we also need to explicitly pass down this file descriptor that
// should be shared with the child process. Returns -1 if it was not
// initialized properly. The current process remains the owner of the passed
// descriptor.
static int GetFieldTrialDescriptor();
#endif
static ReadOnlySharedMemoryRegion DuplicateFieldTrialSharedMemoryForTesting();
// Adds a switch to the command line containing the field trial state as a
// string (if not using shared memory to share field trial state), or the
// shared memory handle + length.
// Needs the |field_trial_handle_switch| argument to be passed in since base/
// can't depend on content/.
static void CopyFieldTrialStateToFlags(const char* field_trial_handle_switch,
const char* enable_features_switch,
const char* disable_features_switch,
CommandLine* cmd_line);
// Creates a FieldTrial with the given |name| and, using 100% probability for
// the FieldTrial, forces it to have the same group string as |group_name|.
// This is commonly used in a non-browser process, to carry randomly selected
// state in a browser process into this non-browser process. It returns NULL
// if there is a FieldTrial that is already registered with the same |name|
// but has a different finalized group string (|group_name|).
static FieldTrial* CreateFieldTrial(const std::string& name,
const std::string& group_name);
// Add an observer to be notified when a field trial is irrevocably committed
// to being part of some specific field_group (and hence the group_name is
// also finalized for that field_trial). Returns false and does nothing if
// there is no FieldTrialList singleton.
static bool AddObserver(Observer* observer);
// Remove an observer.
static void RemoveObserver(Observer* observer);
// Similar to AddObserver(), but the passed observer will be notified
// synchronously when a field trial is activated and its group selected. It
// will be notified synchronously on the same thread where the activation and
// group selection happened. It is the responsibility of the observer to make
// sure that this is a safe operation and the operation must be fast, as this
// work is done synchronously as part of group() or related APIs. Only a
// single such observer is supported, exposed specifically for crash
// reporting. Must be called on the main thread before any other threads
// have been started.
static void SetSynchronousObserver(Observer* observer);
// Removes the single synchronous observer.
static void RemoveSynchronousObserver(Observer* observer);
// Grabs the lock if necessary and adds the field trial to the allocator. This
// should only be called from FinalizeGroupChoice().
static void OnGroupFinalized(bool is_locked, FieldTrial* field_trial);
// Notify all observers that a group has been finalized for |field_trial|.
static void NotifyFieldTrialGroupSelection(FieldTrial* field_trial);
// Return the number of active field trials.
static size_t GetFieldTrialCount();
// Gets the parameters for |field_trial| from shared memory and stores them in
// |params|. This is only exposed for use by FieldTrialParamAssociator and
// shouldn't be used by anything else.
static bool GetParamsFromSharedMemory(
FieldTrial* field_trial,
std::map<std::string, std::string>* params);
// Clears all the params in the allocator.
static void ClearParamsFromSharedMemoryForTesting();
// Dumps field trial state to an allocator so that it can be analyzed after a
// crash.
static void DumpAllFieldTrialsToPersistentAllocator(
PersistentMemoryAllocator* allocator);
// Retrieves field trial state from an allocator so that it can be analyzed
// after a crash. The pointers in the returned vector are into the persistent
// memory segment and so are only valid as long as the allocator is valid.
static std::vector<const FieldTrial::FieldTrialEntry*>
GetAllFieldTrialsFromPersistentAllocator(
PersistentMemoryAllocator const& allocator);
// Returns a pointer to the global instance. This is exposed so that it can
// be used in a DCHECK in FeatureList and ScopedFeatureList test-only logic
// and is not intended to be used widely beyond those cases.
static FieldTrialList* GetInstance();
// For testing, sets the global instance to null and returns the previous one.
static FieldTrialList* BackupInstanceForTesting();
// For testing, sets the global instance to |instance|.
static void RestoreInstanceForTesting(FieldTrialList* instance);
private:
// Allow tests to access our innards for testing purposes.
FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, InstantiateAllocator);
FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AddTrialsToAllocator);
FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
DoNotAddSimulatedFieldTrialsToAllocator);
FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, AssociateFieldTrialParams);
FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, ClearParamsFromSharedMemory);
FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest,
SerializeSharedMemoryRegionMetadata);
friend int SerializeSharedMemoryRegionMetadata(void);
FRIEND_TEST_ALL_PREFIXES(FieldTrialListTest, CheckReadOnlySharedMemoryRegion);
// Serialization is used to pass information about the handle to child
// processes. It passes a reference to the relevant OS resource, and it passes
// a GUID. Serialization and deserialization don't actually transport the
// underlying OS resource - that must be done by the Process launcher.
static std::string SerializeSharedMemoryRegionMetadata(
const ReadOnlySharedMemoryRegion& shm);
#if defined(OS_WIN) || defined(OS_FUCHSIA) || \
(defined(OS_MACOSX) && !defined(OS_IOS))
static ReadOnlySharedMemoryRegion DeserializeSharedMemoryRegionMetadata(
const std::string& switch_value);
#elif defined(OS_POSIX) && !defined(OS_NACL)
static ReadOnlySharedMemoryRegion DeserializeSharedMemoryRegionMetadata(
int fd,
const std::string& switch_value);
#endif
#if defined(OS_WIN) || defined(OS_FUCHSIA) || \
(defined(OS_MACOSX) && !defined(OS_IOS))
// Takes in |handle_switch| from the command line which represents the shared
// memory handle for field trials, parses it, and creates the field trials.
// Returns true on success, false on failure.
// |switch_value| also contains the serialized GUID.
static bool CreateTrialsFromSwitchValue(const std::string& switch_value);
#elif defined(OS_POSIX) && !defined(OS_NACL)
// On POSIX systems that use the zygote, we find the correct fd that backs the
// shared memory segment containing the field trials by looking it up via an
// fd key in GlobalDescriptors. Returns true on success, false on failure.
// |switch_value| also contains the serialized GUID.
static bool CreateTrialsFromDescriptor(int fd_key,
const std::string& switch_value);
#endif
// Takes an unmapped ReadOnlySharedMemoryRegion, maps it with the correct size
// and creates field trials via CreateTrialsFromSharedMemoryMapping(). Returns
// true if successful and false otherwise.
static bool CreateTrialsFromSharedMemoryRegion(
const ReadOnlySharedMemoryRegion& shm_region);
// Expects a mapped piece of shared memory |shm_mapping| that was created from
// the browser process's field_trial_allocator and shared via the command
// line. This function recreates the allocator, iterates through all the field
// trials in it, and creates them via CreateFieldTrial(). Returns true if
// successful and false otherwise.
static bool CreateTrialsFromSharedMemoryMapping(
ReadOnlySharedMemoryMapping shm_mapping);
// Instantiates the field trial allocator, adds all existing field trials to it,
// and duplicates its handle to a read-only handle, which gets stored in
// |readonly_allocator_handle|.
static void InstantiateFieldTrialAllocatorIfNeeded();
// Adds the field trial to the allocator. Caller must hold a lock before
// calling this.
static void AddToAllocatorWhileLocked(PersistentMemoryAllocator* allocator,
FieldTrial* field_trial);
// Activate the corresponding field trial entry struct in shared memory.
static void ActivateFieldTrialEntryWhileLocked(FieldTrial* field_trial);
// A map from FieldTrial names to the actual instances.
typedef std::map<std::string, FieldTrial*> RegistrationMap;
// If one-time randomization is enabled, returns a weak pointer to the
// corresponding EntropyProvider. Otherwise, returns NULL.
static const FieldTrial::EntropyProvider*
GetEntropyProviderForOneTimeRandomization();
// Helper function should be called only while holding lock_.
FieldTrial* PreLockedFind(const std::string& name);
// Register() stores a pointer to the given trial in a global map.
// This method also AddRef's the indicated trial.
// This should always be called after creating a new FieldTrial instance.
static void Register(FieldTrial* trial);
// Returns all the registered trials.
static RegistrationMap GetRegisteredTrials();
static FieldTrialList* global_; // The singleton of this class.
// This will tell us if there is an attempt to register a field
// trial or check if one-time randomization is enabled without
// creating the FieldTrialList. This is not an error, unless a
// FieldTrialList is created after that.
static bool used_without_global_;
// Lock for access to registered_ and field_trial_allocator_.
Lock lock_;
RegistrationMap registered_;
std::map<std::string, std::string> seen_states_;
// Entropy provider to be used for one-time randomized field trials. If NULL,
// one-time randomization is not supported.
std::unique_ptr<const FieldTrial::EntropyProvider> entropy_provider_;
// List of observers to be notified when a group is selected for a FieldTrial.
scoped_refptr<ObserverListThreadSafe<Observer> > observer_list_;
// Single synchronous observer to be notified when a trial group is chosen.
Observer* synchronous_observer_ = nullptr;
// Allocator in shared memory containing field trial data. Used in both
// browser and child processes, but readonly in the child.
// In the future, we may want to move this to a more generic place if we want
// to start passing more data other than field trials.
std::unique_ptr<FieldTrialAllocator> field_trial_allocator_ = nullptr;
// Readonly copy of the region to the allocator. Needs to be a member variable
// because it's needed from both CopyFieldTrialStateToFlags() and
// AppendFieldTrialHandleIfNeeded().
ReadOnlySharedMemoryRegion readonly_allocator_region_;
// Tracks whether CreateTrialsFromCommandLine() has been called.
bool create_trials_from_command_line_called_ = false;
DISALLOW_COPY_AND_ASSIGN(FieldTrialList);
};
} // namespace base
#endif // BASE_METRICS_FIELD_TRIAL_H_
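A compilable version of the MemoryExperiment example from the header comment, plus the state-string format documented for StatesToString()/AllStatesToString(). This sketch assumes a FieldTrialList (with an entropy provider) has already been constructed for the process; the trial and group names are the ones used in the comment above:

#include <string>

#include "base/metrics/field_trial.h"

void MemoryExperimentSketch() {
  scoped_refptr<base::FieldTrial> trial(
      base::FieldTrialList::FactoryGetFieldTrial(
          "MemoryExperiment", 1000, "StandardMem",
          base::FieldTrial::ONE_TIME_RANDOMIZED, nullptr));
  const int high_mem_group = trial->AppendGroup("HighMem", 20);  // 2%.
  trial->AppendGroup("LowMem", 20);                              // 2%.
  // group() finalizes the choice, activates the trial, and notifies
  // observers; compare against the returned group numbers to branch.
  const bool in_high_mem = (trial->group() == high_mem_group);
  // Per the comments above, the active-state string is a run of
  // "TrialName/GroupName/" pairs, e.g. "MemoryExperiment/HighMem/", and
  // AllStatesToString() prefixes activated trials with '*'.
  std::string states;
  base::FieldTrialList::StatesToString(&states);
  (void)in_high_mem;
}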


@@ -0,0 +1,88 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/field_trial_param_associator.h"
#include "base/metrics/field_trial.h"
namespace base {
FieldTrialParamAssociator::FieldTrialParamAssociator() = default;
FieldTrialParamAssociator::~FieldTrialParamAssociator() = default;
// static
FieldTrialParamAssociator* FieldTrialParamAssociator::GetInstance() {
return Singleton<FieldTrialParamAssociator,
LeakySingletonTraits<FieldTrialParamAssociator>>::get();
}
bool FieldTrialParamAssociator::AssociateFieldTrialParams(
const std::string& trial_name,
const std::string& group_name,
const FieldTrialParams& params) {
if (FieldTrialList::IsTrialActive(trial_name))
return false;
AutoLock scoped_lock(lock_);
const FieldTrialKey key(trial_name, group_name);
if (Contains(field_trial_params_, key))
return false;
field_trial_params_[key] = params;
return true;
}
bool FieldTrialParamAssociator::GetFieldTrialParams(
const std::string& trial_name,
FieldTrialParams* params) {
FieldTrial* field_trial = FieldTrialList::Find(trial_name);
if (!field_trial)
return false;
// First try the local map, falling back to getting it from shared memory.
if (GetFieldTrialParamsWithoutFallback(trial_name, field_trial->group_name(),
params)) {
return true;
}
// TODO(lawrencewu): add the params to field_trial_params_ for next time.
return FieldTrialList::GetParamsFromSharedMemory(field_trial, params);
}
bool FieldTrialParamAssociator::GetFieldTrialParamsWithoutFallback(
const std::string& trial_name,
const std::string& group_name,
FieldTrialParams* params) {
AutoLock scoped_lock(lock_);
const FieldTrialKey key(trial_name, group_name);
if (!Contains(field_trial_params_, key))
return false;
*params = field_trial_params_[key];
return true;
}
void FieldTrialParamAssociator::ClearAllParamsForTesting() {
{
AutoLock scoped_lock(lock_);
field_trial_params_.clear();
}
FieldTrialList::ClearParamsFromSharedMemoryForTesting();
}
void FieldTrialParamAssociator::ClearParamsForTesting(
const std::string& trial_name,
const std::string& group_name) {
AutoLock scoped_lock(lock_);
const FieldTrialKey key(trial_name, group_name);
field_trial_params_.erase(key);
}
void FieldTrialParamAssociator::ClearAllCachedParamsForTesting() {
AutoLock scoped_lock(lock_);
field_trial_params_.clear();
}
} // namespace base


@@ -0,0 +1,74 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
#define BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
#include <map>
#include <string>
#include <utility>
#include "base/base_export.h"
#include "base/memory/singleton.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/field_trial_params.h"
#include "base/synchronization/lock.h"
namespace base {
// Keeps track of the parameters of all field trials and ensures access to them
// is thread-safe.
class BASE_EXPORT FieldTrialParamAssociator {
public:
FieldTrialParamAssociator();
~FieldTrialParamAssociator();
// Retrieve the singleton.
static FieldTrialParamAssociator* GetInstance();
// Sets parameters for the given field trial name and group.
bool AssociateFieldTrialParams(const std::string& trial_name,
const std::string& group_name,
const FieldTrialParams& params);
// Gets the parameters for a field trial and its chosen group. If not found in
// field_trial_params_, then tries to look them up in shared memory.
bool GetFieldTrialParams(const std::string& trial_name,
FieldTrialParams* params);
// Gets the parameters for a field trial and its chosen group. Does not
// fall back to looking it up in shared memory. This should only be used if you
// know for sure the params are in the mapping, like if you're in the browser
// process, and even then you should probably just use GetFieldTrialParams().
bool GetFieldTrialParamsWithoutFallback(const std::string& trial_name,
const std::string& group_name,
FieldTrialParams* params);
// Clears the internal field_trial_params_ mapping, plus removes all params in
// shared memory.
void ClearAllParamsForTesting();
// Clears a single field trial param.
// Note: this does NOT remove the param in shared memory.
void ClearParamsForTesting(const std::string& trial_name,
const std::string& group_name);
// Clears the internal field_trial_params_ mapping.
void ClearAllCachedParamsForTesting();
private:
friend struct DefaultSingletonTraits<FieldTrialParamAssociator>;
// (field_trial_name, field_trial_group)
typedef std::pair<std::string, std::string> FieldTrialKey;
Lock lock_;
std::map<FieldTrialKey, FieldTrialParams> field_trial_params_;
DISALLOW_COPY_AND_ASSIGN(FieldTrialParamAssociator);
};
} // namespace base
#endif // BASE_METRICS_FIELD_TRIAL_PARAM_ASSOCIATOR_H_
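A short usage sketch of the associator API declared above; the trial name, group name, and parameter keys are made up for illustration, and the snippet assumes the trial is not yet active:

#include "base/metrics/field_trial_param_associator.h"
#include "base/metrics/field_trial_params.h"

void AssociateAndReadParamsSketch() {
  base::FieldTrialParamAssociator* associator =
      base::FieldTrialParamAssociator::GetInstance();
  base::FieldTrialParams params = {{"threshold", "10"}, {"mode", "fast"}};
  // Returns false if the trial is already active or params were already
  // associated with this (trial, group) pair.
  const bool ok =
      associator->AssociateFieldTrialParams("MyTrial", "GroupA", params);
  base::FieldTrialParams read_back;
  // Reads only the local map; no shared-memory fallback.
  if (ok && associator->GetFieldTrialParamsWithoutFallback(
                "MyTrial", "GroupA", &read_back)) {
    // read_back now holds {"threshold": "10", "mode": "fast"}.
  }
}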


@@ -0,0 +1,207 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/field_trial_params.h"
#include <set>
#include <utility>
#include <vector>
#include "base/feature_list.h"
#include "base/metrics/field_trial.h"
#include "base/metrics/field_trial_param_associator.h"
#include "base/strings/string_number_conversions.h"
#include "base/strings/string_split.h"
#include "base/strings/stringprintf.h"
namespace base {
bool AssociateFieldTrialParams(const std::string& trial_name,
const std::string& group_name,
const FieldTrialParams& params) {
return FieldTrialParamAssociator::GetInstance()->AssociateFieldTrialParams(
trial_name, group_name, params);
}
bool AssociateFieldTrialParamsFromString(
const std::string& params_string,
FieldTrialParamsDecodeStringFunc decode_data_func) {
// Format: Trial1.Group1:k1/v1/k2/v2,Trial2.Group2:k1/v1/k2/v2
std::set<std::pair<std::string, std::string>> trial_groups;
for (StringPiece experiment_group :
SplitStringPiece(params_string, ",", TRIM_WHITESPACE, SPLIT_WANT_ALL)) {
std::vector<StringPiece> experiment = SplitStringPiece(
experiment_group, ":", TRIM_WHITESPACE, SPLIT_WANT_ALL);
if (experiment.size() != 2) {
DLOG(ERROR) << "Experiment and params should be separated by ':'";
return false;
}
std::vector<std::string> group_parts =
SplitString(experiment[0], ".", TRIM_WHITESPACE, SPLIT_WANT_ALL);
if (group_parts.size() != 2) {
DLOG(ERROR) << "Trial and group name should be separated by '.'";
return false;
}
std::vector<std::string> key_values =
SplitString(experiment[1], "/", TRIM_WHITESPACE, SPLIT_WANT_ALL);
if (key_values.size() % 2 != 0) {
DLOG(ERROR) << "Param name and param value should be separated by '/'";
return false;
}
std::string trial = decode_data_func(group_parts[0]);
std::string group = decode_data_func(group_parts[1]);
auto trial_group = std::make_pair(trial, group);
if (trial_groups.find(trial_group) != trial_groups.end()) {
DLOG(ERROR) << StringPrintf(
"A (trial, group) pair listed more than once. (%s, %s)",
trial.c_str(), group.c_str());
return false;
}
trial_groups.insert(trial_group);
std::map<std::string, std::string> params;
for (size_t i = 0; i < key_values.size(); i += 2) {
std::string key = decode_data_func(key_values[i]);
std::string value = decode_data_func(key_values[i + 1]);
params[key] = value;
}
bool result = AssociateFieldTrialParams(trial, group, params);
if (!result) {
DLOG(ERROR) << "Failed to associate field trial params for group \""
<< group << "\" in trial \"" << trial << "\"";
return false;
}
}
return true;
}
bool GetFieldTrialParams(const std::string& trial_name,
FieldTrialParams* params) {
return FieldTrialParamAssociator::GetInstance()->GetFieldTrialParams(
trial_name, params);
}
bool GetFieldTrialParamsByFeature(const Feature& feature,
FieldTrialParams* params) {
if (!FeatureList::IsEnabled(feature))
return false;
FieldTrial* trial = FeatureList::GetFieldTrial(feature);
if (!trial)
return false;
return GetFieldTrialParams(trial->trial_name(), params);
}
std::string GetFieldTrialParamValue(const std::string& trial_name,
const std::string& param_name) {
FieldTrialParams params;
if (GetFieldTrialParams(trial_name, &params)) {
auto it = params.find(param_name);
if (it != params.end())
return it->second;
}
return std::string();
}
std::string GetFieldTrialParamValueByFeature(const Feature& feature,
const std::string& param_name) {
if (!FeatureList::IsEnabled(feature))
return std::string();
FieldTrial* trial = FeatureList::GetFieldTrial(feature);
if (!trial)
return std::string();
return GetFieldTrialParamValue(trial->trial_name(), param_name);
}
int GetFieldTrialParamByFeatureAsInt(const Feature& feature,
const std::string& param_name,
int default_value) {
std::string value_as_string =
GetFieldTrialParamValueByFeature(feature, param_name);
int value_as_int = 0;
if (!StringToInt(value_as_string, &value_as_int)) {
if (!value_as_string.empty()) {
DLOG(WARNING) << "Failed to parse field trial param " << param_name
<< " with string value " << value_as_string
<< " under feature " << feature.name
<< " into an int. Falling back to default value of "
<< default_value;
}
value_as_int = default_value;
}
return value_as_int;
}
double GetFieldTrialParamByFeatureAsDouble(const Feature& feature,
const std::string& param_name,
double default_value) {
std::string value_as_string =
GetFieldTrialParamValueByFeature(feature, param_name);
double value_as_double = 0;
if (!StringToDouble(value_as_string, &value_as_double)) {
if (!value_as_string.empty()) {
DLOG(WARNING) << "Failed to parse field trial param " << param_name
<< " with string value " << value_as_string
<< " under feature " << feature.name
<< " into a double. Falling back to default value of "
<< default_value;
}
value_as_double = default_value;
}
return value_as_double;
}
bool GetFieldTrialParamByFeatureAsBool(const Feature& feature,
const std::string& param_name,
bool default_value) {
std::string value_as_string =
GetFieldTrialParamValueByFeature(feature, param_name);
if (value_as_string == "true")
return true;
if (value_as_string == "false")
return false;
if (!value_as_string.empty()) {
DLOG(WARNING) << "Failed to parse field trial param " << param_name
<< " with string value " << value_as_string
<< " under feature " << feature.name
<< " into a bool. Falling back to default value of "
<< default_value;
}
return default_value;
}
std::string FeatureParam<std::string>::Get() const {
const std::string value = GetFieldTrialParamValueByFeature(*feature, name);
return value.empty() ? default_value : value;
}
double FeatureParam<double>::Get() const {
return GetFieldTrialParamByFeatureAsDouble(*feature, name, default_value);
}
int FeatureParam<int>::Get() const {
return GetFieldTrialParamByFeatureAsInt(*feature, name, default_value);
}
bool FeatureParam<bool>::Get() const {
return GetFieldTrialParamByFeatureAsBool(*feature, name, default_value);
}
void LogInvalidEnumValue(const Feature& feature,
const std::string& param_name,
const std::string& value_as_string,
int default_value_as_int) {
DLOG(WARNING) << "Failed to parse field trial param " << param_name
<< " with string value " << value_as_string << " under feature "
<< feature.name
<< " into an enum. Falling back to default value of "
<< default_value_as_int;
}
} // namespace base
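For illustration, a sketch (not part of the file above) of the params string format parsed by AssociateFieldTrialParamsFromString(), assuming an identity decode function and hypothetical trial/group names.

#include <string>

#include "base/metrics/field_trial_params.h"

namespace {
// Identity decoder; real callers normally supply an escaping-aware function.
std::string IdentityDecode(const std::string& str) {
  return str;
}
}  // namespace

void AssociateFromString() {
  // Trial1.Group1 gets {k1: v1, k2: v2}; Trial2.Group2 gets {k1: v1}.
  bool ok = base::AssociateFieldTrialParamsFromString(
      "Trial1.Group1:k1/v1/k2/v2,Trial2.Group2:k1/v1", &IdentityDecode);
  // |ok| is false if the string is malformed or a (trial, group) pair repeats.
}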

View file

@@ -0,0 +1,279 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_FIELD_TRIAL_PARAMS_H_
#define BASE_METRICS_FIELD_TRIAL_PARAMS_H_
#include <map>
#include <string>
#include <type_traits>
#include "base/base_export.h"
#include "base/logging.h"
namespace base {
struct Feature;
// Key-value mapping type for field trial parameters.
typedef std::map<std::string, std::string> FieldTrialParams;
// Param string decoding function for AssociateFieldTrialParamsFromString().
typedef std::string (*FieldTrialParamsDecodeStringFunc)(const std::string& str);
// Associates the specified set of key-value |params| with the field trial
// specified by |trial_name| and |group_name|. Fails and returns false if the
// specified field trial already has params associated with it or the trial
// is already active (group() has been called on it). Thread safe.
BASE_EXPORT bool AssociateFieldTrialParams(const std::string& trial_name,
const std::string& group_name,
const FieldTrialParams& params);
// Provides a mechanism to associate multiple sets of params with multiple groups
// with a formatted string as returned by FieldTrialList::AllParamsToString().
// |decode_data_func| allows specifying a custom decoding function.
BASE_EXPORT bool AssociateFieldTrialParamsFromString(
const std::string& params_string,
FieldTrialParamsDecodeStringFunc decode_data_func);
// Retrieves the set of key-value |params| for the specified field trial, based
// on its selected group. If the field trial does not exist or its selected
// group does not have any parameters associated with it, returns false and
// does not modify |params|. Calling this function will result in the field
// trial being marked as active if found (i.e. group() will be called on it),
// if it wasn't already. Thread safe.
BASE_EXPORT bool GetFieldTrialParams(const std::string& trial_name,
FieldTrialParams* params);
// Retrieves the set of key-value |params| for the field trial associated with
// the specified |feature|. A feature is associated with at most one field
// trial and selected group. See base/feature_list.h for more information on
// features. If the feature is not enabled, or if there's no associated params,
// returns false and does not modify |params|. Calling this function will
// result in the associated field trial being marked as active if found (i.e.
// group() will be called on it), if it wasn't already. Thread safe.
BASE_EXPORT bool GetFieldTrialParamsByFeature(const base::Feature& feature,
FieldTrialParams* params);
// Retrieves a specific parameter value corresponding to |param_name| for the
// specified field trial, based on its selected group. If the field trial does
// not exist or the specified parameter does not exist, returns an empty
// string. Calling this function will result in the field trial being marked as
// active if found (i.e. group() will be called on it), if it wasn't already.
// Thread safe.
BASE_EXPORT std::string GetFieldTrialParamValue(const std::string& trial_name,
const std::string& param_name);
// Retrieves a specific parameter value corresponding to |param_name| for the
// field trial associated with the specified |feature|. A feature is associated
// with at most one field trial and selected group. See base/feature_list.h for
// more information on features. If the feature is not enabled, or the
// specified parameter does not exist, returns an empty string. Calling this
// function will result in the associated field trial being marked as active if
// found (i.e. group() will be called on it), if it wasn't already. Thread safe.
BASE_EXPORT std::string GetFieldTrialParamValueByFeature(
const base::Feature& feature,
const std::string& param_name);
// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
// string value into an int using base::StringToInt() and returns it, if
// successful. Otherwise, it returns |default_value|. If the string value is not
// empty and the conversion does not succeed, it produces a warning to LOG.
BASE_EXPORT int GetFieldTrialParamByFeatureAsInt(const base::Feature& feature,
const std::string& param_name,
int default_value);
// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
// string value into a double using base::StringToDouble() and returns it, if
// successful. Otherwise, it returns |default_value|. If the string value is not
// empty and the conversion does not succeed, it produces a warning to LOG.
BASE_EXPORT double GetFieldTrialParamByFeatureAsDouble(
const base::Feature& feature,
const std::string& param_name,
double default_value);
// Same as GetFieldTrialParamValueByFeature(). On top of that, it converts the
// string value into a boolean and returns it, if successful. Otherwise, it
// returns |default_value|. The only string representations accepted here are
// "true" and "false". If the string value is not empty and the conversion does
// not succeed, it produces a warning to LOG.
BASE_EXPORT bool GetFieldTrialParamByFeatureAsBool(
const base::Feature& feature,
const std::string& param_name,
bool default_value);
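// Illustrative example (not part of the original header): assuming a feature
// kMyFeature whose active group has the param {"timeout_ms": "250"},
//   int timeout =
//       base::GetFieldTrialParamByFeatureAsInt(kMyFeature, "timeout_ms", 100);
// would yield 250, and would fall back to 100 if the param were missing or not
// parsable as an int.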
// Shared declaration for various FeatureParam<T> types.
//
// This template is defined for the following types T:
// bool
// int
// double
// std::string
// enum types
//
// See the individual definitions below for the appropriate interfaces.
// Attempting to use it with any other type is a compile error.
template <typename T, bool IsEnum = std::is_enum<T>::value>
struct FeatureParam {
// Prevent use of FeatureParam<> with unsupported types (e.g. void*). Uses T
// in its definition so that evaluation is deferred until the template is
// instantiated.
static_assert(!std::is_same<T, T>::value, "unsupported FeatureParam<> type");
};
// Declares a string-valued parameter. Example:
//
// constexpr FeatureParam<string> kAssistantName{
// &kAssistantFeature, "assistant_name", "HAL"};
//
// If the feature is not set, or set to the empty string, then Get() will return
// the default value.
template <>
struct FeatureParam<std::string> {
constexpr FeatureParam(const Feature* feature,
const char* name,
const char* default_value)
: feature(feature), name(name), default_value(default_value) {}
BASE_EXPORT std::string Get() const;
const Feature* const feature;
const char* const name;
const char* const default_value;
};
// Declares a double-valued parameter. Example:
//
// constexpr FeatureParam<double> kAssistantTriggerThreshold{
// &kAssistantFeature, "trigger_threshold", 0.10};
//
// If the feature is not set, or set to an invalid double value, then Get() will
// return the default value.
template <>
struct FeatureParam<double> {
constexpr FeatureParam(const Feature* feature,
const char* name,
double default_value)
: feature(feature), name(name), default_value(default_value) {}
BASE_EXPORT double Get() const;
const Feature* const feature;
const char* const name;
const double default_value;
};
// Declares an int-valued parameter. Example:
//
// constexpr FeatureParam<int> kAssistantParallelism{
// &kAssistantFeature, "parallelism", 4};
//
// If the feature is not set, or set to an invalid int value, then Get() will
// return the default value.
template <>
struct FeatureParam<int> {
constexpr FeatureParam(const Feature* feature,
const char* name,
int default_value)
: feature(feature), name(name), default_value(default_value) {}
BASE_EXPORT int Get() const;
const Feature* const feature;
const char* const name;
const int default_value;
};
// Declares a bool-valued parameter. Example:
//
// constexpr FeatureParam<bool> kAssistantIsHelpful{
// &kAssistantFeature, "is_helpful", true};
//
// If the feature is not set, or set to a value other than "true" or "false", then
// Get() will return the default value.
template <>
struct FeatureParam<bool> {
constexpr FeatureParam(const Feature* feature,
const char* name,
bool default_value)
: feature(feature), name(name), default_value(default_value) {}
BASE_EXPORT bool Get() const;
const Feature* const feature;
const char* const name;
const bool default_value;
};
BASE_EXPORT void LogInvalidEnumValue(const Feature& feature,
const std::string& param_name,
const std::string& value_as_string,
int default_value_as_int);
// Feature param declaration for an enum, with associated options. Example:
//
// constexpr FeatureParam<ShapeEnum>::Option kShapeParamOptions[] = {
// {SHAPE_CIRCLE, "circle"},
// {SHAPE_CYLINDER, "cylinder"},
// {SHAPE_PAPERCLIP, "paperclip"}};
// constexpr FeatureParam<ShapeEnum> kAssistantShapeParam{
// &kAssistantFeature, "shape", SHAPE_CIRCLE, &kShapeParamOptions};
//
// With this declaration, the parameter may be set to "circle", "cylinder", or
// "paperclip", and that will be translated to one of the three enum values. By
// default, or if the param is set to an unknown value, the parameter will be
// assumed to be SHAPE_CIRCLE.
template <typename Enum>
struct FeatureParam<Enum, true> {
struct Option {
constexpr Option(Enum value, const char* name) : value(value), name(name) {}
const Enum value;
const char* const name;
};
template <size_t option_count>
constexpr FeatureParam(const Feature* feature,
const char* name,
const Enum default_value,
const Option (*options)[option_count])
: feature(feature),
name(name),
default_value(default_value),
options(*options),
option_count(option_count) {
static_assert(option_count >= 1, "FeatureParam<enum> has no options");
}
Enum Get() const {
std::string value = GetFieldTrialParamValueByFeature(*feature, name);
if (value.empty())
return default_value;
for (size_t i = 0; i < option_count; ++i) {
if (value == options[i].name)
return options[i].value;
}
LogInvalidEnumValue(*feature, name, value, static_cast<int>(default_value));
return default_value;
}
// Returns the param-string for the given enum value.
std::string GetName(Enum value) const {
for (size_t i = 0; i < option_count; ++i) {
if (value == options[i].value)
return options[i].name;
}
NOTREACHED();
return "";
}
const base::Feature* const feature;
const char* const name;
const Enum default_value;
const Option* const options;
const size_t option_count;
};
} // namespace base
#endif // BASE_METRICS_FIELD_TRIAL_PARAMS_H_
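A hedged sketch of how the FeatureParam<T> declarations above are typically wired up, mirroring the examples in the comments; the feature, its default state, and the param names are assumptions for illustration.

#include <string>

#include "base/feature_list.h"
#include "base/metrics/field_trial_params.h"

namespace {

const base::Feature kAssistantFeature{"Assistant",
                                      base::FEATURE_DISABLED_BY_DEFAULT};

constexpr base::FeatureParam<std::string> kAssistantName{
    &kAssistantFeature, "assistant_name", "HAL"};
constexpr base::FeatureParam<int> kAssistantParallelism{
    &kAssistantFeature, "parallelism", 4};

}  // namespace

void UseParams() {
  // Each Get() marks the associated trial active (if any) and falls back to
  // the declared default when the feature is disabled or the value is invalid.
  std::string name = kAssistantName.Get();
  int parallelism = kAssistantParallelism.Get();
}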

File diff suppressed because it is too large

View file

@@ -0,0 +1,610 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Histogram is an object that aggregates statistics, and can summarize them in
// various forms, including ASCII graphical, HTML, and numerically (as a
// vector of numbers corresponding to each of the aggregating buckets).
// It supports calls to accumulate either time intervals (which are processed
// as integral number of milliseconds), or arbitrary integral units.
// For Histogram (exponential histogram), LinearHistogram and CustomHistogram,
// the minimum for a declared range is 1 (instead of 0), while the maximum is
// (HistogramBase::kSampleType_MAX - 1). However, there will always be underflow
// and overflow buckets added automatically, so a 0 bucket will always exist
// even when a minimum value of 1 is specified.
// Each use of a histogram with the same name will reference the same underlying
// data, so it is safe to record to the same histogram from multiple locations
// in the code. It is a runtime error if all uses of the same histogram do not
// agree exactly in type, bucket size and range.
// For Histogram and LinearHistogram, the maximum for a declared range should
// always be larger (not equal) than minimal range. Zero and
// HistogramBase::kSampleType_MAX are implicitly added as first and last ranges,
// so the smallest legal bucket_count is 3. However, CustomHistogram can have
// a bucket count of 2 (when you give a custom ranges vector containing only 1
// range).
// For these 3 kinds of histograms, the max bucket count is always
// (Histogram::kBucketCount_MAX - 1).
// The buckets layout of class Histogram is exponential. For example, buckets
// might contain (sequentially) the count of values in the following intervals:
// [0,1), [1,2), [2,4), [4,8), [8,16), [16,32), [32,64), [64,infinity)
// That bucket allocation would actually result from construction of a histogram
// for values between 1 and 64, with 8 buckets, such as:
// Histogram count("some name", 1, 64, 8);
// Note that the underflow bucket [0,1) and the overflow bucket [64,infinity)
// are also counted by the constructor in the user supplied "bucket_count"
// argument.
// The above example has an exponential ratio of 2 (doubling the bucket width
// in each consecutive bucket). The Histogram class automatically calculates
// the smallest ratio that it can use to construct the number of buckets
// selected in the constructor. As another example, if you had 50 buckets,
// and millisecond time values from 1 to 10000, then the ratio between
// consecutive bucket widths will be approximately the 50th root of 10000.
// This approach provides very fine grain (narrow) buckets
// at the low end of the histogram scale, but allows the histogram to cover a
// gigantic range with the addition of very few buckets.
// Usually we use macros to define and use a histogram, which are defined in
// base/metrics/histogram_macros.h. Note: Callers should include that header
// directly if they only access the histogram APIs through macros.
//
// Macros use a pattern involving a function static variable, that is a pointer
// to a histogram. This static is explicitly initialized on any thread
// that detects an uninitialized (NULL) pointer. The potentially racy
// initialization is not a problem as it is always set to point to the same
// value (i.e., the FactoryGet always returns the same value). FactoryGet
// is also completely thread safe, which results in a completely thread safe,
// and relatively fast, set of counters. To avoid races at shutdown, the static
// pointer is NOT deleted, and we leak the histograms at process termination.
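//
// For illustration only (not part of the original comment), a typical call
// site using the macros from base/metrics/histogram_macros.h:
//
//   UMA_HISTOGRAM_CUSTOM_COUNTS("MyGroup.MyValue", value, 1, 10000, 50);
//
// which lazily creates (or looks up) an exponential histogram covering
// [1, 10000) with 50 buckets and records |value| into it.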
#ifndef BASE_METRICS_HISTOGRAM_H_
#define BASE_METRICS_HISTOGRAM_H_
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "base/base_export.h"
#include "base/compiler_specific.h"
#include "base/containers/span.h"
#include "base/gtest_prod_util.h"
#include "base/logging.h"
#include "base/macros.h"
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/strings/string_piece.h"
#include "base/time/time.h"
namespace base {
class BooleanHistogram;
class CustomHistogram;
class DelayedPersistentAllocation;
class Histogram;
class HistogramTest;
class LinearHistogram;
class Pickle;
class PickleIterator;
class SampleVector;
class SampleVectorBase;
class BASE_EXPORT Histogram : public HistogramBase {
public:
// Initialize maximum number of buckets in histograms as 16,384.
static const uint32_t kBucketCount_MAX;
typedef std::vector<Count> Counts;
~Histogram() override;
//----------------------------------------------------------------------------
// For a valid histogram, input should follow these restrictions:
// minimum > 0 (if a minimum below 1 is specified, it will implicitly be
// normalized up to 1)
// maximum > minimum
// buckets > 2 [minimum buckets needed: underflow, overflow and the range]
// Additionally,
// buckets <= (maximum - minimum + 2) - this is to ensure that we don't have
// more buckets than the range of numbers; having more buckets than 1 per
// value in the range would be nonsensical.
static HistogramBase* FactoryGet(const std::string& name,
Sample minimum,
Sample maximum,
uint32_t bucket_count,
int32_t flags);
static HistogramBase* FactoryTimeGet(const std::string& name,
base::TimeDelta minimum,
base::TimeDelta maximum,
uint32_t bucket_count,
int32_t flags);
static HistogramBase* FactoryMicrosecondsTimeGet(const std::string& name,
base::TimeDelta minimum,
base::TimeDelta maximum,
uint32_t bucket_count,
int32_t flags);
// Overloads of the above functions that take a const char* |name| param, to
// avoid code bloat from the std::string constructor being inlined into call
// sites.
static HistogramBase* FactoryGet(const char* name,
Sample minimum,
Sample maximum,
uint32_t bucket_count,
int32_t flags);
static HistogramBase* FactoryTimeGet(const char* name,
base::TimeDelta minimum,
base::TimeDelta maximum,
uint32_t bucket_count,
int32_t flags);
static HistogramBase* FactoryMicrosecondsTimeGet(const char* name,
base::TimeDelta minimum,
base::TimeDelta maximum,
uint32_t bucket_count,
int32_t flags);
// Create a histogram using data in persistent storage.
static std::unique_ptr<HistogramBase> PersistentCreate(
const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
static void InitializeBucketRanges(Sample minimum,
Sample maximum,
BucketRanges* ranges);
// This constant is for FindCorruption. Since snapshots of histograms are
// taken asynchronously relative to sampling, and our counting code currently
// does not prevent race conditions, it is pretty likely that we'll catch a
// redundant count that doesn't match the sample count. We allow for a
// certain amount of slop before flagging this as an inconsistency. Even with
// an inconsistency, we'll snapshot it again (for UMA in about a half hour),
// so we'll eventually get the data, if it was not the result of a corruption.
static const int kCommonRaceBasedCountMismatch;
// Check to see if bucket ranges, counts and tallies in the snapshot are
// consistent with the bucket ranges and checksums in our histogram. This can
// produce a false-alarm if a race occurred in the reading of the data during
// a SnapShot process, but should otherwise be false at all times (unless we
// have memory over-writes, or DRAM failures). Flag definitions are located
// under "enum Inconsistency" in base/metrics/histogram_base.h.
uint32_t FindCorruption(const HistogramSamples& samples) const override;
//----------------------------------------------------------------------------
// Accessors for factory construction, serialization and testing.
//----------------------------------------------------------------------------
const BucketRanges* bucket_ranges() const;
Sample declared_min() const;
Sample declared_max() const;
virtual Sample ranges(uint32_t i) const;
virtual uint32_t bucket_count() const;
// This function validates histogram construction arguments. It returns false
// if some of the arguments are bad but also corrects them so they should
// function on non-dcheck builds without crashing.
// Note: Currently it allows some bad input, e.g. 0 as minimum, but silently
// converts it to good input: 1.
// TODO(bcwhite): Use false returns to create "sink" histograms so that bad
// data doesn't create confusion on the servers.
static bool InspectConstructionArguments(StringPiece name,
Sample* minimum,
Sample* maximum,
uint32_t* bucket_count);
// HistogramBase implementation:
uint64_t name_hash() const override;
HistogramType GetHistogramType() const override;
bool HasConstructionArguments(Sample expected_minimum,
Sample expected_maximum,
uint32_t expected_bucket_count) const override;
void Add(Sample value) override;
void AddCount(Sample value, int count) override;
std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
std::unique_ptr<HistogramSamples> SnapshotDelta() override;
std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
void AddSamples(const HistogramSamples& samples) override;
bool AddSamplesFromPickle(base::PickleIterator* iter) override;
void WriteHTMLGraph(std::string* output) const override;
void WriteAscii(std::string* output) const override;
// Validates the histogram contents and CHECKs on errors.
// TODO(bcwhite): Remove this after https://crbug/836875.
void ValidateHistogramContents() const override;
protected:
// This class, defined entirely within the .cc file, contains all the
// common logic for building a Histogram and can be overridden by more
// specific types to alter details of how the creation is done. It is
// defined as an embedded class (rather than an anonymous one) so it
// can access the protected constructors.
class Factory;
// |ranges| should contain the underflow and overflow buckets. See top
// comments for example.
Histogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges);
// Traditionally, histograms allocate their own memory for the bucket
// vector but "shared" histograms use memory regions allocated from a
// special memory segment that is passed in here. It is assumed that
// the life of this memory is managed externally and exceeds the lifetime
// of this object. Practically, this memory is never released until the
// process exits and the OS cleans it up.
Histogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
// HistogramBase implementation:
void SerializeInfoImpl(base::Pickle* pickle) const override;
// Method to override to skip the display of the i'th bucket if it's empty.
virtual bool PrintEmptyBucket(uint32_t index) const;
// Get normalized size, relative to the ranges(i).
virtual double GetBucketSize(Count current, uint32_t i) const;
// Return a string description of what goes in a given bucket.
// Most commonly this is the numeric value, but in derived classes it may
// be a name (or string description) given to the bucket.
virtual const std::string GetAsciiBucketRange(uint32_t it) const;
private:
// Allow tests to corrupt our innards for testing purposes.
friend class HistogramTest;
FRIEND_TEST_ALL_PREFIXES(HistogramTest, BoundsTest);
FRIEND_TEST_ALL_PREFIXES(HistogramTest, BucketPlacementTest);
FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
friend class StatisticsRecorder; // To allow it to delete duplicates.
friend class StatisticsRecorderTest;
friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
base::PickleIterator* iter);
static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
// Create a snapshot containing all samples (both logged and unlogged).
// Implementation of SnapshotSamples method with a more specific type for
// internal use.
std::unique_ptr<SampleVector> SnapshotAllSamples() const;
// Create a copy of unlogged samples.
std::unique_ptr<SampleVector> SnapshotUnloggedSamples() const;
//----------------------------------------------------------------------------
// Helpers for emitting Ascii graphic. Each method appends data to output.
void WriteAsciiBody(const SampleVector& snapshot,
bool graph_it,
const std::string& newline,
std::string* output) const;
// Find out how large (graphically) the largest bucket will appear to be.
double GetPeakBucketSize(const SampleVectorBase& samples) const;
// Write a common header message describing this histogram.
void WriteAsciiHeader(const SampleVectorBase& samples,
std::string* output) const;
// Write information about previous, current, and next buckets.
// Information such as cumulative percentage, etc.
void WriteAsciiBucketContext(const int64_t past,
const Count current,
const int64_t remaining,
const uint32_t i,
std::string* output) const;
// WriteJSON calls these.
void GetParameters(DictionaryValue* params) const override;
void GetCountAndBucketData(Count* count,
int64_t* sum,
ListValue* buckets) const override;
// Samples that have not yet been logged with SnapshotDelta().
std::unique_ptr<SampleVectorBase> unlogged_samples_;
// Accumulation of all samples that have been logged with SnapshotDelta().
std::unique_ptr<SampleVectorBase> logged_samples_;
#if DCHECK_IS_ON() // Don't waste memory if it won't be used.
// Flag to indicate if PrepareFinalDelta has been previously called. It is
// used to DCHECK that a final delta is not created multiple times.
mutable bool final_delta_created_ = false;
#endif
DISALLOW_COPY_AND_ASSIGN(Histogram);
};
//------------------------------------------------------------------------------
// LinearHistogram is a more traditional histogram, with evenly spaced
// buckets.
class BASE_EXPORT LinearHistogram : public Histogram {
public:
~LinearHistogram() override;
// The minimum should start from 1; 0 as a minimum is invalid because 0 is the
// implicit default underflow bucket.
static HistogramBase* FactoryGet(const std::string& name,
Sample minimum,
Sample maximum,
uint32_t bucket_count,
int32_t flags);
static HistogramBase* FactoryTimeGet(const std::string& name,
TimeDelta minimum,
TimeDelta maximum,
uint32_t bucket_count,
int32_t flags);
// Overloads of the above two functions that take a const char* |name| param,
// to avoid code bloat from the std::string constructor being inlined into
// call sites.
static HistogramBase* FactoryGet(const char* name,
Sample minimum,
Sample maximum,
uint32_t bucket_count,
int32_t flags);
static HistogramBase* FactoryTimeGet(const char* name,
TimeDelta minimum,
TimeDelta maximum,
uint32_t bucket_count,
int32_t flags);
// Create a histogram using data in persistent storage.
static std::unique_ptr<HistogramBase> PersistentCreate(
const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
struct DescriptionPair {
Sample sample;
const char* description; // Null means end of a list of pairs.
};
// Create a LinearHistogram and store a list of number/text values for use in
// writing the histogram graph.
// |descriptions| can be NULL, which means no special descriptions to set. If
// it's not NULL, the last element in the array must have a NULL in its
// "description" field.
static HistogramBase* FactoryGetWithRangeDescription(
const std::string& name,
Sample minimum,
Sample maximum,
uint32_t bucket_count,
int32_t flags,
const DescriptionPair descriptions[]);
static void InitializeBucketRanges(Sample minimum,
Sample maximum,
BucketRanges* ranges);
// Overridden from Histogram:
HistogramType GetHistogramType() const override;
protected:
class Factory;
LinearHistogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges);
LinearHistogram(const char* name,
Sample minimum,
Sample maximum,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
double GetBucketSize(Count current, uint32_t i) const override;
// If we have a description for a bucket, then return that. Otherwise
// let parent class provide a (numeric) description.
const std::string GetAsciiBucketRange(uint32_t i) const override;
// Skip printing of name for numeric range if we have a name (and if this is
// an empty bucket).
bool PrintEmptyBucket(uint32_t index) const override;
private:
friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
base::PickleIterator* iter);
static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
// For some ranges, we store a printable description of a bucket range.
// If there is no description, then GetAsciiBucketRange() uses parent class
// to provide a description.
typedef std::map<Sample, std::string> BucketDescriptionMap;
BucketDescriptionMap bucket_description_;
DISALLOW_COPY_AND_ASSIGN(LinearHistogram);
};
//------------------------------------------------------------------------------
// ScaledLinearHistogram is a wrapper around a linear histogram that scales the
// counts down by some factor. Remainder values are kept locally but lost when
// uploaded or serialized. The integral counts are rounded up/down so they
// should average to the correct value when many reports are added.
//
// This is most useful when adding many counts at once via AddCount() that can
// cause overflows of the 31-bit counters, usually with an enum as the value.
class BASE_EXPORT ScaledLinearHistogram {
using AtomicCount = Histogram::AtomicCount;
using Sample = Histogram::Sample;
public:
// Currently only works with "exact" linear histograms: minimum=1, maximum=N,
// and bucket_count=N+1.
ScaledLinearHistogram(const char* name,
Sample minimum,
Sample maximum,
uint32_t bucket_count,
int32_t scale,
int32_t flags);
~ScaledLinearHistogram();
// Like AddCount() but actually accumulates |count|/|scale| and increments
// the accumulated remainder by |count|%|scale|. An additional increment
// is done when the remainder has grown sufficiently large.
void AddScaledCount(Sample value, int count);
int32_t scale() const { return scale_; }
LinearHistogram* histogram() { return histogram_; }
private:
// Pointer to the underlying histogram. Ownership of it remains with
// the statistics-recorder.
LinearHistogram* const histogram_;
// The scale factor of the sample counts.
const int32_t scale_;
// A vector of "remainder" counts indexed by bucket number. These values
// may be negative as the scaled count is actually bumped once the
// remainder is 1/2 way to the scale value (thus "rounding").
std::vector<AtomicCount> remainders_;
DISALLOW_COPY_AND_ASSIGN(ScaledLinearHistogram);
};
//------------------------------------------------------------------------------
// BooleanHistogram is a histogram for booleans.
class BASE_EXPORT BooleanHistogram : public LinearHistogram {
public:
static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
// Overload of the above function that takes a const char* |name| param,
// to avoid code bloat from the std::string constructor being inlined into
// call sites.
static HistogramBase* FactoryGet(const char* name, int32_t flags);
// Create a histogram using data in persistent storage.
static std::unique_ptr<HistogramBase> PersistentCreate(
const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
HistogramType GetHistogramType() const override;
protected:
class Factory;
private:
BooleanHistogram(const char* name, const BucketRanges* ranges);
BooleanHistogram(const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
base::PickleIterator* iter);
static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
DISALLOW_COPY_AND_ASSIGN(BooleanHistogram);
};
//------------------------------------------------------------------------------
// CustomHistogram is a histogram for a set of custom integers.
class BASE_EXPORT CustomHistogram : public Histogram {
public:
// |custom_ranges| contains a vector of limits on ranges. Each limit should be
// > 0 and < kSampleType_MAX. (Currently 0 is still accepted for backward
// compatibility). The limits can be unordered or contain duplication, but
// clients should not depend on this.
static HistogramBase* FactoryGet(const std::string& name,
const std::vector<Sample>& custom_ranges,
int32_t flags);
// Overload of the above function that takes a const char* |name| param,
// to avoid code bloat from the std::string constructor being inlined into
// call sites.
static HistogramBase* FactoryGet(const char* name,
const std::vector<Sample>& custom_ranges,
int32_t flags);
// Create a histogram using data in persistent storage.
static std::unique_ptr<HistogramBase> PersistentCreate(
const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
// Overridden from Histogram:
HistogramType GetHistogramType() const override;
// Helper method for transforming an array of valid enumeration values
// to the std::vector<int> expected by UMA_HISTOGRAM_CUSTOM_ENUMERATION.
// This function ensures that a guard bucket exists right after any
// valid sample value (unless the next higher sample is also a valid value),
// so that invalid samples never fall into the same bucket as valid samples.
static std::vector<Sample> ArrayToCustomEnumRanges(
base::span<const Sample> values);
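  // Illustrative only (not in the original source): for values {1, 5} this
  // yields the ranges {1, 2, 5, 6}, so the buckets starting at 2 and 6 act as
  // guard buckets that keep invalid samples out of the valid-value buckets.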
protected:
class Factory;
CustomHistogram(const char* name, const BucketRanges* ranges);
CustomHistogram(const char* name,
const BucketRanges* ranges,
const DelayedPersistentAllocation& counts,
const DelayedPersistentAllocation& logged_counts,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
// HistogramBase implementation:
void SerializeInfoImpl(base::Pickle* pickle) const override;
double GetBucketSize(Count current, uint32_t i) const override;
private:
friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
base::PickleIterator* iter);
static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
static bool ValidateCustomRanges(const std::vector<Sample>& custom_ranges);
DISALLOW_COPY_AND_ASSIGN(CustomHistogram);
};
} // namespace base
#endif // BASE_METRICS_HISTOGRAM_H_
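A minimal sketch of direct (non-macro) use of the factory API declared above; the histogram name and bounds are illustrative, not taken from the original file.

#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"

void RecordLatencySample(int latency_ms) {
  // FactoryGet() creates the histogram on first use and returns the same
  // instance on every later call; the pointer is intentionally never deleted.
  base::HistogramBase* histogram = base::Histogram::FactoryGet(
      "MyGroup.Latency", /*minimum=*/1, /*maximum=*/10000, /*bucket_count=*/50,
      base::HistogramBase::kUmaTargetedHistogramFlag);
  histogram->Add(latency_ms);
}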

View file

@@ -0,0 +1,220 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/histogram_base.h"
#include <limits.h>
#include <memory>
#include <set>
#include <utility>
#include "base/json/json_string_value_serializer.h"
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/sparse_histogram.h"
#include "base/metrics/statistics_recorder.h"
#include "base/no_destructor.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/process/process_handle.h"
#include "base/rand_util.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
#include "base/values.h"
namespace base {
std::string HistogramTypeToString(HistogramType type) {
switch (type) {
case HISTOGRAM:
return "HISTOGRAM";
case LINEAR_HISTOGRAM:
return "LINEAR_HISTOGRAM";
case BOOLEAN_HISTOGRAM:
return "BOOLEAN_HISTOGRAM";
case CUSTOM_HISTOGRAM:
return "CUSTOM_HISTOGRAM";
case SPARSE_HISTOGRAM:
return "SPARSE_HISTOGRAM";
case DUMMY_HISTOGRAM:
return "DUMMY_HISTOGRAM";
}
NOTREACHED();
return "UNKNOWN";
}
HistogramBase* DeserializeHistogramInfo(PickleIterator* iter) {
int type;
if (!iter->ReadInt(&type))
return nullptr;
switch (type) {
case HISTOGRAM:
return Histogram::DeserializeInfoImpl(iter);
case LINEAR_HISTOGRAM:
return LinearHistogram::DeserializeInfoImpl(iter);
case BOOLEAN_HISTOGRAM:
return BooleanHistogram::DeserializeInfoImpl(iter);
case CUSTOM_HISTOGRAM:
return CustomHistogram::DeserializeInfoImpl(iter);
case SPARSE_HISTOGRAM:
return SparseHistogram::DeserializeInfoImpl(iter);
default:
return nullptr;
}
}
const HistogramBase::Sample HistogramBase::kSampleType_MAX = INT_MAX;
HistogramBase::HistogramBase(const char* name)
: histogram_name_(name), flags_(kNoFlags) {}
HistogramBase::~HistogramBase() = default;
void HistogramBase::CheckName(const StringPiece& name) const {
DCHECK_EQ(StringPiece(histogram_name()), name);
}
void HistogramBase::SetFlags(int32_t flags) {
HistogramBase::Count old_flags = subtle::NoBarrier_Load(&flags_);
subtle::NoBarrier_Store(&flags_, old_flags | flags);
}
void HistogramBase::ClearFlags(int32_t flags) {
HistogramBase::Count old_flags = subtle::NoBarrier_Load(&flags_);
subtle::NoBarrier_Store(&flags_, old_flags & ~flags);
}
void HistogramBase::AddScaled(Sample value, int count, int scale) {
DCHECK_LT(0, scale);
// Convert raw count and probabilistically round up/down if the remainder
// is more than a random number [0, scale). This gives a more accurate
// count when there are a large number of records. RandInt is "inclusive",
// hence the -1 for the max value.
int64_t count_scaled = count / scale;
if (count - (count_scaled * scale) > base::RandInt(0, scale - 1))
count_scaled += 1;
if (count_scaled == 0)
return;
AddCount(value, count_scaled);
}
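// Worked example (illustrative, not part of the original file): with
// count = 2500 and scale = 1000, count_scaled starts at 2 and the leftover
// 500 triggers an extra increment with probability 500/1000, so repeated
// calls average out to the exact 2.5 samples per call.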
void HistogramBase::AddKilo(Sample value, int count) {
AddScaled(value, count, 1000);
}
void HistogramBase::AddKiB(Sample value, int count) {
AddScaled(value, count, 1024);
}
void HistogramBase::AddTimeMillisecondsGranularity(const TimeDelta& time) {
Add(saturated_cast<Sample>(time.InMilliseconds()));
}
void HistogramBase::AddTimeMicrosecondsGranularity(const TimeDelta& time) {
// Intentionally drop high-resolution reports on clients with low-resolution
// clocks. High-resolution metrics cannot make use of low-resolution data and
// reporting it merely adds noise to the metric. https://crbug.com/807615#c16
if (TimeTicks::IsHighResolution())
Add(saturated_cast<Sample>(time.InMicroseconds()));
}
void HistogramBase::AddBoolean(bool value) {
Add(value ? 1 : 0);
}
void HistogramBase::SerializeInfo(Pickle* pickle) const {
pickle->WriteInt(GetHistogramType());
SerializeInfoImpl(pickle);
}
uint32_t HistogramBase::FindCorruption(const HistogramSamples& samples) const {
// Not supported by default.
return NO_INCONSISTENCIES;
}
void HistogramBase::ValidateHistogramContents() const {}
void HistogramBase::WriteJSON(std::string* output,
JSONVerbosityLevel verbosity_level) const {
Count count = 0;
int64_t sum = 0;
std::unique_ptr<ListValue> buckets(new ListValue());
GetCountAndBucketData(&count, &sum, buckets.get());
std::unique_ptr<DictionaryValue> parameters(new DictionaryValue());
GetParameters(parameters.get());
JSONStringValueSerializer serializer(output);
DictionaryValue root;
root.SetStringKey("name", histogram_name());
root.SetIntKey("count", count);
root.SetDoubleKey("sum", static_cast<double>(sum));
root.SetIntKey("flags", flags());
root.Set("params", std::move(parameters));
if (verbosity_level != JSON_VERBOSITY_LEVEL_OMIT_BUCKETS)
root.Set("buckets", std::move(buckets));
root.SetIntKey("pid", GetUniqueIdForProcess().GetUnsafeValue());
serializer.Serialize(root);
}
void HistogramBase::FindAndRunCallback(HistogramBase::Sample sample) const {
StatisticsRecorder::GlobalSampleCallback global_sample_callback =
StatisticsRecorder::global_sample_callback();
if (global_sample_callback)
global_sample_callback(histogram_name(), name_hash(), sample);
if ((flags() & kCallbackExists) == 0)
return;
StatisticsRecorder::OnSampleCallback cb =
StatisticsRecorder::FindCallback(histogram_name());
if (!cb.is_null())
cb.Run(sample);
}
void HistogramBase::WriteAsciiBucketGraph(double current_size,
double max_size,
std::string* output) const {
const int k_line_length = 72; // Maximal horizontal width of graph.
int x_count = static_cast<int>(k_line_length * (current_size / max_size)
+ 0.5);
int x_remainder = k_line_length - x_count;
while (0 < x_count--)
output->append("-");
output->append("O");
while (0 < x_remainder--)
output->append(" ");
}
const std::string HistogramBase::GetSimpleAsciiBucketRange(
Sample sample) const {
return StringPrintf("%d", sample);
}
void HistogramBase::WriteAsciiBucketValue(Count current,
double scaled_sum,
std::string* output) const {
StringAppendF(output, " (%d = %3.1f%%)", current, current/scaled_sum);
}
// static
char const* HistogramBase::GetPermanentName(const std::string& name) {
// A set of histogram names that provides the "permanent" lifetime required
// by histogram objects for those strings that are not already code constants
// or held in persistent memory.
static base::NoDestructor<std::set<std::string>> permanent_names;
static base::NoDestructor<Lock> permanent_names_lock;
AutoLock lock(*permanent_names_lock);
auto result = permanent_names->insert(name);
return result.first->c_str();
}
} // namespace base

View file

@@ -0,0 +1,307 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_HISTOGRAM_BASE_H_
#define BASE_METRICS_HISTOGRAM_BASE_H_
#include <limits.h>
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <string>
#include <vector>
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/macros.h"
#include "base/strings/string_piece.h"
#include "base/time/time.h"
namespace base {
class DictionaryValue;
class HistogramBase;
class HistogramSamples;
class ListValue;
class Pickle;
class PickleIterator;
////////////////////////////////////////////////////////////////////////////////
// This enum is used to facilitate deserialization of histograms from other
// processes into the browser. If you create another class that inherits from
// HistogramBase, add new histogram types and names below.
enum HistogramType {
HISTOGRAM,
LINEAR_HISTOGRAM,
BOOLEAN_HISTOGRAM,
CUSTOM_HISTOGRAM,
SPARSE_HISTOGRAM,
DUMMY_HISTOGRAM,
};
// Controls the verbosity of the information when the histogram is serialized to
// JSON.
// GENERATED_JAVA_ENUM_PACKAGE: org.chromium.base.metrics
enum JSONVerbosityLevel {
// The histogram is completely serialized.
JSON_VERBOSITY_LEVEL_FULL,
// The bucket information is not serialized.
JSON_VERBOSITY_LEVEL_OMIT_BUCKETS,
};
std::string HistogramTypeToString(HistogramType type);
// This enum is used for reporting how many histograms and of what types and
// variations are being created. It has to be in the main .h file so it is
// visible to files that define the various histogram types.
enum HistogramReport {
// Count the number of reports created. The other counts divided by this
// number will give the average per run of the program.
HISTOGRAM_REPORT_CREATED = 0,
// Count the total number of histograms created. It is the limit against
// which all others are compared.
HISTOGRAM_REPORT_HISTOGRAM_CREATED = 1,
// Count the total number of histograms looked-up. It's better to cache
// the result of a single lookup rather than do it repeatedly.
HISTOGRAM_REPORT_HISTOGRAM_LOOKUP = 2,
// These count the individual histogram types. This must follow the order
// of HistogramType above.
HISTOGRAM_REPORT_TYPE_LOGARITHMIC = 3,
HISTOGRAM_REPORT_TYPE_LINEAR = 4,
HISTOGRAM_REPORT_TYPE_BOOLEAN = 5,
HISTOGRAM_REPORT_TYPE_CUSTOM = 6,
HISTOGRAM_REPORT_TYPE_SPARSE = 7,
// These indicate the individual flags that were set.
HISTOGRAM_REPORT_FLAG_UMA_TARGETED = 8,
HISTOGRAM_REPORT_FLAG_UMA_STABILITY = 9,
HISTOGRAM_REPORT_FLAG_PERSISTENT = 10,
// This must be last.
HISTOGRAM_REPORT_MAX = 11
};
// Create or find existing histogram that matches the pickled info.
// Returns NULL if the pickled data has problems.
BASE_EXPORT HistogramBase* DeserializeHistogramInfo(base::PickleIterator* iter);
////////////////////////////////////////////////////////////////////////////////
class BASE_EXPORT HistogramBase {
public:
typedef int32_t Sample; // Used for samples.
typedef subtle::Atomic32 AtomicCount; // Used to count samples.
typedef int32_t Count; // Used to manipulate counts in temporaries.
static const Sample kSampleType_MAX; // INT_MAX
enum Flags {
kNoFlags = 0x0,
// Histogram should be UMA uploaded.
kUmaTargetedHistogramFlag = 0x1,
// Indicates that this is a stability histogram. This flag exists to specify
// which histograms should be included in the initial stability log. Please
// refer to |MetricsService::PrepareInitialStabilityLog|.
kUmaStabilityHistogramFlag = kUmaTargetedHistogramFlag | 0x2,
// Indicates that the histogram was pickled to be sent across an IPC
// Channel. If we observe this flag on a histogram being aggregated into
// after IPC, then we are running in a single process mode, and the
// aggregation should not take place (as we would be aggregating back into
// the source histogram!).
kIPCSerializationSourceFlag = 0x10,
// Indicates that a callback exists for when a new sample is recorded on
// this histogram. We store this as a flag with the histogram since
// histograms can be in performance critical code, and this allows us
// to shortcut looking up the callback if it doesn't exist.
kCallbackExists = 0x20,
// Indicates that the histogram is held in "persistent" memory and may
// be accessible between processes. This is only possible if such a
// memory segment has been created/attached, used to create a Persistent-
// MemoryAllocator, and that loaded into the Histogram module before this
// histogram is created.
kIsPersistent = 0x40,
};
// Histogram data inconsistency types.
enum Inconsistency : uint32_t {
NO_INCONSISTENCIES = 0x0,
RANGE_CHECKSUM_ERROR = 0x1,
BUCKET_ORDER_ERROR = 0x2,
COUNT_HIGH_ERROR = 0x4,
COUNT_LOW_ERROR = 0x8,
NEVER_EXCEEDED_VALUE = 0x10,
};
// Construct the base histogram. The name is not copied; it's up to the
// caller to ensure that it lives at least as long as this object.
explicit HistogramBase(const char* name);
virtual ~HistogramBase();
const char* histogram_name() const { return histogram_name_; }
// Compares |name| to the histogram name and triggers a DCHECK if they do not
// match. This is a helper function used by histogram macros, which results in
// more compact machine code being generated by the macros.
virtual void CheckName(const StringPiece& name) const;
// Get a unique ID for this histogram's samples.
virtual uint64_t name_hash() const = 0;
// Operations with Flags enum.
int32_t flags() const { return subtle::NoBarrier_Load(&flags_); }
void SetFlags(int32_t flags);
void ClearFlags(int32_t flags);
virtual HistogramType GetHistogramType() const = 0;
// Whether the histogram has construction arguments as parameters specified.
// For histograms that don't have the concept of minimum, maximum or
// bucket_count, this function always returns false.
virtual bool HasConstructionArguments(
Sample expected_minimum,
Sample expected_maximum,
uint32_t expected_bucket_count) const = 0;
virtual void Add(Sample value) = 0;
// In the Add() function the |value| bucket is increased by one, but in some use
// cases we need to increase this value by an arbitrary integer. AddCount
// function increases the |value| bucket by |count|. |count| should be greater
// than or equal to 1.
virtual void AddCount(Sample value, int count) = 0;
// Similar to above but divides |count| by the |scale| amount. Probabilistic
// rounding is used to yield a reasonably accurate total when many samples
// are added. Methods for common cases of scales 1000 and 1024 are included.
// The ScaledLinearHistogram (which can also be used for enumerations) may be
// a better (and faster) solution.
void AddScaled(Sample value, int count, int scale);
void AddKilo(Sample value, int count); // scale=1000
void AddKiB(Sample value, int count); // scale=1024
// Convenient functions that call Add(Sample).
void AddTime(const TimeDelta& time) { AddTimeMillisecondsGranularity(time); }
void AddTimeMillisecondsGranularity(const TimeDelta& time);
// Note: AddTimeMicrosecondsGranularity() drops the report if this client
// doesn't have a high-resolution clock.
void AddTimeMicrosecondsGranularity(const TimeDelta& time);
void AddBoolean(bool value);
virtual void AddSamples(const HistogramSamples& samples) = 0;
virtual bool AddSamplesFromPickle(base::PickleIterator* iter) = 0;
// Serialize the histogram info into |pickle|.
// Note: This only serializes the construction arguments of the histogram, but
// does not serialize the samples.
void SerializeInfo(base::Pickle* pickle) const;
// Try to find out data corruption from histogram and the samples.
// The returned value is a combination of Inconsistency enum.
virtual uint32_t FindCorruption(const HistogramSamples& samples) const;
// Snapshot the current complete set of sample data.
// Override with atomic/locked snapshot if needed.
// NOTE: this data can overflow for long-running sessions. It should be
// handled with care and this method is recommended to be used only
// in about:histograms and test code.
virtual std::unique_ptr<HistogramSamples> SnapshotSamples() const = 0;
// Calculate the change (delta) in histogram counts since the previous call
// to this method. Each successive call will return only those counts
// changed since the last call.
virtual std::unique_ptr<HistogramSamples> SnapshotDelta() = 0;
// Calculate the change (delta) in histogram counts since the previous call
// to SnapshotDelta() but do so without modifying any internal data as to
// what was previously logged. After such a call, no further calls to this
// method or to SnapshotDelta() should be done as the result would include
// data previously returned. Because no internal data is changed, this call
// can be made on "const" histograms such as those with data held in
// read-only memory.
virtual std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const = 0;
// The following methods provide graphical histogram displays.
virtual void WriteHTMLGraph(std::string* output) const = 0;
virtual void WriteAscii(std::string* output) const = 0;
// TODO(bcwhite): Remove this after https://crbug/836875.
virtual void ValidateHistogramContents() const;
// Produce a JSON representation of the histogram with |verbosity_level| as
// the serialization verbosity. This is implemented with the help of
// GetParameters and GetCountAndBucketData; overwrite them to customize the
// output.
void WriteJSON(std::string* output, JSONVerbosityLevel verbosity_level) const;
protected:
enum ReportActivity { HISTOGRAM_CREATED, HISTOGRAM_LOOKUP };
// Subclasses should implement this function to make SerializeInfo work.
virtual void SerializeInfoImpl(base::Pickle* pickle) const = 0;
// Writes information about the construction parameters in |params|.
virtual void GetParameters(DictionaryValue* params) const = 0;
// Writes information about the current (non-empty) buckets and their sample
// counts to |buckets|, the total sample count to |count| and the total sum
// to |sum|.
virtual void GetCountAndBucketData(Count* count,
int64_t* sum,
ListValue* buckets) const = 0;
// Produce the actual graph (set of blank vs. non-blank chars) for a bucket.
void WriteAsciiBucketGraph(double current_size,
double max_size,
std::string* output) const;
// Return a string description of what goes in a given bucket.
const std::string GetSimpleAsciiBucketRange(Sample sample) const;
// Write textual description of the bucket contents (relative to histogram).
// Output is the count in the buckets, as well as the percentage.
void WriteAsciiBucketValue(Count current,
double scaled_sum,
std::string* output) const;
// Retrieves the callback for this histogram, if one exists, and runs it
// passing |sample| as the parameter.
void FindAndRunCallback(Sample sample) const;
// Gets a permanent string that can be used for histogram objects when the
// original is not a code constant or held in persistent memory.
static const char* GetPermanentName(const std::string& name);
private:
friend class HistogramBaseTest;
// A pointer to permanent storage where the histogram name is held. This can
// be code space or the output of GetPermanentName() or any other storage
// that is known to never change. This is not StringPiece because (a) char*
// is 1/2 the size and (b) StringPiece transparently casts from std::string
// which can easily lead to a pointer to non-permanent space.
// For persistent histograms, this will simply point into the persistent
// memory segment, thus avoiding duplication. For heap histograms, the
// GetPermanentName method will create the necessary copy.
const char* const histogram_name_;
// Additional information about the histogram.
AtomicCount flags_;
DISALLOW_COPY_AND_ASSIGN(HistogramBase);
};
} // namespace base
#endif // BASE_METRICS_HISTOGRAM_BASE_H_

View file

@ -0,0 +1,81 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/histogram_delta_serialization.h"
#include "base/logging.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_snapshot_manager.h"
#include "base/metrics/statistics_recorder.h"
#include "base/numerics/safe_conversions.h"
#include "base/pickle.h"
#include "base/values.h"
namespace base {
namespace {
// Create or find existing histogram and add the samples from pickle.
// Silently returns when seeing any data problem in the pickle.
void DeserializeHistogramAndAddSamples(PickleIterator* iter) {
HistogramBase* histogram = DeserializeHistogramInfo(iter);
if (!histogram)
return;
if (histogram->flags() & HistogramBase::kIPCSerializationSourceFlag) {
DVLOG(1) << "Single process mode, histogram observed and not copied: "
<< histogram->histogram_name();
return;
}
histogram->AddSamplesFromPickle(iter);
}
} // namespace
HistogramDeltaSerialization::HistogramDeltaSerialization(
const std::string& caller_name)
: histogram_snapshot_manager_(this), serialized_deltas_(nullptr) {}
HistogramDeltaSerialization::~HistogramDeltaSerialization() = default;
void HistogramDeltaSerialization::PrepareAndSerializeDeltas(
std::vector<std::string>* serialized_deltas,
bool include_persistent) {
DCHECK(thread_checker_.CalledOnValidThread());
serialized_deltas_ = serialized_deltas;
// Note: Before serializing, we set the kIPCSerializationSourceFlag for all
// the histograms, so that the receiving process can distinguish them from the
// local histograms.
StatisticsRecorder::PrepareDeltas(
include_persistent, Histogram::kIPCSerializationSourceFlag,
Histogram::kNoFlags, &histogram_snapshot_manager_);
serialized_deltas_ = nullptr;
}
// static
void HistogramDeltaSerialization::DeserializeAndAddSamples(
const std::vector<std::string>& serialized_deltas) {
for (auto it = serialized_deltas.begin(); it != serialized_deltas.end();
++it) {
Pickle pickle(it->data(), checked_cast<int>(it->size()));
PickleIterator iter(pickle);
DeserializeHistogramAndAddSamples(&iter);
}
}
void HistogramDeltaSerialization::RecordDelta(
const HistogramBase& histogram,
const HistogramSamples& snapshot) {
DCHECK(thread_checker_.CalledOnValidThread());
DCHECK_NE(0, snapshot.TotalCount());
Pickle pickle;
histogram.SerializeInfo(&pickle);
snapshot.Serialize(&pickle);
serialized_deltas_->push_back(
std::string(static_cast<const char*>(pickle.data()), pickle.size()));
}
} // namespace base

View file

@ -0,0 +1,61 @@
// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
#define BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_
#include <memory>
#include <string>
#include <vector>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/metrics/histogram_flattener.h"
#include "base/metrics/histogram_snapshot_manager.h"
#include "base/threading/thread_checker.h"
namespace base {
class HistogramBase;
// Serializes and restores histogram deltas.
class BASE_EXPORT HistogramDeltaSerialization : public HistogramFlattener {
public:
// |caller_name| is a string used in histograms for counting inconsistencies.
explicit HistogramDeltaSerialization(const std::string& caller_name);
~HistogramDeltaSerialization() override;
// Computes deltas in histogram bucket counts relative to the previous call to
// this method. Stores the deltas in serialized form into |serialized_deltas|.
// If |serialized_deltas| is null, no data is serialized, though the next call
// will compute the deltas relative to this one. Setting |include_persistent|
// will include histograms held in persistent memory (and thus may be reported
// elsewhere); otherwise only histograms local to this process are serialized.
void PrepareAndSerializeDeltas(std::vector<std::string>* serialized_deltas,
bool include_persistent);
// Deserialize deltas and add samples to corresponding histograms, creating
// them if necessary. Silently ignores errors in |serialized_deltas|.
static void DeserializeAndAddSamples(
const std::vector<std::string>& serialized_deltas);
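// Typical round trip, for illustration only (the transport function is
// hypothetical):
//   // Sending process:
//   HistogramDeltaSerialization serializer("MyProcess");
//   std::vector<std::string> deltas;
//   serializer.PrepareAndSerializeDeltas(&deltas, /*include_persistent=*/false);
//   SendOverIpc(deltas);
//   // Receiving process:
//   HistogramDeltaSerialization::DeserializeAndAddSamples(received_deltas);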
private:
// HistogramFlattener implementation.
void RecordDelta(const HistogramBase& histogram,
const HistogramSamples& snapshot) override;
ThreadChecker thread_checker_;
// Calculates deltas in histogram counters.
HistogramSnapshotManager histogram_snapshot_manager_;
// Output buffer for serialized deltas.
std::vector<std::string>* serialized_deltas_;
DISALLOW_COPY_AND_ASSIGN(HistogramDeltaSerialization);
};
} // namespace base
#endif // BASE_METRICS_HISTOGRAM_DELTA_SERIALIZATION_H_

View file

@ -0,0 +1,36 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_HISTOGRAM_FLATTENER_H_
#define BASE_METRICS_HISTOGRAM_FLATTENER_H_
#include <map>
#include <string>
#include "base/macros.h"
#include "base/metrics/histogram.h"
namespace base {
class HistogramSamples;
// HistogramFlattener is an interface used by HistogramSnapshotManager, which
// handles the logistics of gathering up available histograms for recording.
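// A minimal implementation, for illustration only (LogToConsole is a made-up
// helper):
//   class MyFlattener : public base::HistogramFlattener {
//    public:
//     void RecordDelta(const base::HistogramBase& histogram,
//                      const base::HistogramSamples& snapshot) override {
//       LogToConsole(histogram.histogram_name(), snapshot.TotalCount());
//     }
//   };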
class BASE_EXPORT HistogramFlattener {
public:
virtual ~HistogramFlattener() = default;
virtual void RecordDelta(const HistogramBase& histogram,
const HistogramSamples& snapshot) = 0;
protected:
HistogramFlattener() = default;
private:
DISALLOW_COPY_AND_ASSIGN(HistogramFlattener);
};
} // namespace base
#endif // BASE_METRICS_HISTOGRAM_FLATTENER_H_

View file

@ -0,0 +1,264 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/sparse_histogram.h"
#include "base/time/time.h"
namespace base {
void UmaHistogramBoolean(const std::string& name, bool sample) {
HistogramBase* histogram = BooleanHistogram::FactoryGet(
name, HistogramBase::kUmaTargetedHistogramFlag);
histogram->Add(sample);
}
void UmaHistogramBoolean(const char* name, bool sample) {
HistogramBase* histogram = BooleanHistogram::FactoryGet(
name, HistogramBase::kUmaTargetedHistogramFlag);
histogram->Add(sample);
}
void UmaHistogramExactLinear(const std::string& name,
int sample,
int value_max) {
HistogramBase* histogram =
LinearHistogram::FactoryGet(name, 1, value_max, value_max + 1,
HistogramBase::kUmaTargetedHistogramFlag);
histogram->Add(sample);
}
void UmaHistogramExactLinear(const char* name, int sample, int value_max) {
HistogramBase* histogram =
LinearHistogram::FactoryGet(name, 1, value_max, value_max + 1,
HistogramBase::kUmaTargetedHistogramFlag);
histogram->Add(sample);
}
void UmaHistogramPercentage(const std::string& name, int percent) {
UmaHistogramExactLinear(name, percent, 100);
}
void UmaHistogramPercentage(const char* name, int percent) {
UmaHistogramExactLinear(name, percent, 100);
}
void UmaHistogramCustomCounts(const std::string& name,
int sample,
int min,
int max,
int buckets) {
HistogramBase* histogram = Histogram::FactoryGet(
name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
histogram->Add(sample);
}
void UmaHistogramCustomCounts(const char* name,
int sample,
int min,
int max,
int buckets) {
HistogramBase* histogram = Histogram::FactoryGet(
name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
histogram->Add(sample);
}
void UmaHistogramCounts100(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 100, 50);
}
void UmaHistogramCounts100(const char* name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 100, 50);
}
void UmaHistogramCounts1000(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
}
void UmaHistogramCounts1000(const char* name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
}
void UmaHistogramCounts10000(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 10000, 50);
}
void UmaHistogramCounts10000(const char* name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 10000, 50);
}
void UmaHistogramCounts100000(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 100000, 50);
}
void UmaHistogramCounts100000(const char* name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 100000, 50);
}
void UmaHistogramCounts1M(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 1000000, 50);
}
void UmaHistogramCounts1M(const char* name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 1000000, 50);
}
void UmaHistogramCounts10M(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 10000000, 50);
}
void UmaHistogramCounts10M(const char* name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 10000000, 50);
}
void UmaHistogramCustomTimes(const std::string& name,
TimeDelta sample,
TimeDelta min,
TimeDelta max,
int buckets) {
HistogramBase* histogram = Histogram::FactoryTimeGet(
name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
histogram->AddTimeMillisecondsGranularity(sample);
}
void UmaHistogramCustomTimes(const char* name,
TimeDelta sample,
TimeDelta min,
TimeDelta max,
int buckets) {
HistogramBase* histogram = Histogram::FactoryTimeGet(
name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
histogram->AddTimeMillisecondsGranularity(sample);
}
void UmaHistogramTimes(const std::string& name, TimeDelta sample) {
UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
TimeDelta::FromSeconds(10), 50);
}
void UmaHistogramTimes(const char* name, TimeDelta sample) {
UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
TimeDelta::FromSeconds(10), 50);
}
void UmaHistogramMediumTimes(const std::string& name, TimeDelta sample) {
UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
TimeDelta::FromMinutes(3), 50);
}
void UmaHistogramMediumTimes(const char* name, TimeDelta sample) {
UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
TimeDelta::FromMinutes(3), 50);
}
void UmaHistogramLongTimes(const std::string& name, TimeDelta sample) {
UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
TimeDelta::FromHours(1), 50);
}
void UmaHistogramLongTimes(const char* name, TimeDelta sample) {
UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
TimeDelta::FromHours(1), 50);
}
void UmaHistogramLongTimes100(const std::string& name, TimeDelta sample) {
UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
TimeDelta::FromHours(1), 100);
}
void UmaHistogramLongTimes100(const char* name, TimeDelta sample) {
UmaHistogramCustomTimes(name, sample, TimeDelta::FromMilliseconds(1),
TimeDelta::FromHours(1), 100);
}
void UmaHistogramCustomMicrosecondsTimes(const std::string& name,
TimeDelta sample,
TimeDelta min,
TimeDelta max,
int buckets) {
HistogramBase* histogram = Histogram::FactoryMicrosecondsTimeGet(
name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
histogram->AddTimeMicrosecondsGranularity(sample);
}
void UmaHistogramCustomMicrosecondsTimes(const char* name,
TimeDelta sample,
TimeDelta min,
TimeDelta max,
int buckets) {
HistogramBase* histogram = Histogram::FactoryMicrosecondsTimeGet(
name, min, max, buckets, HistogramBase::kUmaTargetedHistogramFlag);
histogram->AddTimeMicrosecondsGranularity(sample);
}
void UmaHistogramMicrosecondsTimes(const std::string& name, TimeDelta sample) {
UmaHistogramCustomMicrosecondsTimes(name, sample,
TimeDelta::FromMicroseconds(1),
TimeDelta::FromSeconds(10), 50);
}
void UmaHistogramMicrosecondsTimes(const char* name, TimeDelta sample) {
UmaHistogramCustomMicrosecondsTimes(name, sample,
TimeDelta::FromMicroseconds(1),
TimeDelta::FromSeconds(10), 50);
}
// TODO(crbug.com/983261) Remove this method after moving to
// UmaHistogramMicrosecondsTimes.
void UmaHistogramMicrosecondsTimesUnderTenMilliseconds(const std::string& name,
TimeDelta sample) {
UmaHistogramCustomMicrosecondsTimes(name, sample,
TimeDelta::FromMicroseconds(1),
TimeDelta::FromMilliseconds(10), 50);
}
// TODO(crbug.com/983261) Remove this method after moving to
// UmaHistogramMicrosecondsTimes.
void UmaHistogramMicrosecondsTimesUnderTenMilliseconds(const char* name,
TimeDelta sample) {
UmaHistogramCustomMicrosecondsTimes(name, sample,
TimeDelta::FromMicroseconds(1),
TimeDelta::FromMilliseconds(10), 50);
}
void UmaHistogramMemoryKB(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1000, 500000, 50);
}
void UmaHistogramMemoryKB(const char* name, int sample) {
UmaHistogramCustomCounts(name, sample, 1000, 500000, 50);
}
void UmaHistogramMemoryMB(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
}
void UmaHistogramMemoryMB(const char* name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 1000, 50);
}
void UmaHistogramMemoryLargeMB(const std::string& name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 64000, 100);
}
void UmaHistogramMemoryLargeMB(const char* name, int sample) {
UmaHistogramCustomCounts(name, sample, 1, 64000, 100);
}
void UmaHistogramSparse(const std::string& name, int sample) {
HistogramBase* histogram = SparseHistogram::FactoryGet(
name, HistogramBase::kUmaTargetedHistogramFlag);
histogram->Add(sample);
}
void UmaHistogramSparse(const char* name, int sample) {
HistogramBase* histogram = SparseHistogram::FactoryGet(
name, HistogramBase::kUmaTargetedHistogramFlag);
histogram->Add(sample);
}
} // namespace base

View file

@ -0,0 +1,262 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
#define BASE_METRICS_HISTOGRAM_FUNCTIONS_H_
#include <type_traits>
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_base.h"
#include "base/time/time.h"
// Functions for recording metrics.
//
// For best practices on deciding when to emit to a histogram and what form
// the histogram should take, see
// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
// Functions for recording UMA histograms. These can be used for cases
// when the histogram name is generated at runtime. The functionality is
// equivalent to the macros defined in histogram_macros.h, but allows
// non-constant histogram names. These functions are slower than their macro
// equivalents because the histogram objects are not cached between calls,
// so they shouldn't be used in performance-critical code.
//
// Every function is duplicated to take both std::string and char* for the
// name. This avoids ctor/dtor instantiation for constant strings to
// std::string, which would make the call larger than the caching macros
// (which do accept char*) in those cases.
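//
// For illustration only (the suffix-producing function is hypothetical):
//   std::string name = "MyFeature.Latency." + GetVariantSuffix();
//   base::UmaHistogramTimes(name, elapsed_time);
// When the name really is a compile-time constant, prefer the char* overload
// (or the caching macros in histogram_macros.h):
//   base::UmaHistogramBoolean("MyFeature.Succeeded", success);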
namespace base {
// For histograms with linear buckets.
// Used for capturing integer data with a linear bucketing scheme. This can be
// used when you want the exact value of some small numeric count, with a max of
// 100 or less. If you need to capture a range of greater than 100, we recommend
// the use of the COUNT histograms below.
// Sample usage:
// base::UmaHistogramExactLinear("Histogram.Linear", some_value, 10);
BASE_EXPORT void UmaHistogramExactLinear(const std::string& name,
int sample,
int value_max);
BASE_EXPORT void UmaHistogramExactLinear(const char* name,
int sample,
int value_max);
// For adding a sample to an enumerated histogram.
// Sample usage:
// // These values are persisted to logs. Entries should not be renumbered and
// // numeric values should never be reused.
// enum class NewTabPageAction {
// kUseOmnibox = 0,
// kClickTitle = 1,
// // kUseSearchbox = 2, // no longer used, combined into omnibox
// kOpenBookmark = 3,
// kMaxValue = kOpenBookmark,
// };
// base::UmaHistogramEnumeration("My.Enumeration",
//                               NewTabPageAction::kClickTitle);
template <typename T>
void UmaHistogramEnumeration(const std::string& name, T sample) {
static_assert(std::is_enum<T>::value, "T is not an enum.");
// This also ensures that an enumeration that doesn't define kMaxValue fails
// with a semi-useful error ("no member named 'kMaxValue' in ...").
static_assert(static_cast<uintmax_t>(T::kMaxValue) <=
static_cast<uintmax_t>(INT_MAX) - 1,
"Enumeration's kMaxValue is out of range of INT_MAX!");
DCHECK_LE(static_cast<uintmax_t>(sample),
static_cast<uintmax_t>(T::kMaxValue));
return UmaHistogramExactLinear(name, static_cast<int>(sample),
static_cast<int>(T::kMaxValue) + 1);
}
template <typename T>
void UmaHistogramEnumeration(const char* name, T sample) {
static_assert(std::is_enum<T>::value, "T is not an enum.");
// This also ensures that an enumeration that doesn't define kMaxValue fails
// with a semi-useful error ("no member named 'kMaxValue' in ...").
static_assert(static_cast<uintmax_t>(T::kMaxValue) <=
static_cast<uintmax_t>(INT_MAX) - 1,
"Enumeration's kMaxValue is out of range of INT_MAX!");
DCHECK_LE(static_cast<uintmax_t>(sample),
static_cast<uintmax_t>(T::kMaxValue));
return UmaHistogramExactLinear(name, static_cast<int>(sample),
static_cast<int>(T::kMaxValue) + 1);
}
// Some legacy histograms may manually specify a max value, with a kCount,
// COUNT, kMaxValue, or MAX_VALUE sentinel like so:
// // These values are persisted to logs. Entries should not be renumbered and
// // numeric values should never be reused.
// enum class NewTabPageAction {
// kUseOmnibox = 0,
// kClickTitle = 1,
// // kUseSearchbox = 2, // no longer used, combined into omnibox
// kOpenBookmark = 3,
// kMaxValue,
// };
// base::UmaHistogramEnumeration("My.Enumeration",
//                               NewTabPageAction::kClickTitle,
//                               NewTabPageAction::kMaxValue);
// Note: The value in |sample| must be strictly less than |enum_size|. This is
// otherwise functionally equivalent to the above.
template <typename T>
void UmaHistogramEnumeration(const std::string& name, T sample, T enum_size) {
static_assert(std::is_enum<T>::value, "T is not an enum.");
DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
return UmaHistogramExactLinear(name, static_cast<int>(sample),
static_cast<int>(enum_size));
}
template <typename T>
void UmaHistogramEnumeration(const char* name, T sample, T enum_size) {
static_assert(std::is_enum<T>::value, "T is not an enum.");
DCHECK_LE(static_cast<uintmax_t>(enum_size), static_cast<uintmax_t>(INT_MAX));
DCHECK_LT(static_cast<uintmax_t>(sample), static_cast<uintmax_t>(enum_size));
return UmaHistogramExactLinear(name, static_cast<int>(sample),
static_cast<int>(enum_size));
}
// For adding a boolean sample to a histogram.
// Sample usage:
// base::UmaHistogramBoolean("My.Boolean", true)
BASE_EXPORT void UmaHistogramBoolean(const std::string& name, bool sample);
BASE_EXPORT void UmaHistogramBoolean(const char* name, bool sample);
// For adding a sample to a percentage histogram.
// Percentages are integers between 1 and 100.
// Sample usage:
// base::UmaHistogramPercentage("My.Percent", 69)
BASE_EXPORT void UmaHistogramPercentage(const std::string& name, int percent);
BASE_EXPORT void UmaHistogramPercentage(const char* name, int percent);
// For adding a sample to a counts histogram.
// Sample usage:
// base::UmaHistogramCustomCounts("My.Counts", some_value, 1, 600, 30)
BASE_EXPORT void UmaHistogramCustomCounts(const std::string& name,
int sample,
int min,
int max,
int buckets);
BASE_EXPORT void UmaHistogramCustomCounts(const char* name,
int sample,
int min,
int max,
int buckets);
// Counts specialization for maximum counts 100, 1000, 10k, 100k, 1M and 10M.
BASE_EXPORT void UmaHistogramCounts100(const std::string& name, int sample);
BASE_EXPORT void UmaHistogramCounts100(const char* name, int sample);
BASE_EXPORT void UmaHistogramCounts1000(const std::string& name, int sample);
BASE_EXPORT void UmaHistogramCounts1000(const char* name, int sample);
BASE_EXPORT void UmaHistogramCounts10000(const std::string& name, int sample);
BASE_EXPORT void UmaHistogramCounts10000(const char* name, int sample);
BASE_EXPORT void UmaHistogramCounts100000(const std::string& name, int sample);
BASE_EXPORT void UmaHistogramCounts100000(const char* name, int sample);
BASE_EXPORT void UmaHistogramCounts1M(const std::string& name, int sample);
BASE_EXPORT void UmaHistogramCounts1M(const char* name, int sample);
BASE_EXPORT void UmaHistogramCounts10M(const std::string& name, int sample);
BASE_EXPORT void UmaHistogramCounts10M(const char* name, int sample);
// For histograms storing times. It uses milliseconds granularity.
BASE_EXPORT void UmaHistogramCustomTimes(const std::string& name,
TimeDelta sample,
TimeDelta min,
TimeDelta max,
int buckets);
BASE_EXPORT void UmaHistogramCustomTimes(const char* name,
TimeDelta sample,
TimeDelta min,
TimeDelta max,
int buckets);
// For short timings from 1 ms up to 10 seconds (50 buckets).
BASE_EXPORT void UmaHistogramTimes(const std::string& name, TimeDelta sample);
BASE_EXPORT void UmaHistogramTimes(const char* name, TimeDelta sample);
// For medium timings up to 3 minutes (50 buckets).
BASE_EXPORT void UmaHistogramMediumTimes(const std::string& name,
TimeDelta sample);
BASE_EXPORT void UmaHistogramMediumTimes(const char* name, TimeDelta sample);
// For time intervals up to 1 hr (50 buckets).
BASE_EXPORT void UmaHistogramLongTimes(const std::string& name,
TimeDelta sample);
BASE_EXPORT void UmaHistogramLongTimes(const char* name, TimeDelta sample);
// For time intervals up to 1 hr (100 buckets).
BASE_EXPORT void UmaHistogramLongTimes100(const std::string& name,
TimeDelta sample);
BASE_EXPORT void UmaHistogramLongTimes100(const char* name, TimeDelta sample);
// For histograms storing times with microseconds granularity.
BASE_EXPORT void UmaHistogramCustomMicrosecondsTimes(const std::string& name,
TimeDelta sample,
TimeDelta min,
TimeDelta max,
int buckets);
BASE_EXPORT void UmaHistogramCustomMicrosecondsTimes(const char* name,
TimeDelta sample,
TimeDelta min,
TimeDelta max,
int buckets);
// For microseconds timings from 1 microsecond up to 10 seconds (50 buckets).
BASE_EXPORT void UmaHistogramMicrosecondsTimes(const std::string& name,
TimeDelta sample);
BASE_EXPORT void UmaHistogramMicrosecondsTimes(const char* name,
TimeDelta sample);
// For microseconds timings from 1 microsecond up to 10 ms (50 buckets).
// TODO(crbug.com/983261) Remove this method after moving to
// UmaHistogramMicrosecondsTimes.
BASE_EXPORT void UmaHistogramMicrosecondsTimesUnderTenMilliseconds(
const std::string& name,
TimeDelta sample);
BASE_EXPORT void UmaHistogramMicrosecondsTimesUnderTenMilliseconds(
const char* name,
TimeDelta sample);
// For recording memory related histograms.
// Used to measure common KB-granularity memory stats. Range is up to 500M.
BASE_EXPORT void UmaHistogramMemoryKB(const std::string& name, int sample);
BASE_EXPORT void UmaHistogramMemoryKB(const char* name, int sample);
// Used to measure common MB-granularity memory stats. Range is up to ~1G.
BASE_EXPORT void UmaHistogramMemoryMB(const std::string& name, int sample);
BASE_EXPORT void UmaHistogramMemoryMB(const char* name, int sample);
// Used to measure common MB-granularity memory stats. Range is up to ~64G.
BASE_EXPORT void UmaHistogramMemoryLargeMB(const std::string& name, int sample);
BASE_EXPORT void UmaHistogramMemoryLargeMB(const char* name, int sample);
// For recording sparse histograms.
// The |sample| can be a negative or non-negative number.
//
// Sparse histograms are well suited for recording counts of exact sample values
// that are sparsely distributed over a relatively large range, in cases where
// ultra-fast performance is not critical. For instance, Sqlite.Version.* are
// sparse because for any given database, there's going to be exactly one
// version logged.
//
// Performance:
// ------------
// Sparse histograms are typically more memory-efficient but less time-efficient
// than other histograms. Essentially, sparse histograms use a map rather
// than a vector for their backing storage; they also require lock acquisition
// to increment a sample, whereas other histograms do not. Hence, each increment
// operation is a bit slower than for other histograms. But, if the data is
// sparse, then they use less memory client-side, because they allocate buckets
// on demand rather than preallocating.
//
// Data size:
// ----------
// Note that server-side, we still need to load all buckets, across all users,
// at once. Thus, please avoid exploding such histograms, i.e. uploading many
// many distinct values to the server (across all users). Concretely, keep the
// number of distinct values <= 100 ideally, definitely <= 1000. If you have no
// guarantees on the range of your data, use clamping, e.g.:
// UmaHistogramSparse("MyHistogram", ClampToRange(value, 0, 200));
BASE_EXPORT void UmaHistogramSparse(const std::string& name, int sample);
BASE_EXPORT void UmaHistogramSparse(const char* name, int sample);
} // namespace base
#endif // BASE_METRICS_HISTOGRAM_FUNCTIONS_H_

View file

@ -0,0 +1,417 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_HISTOGRAM_MACROS_H_
#define BASE_METRICS_HISTOGRAM_MACROS_H_
#include "base/macros.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_macros_internal.h"
#include "base/metrics/histogram_macros_local.h"
#include "base/time/time.h"
// Macros for efficient use of histograms.
//
// For best practices on deciding when to emit to a histogram and what form
// the histogram should take, see
// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/histograms/README.md
// All of these macros must be called with |name| as a runtime constant - it
// doesn't have to literally be a constant, but it must be the same string on
// all calls from a particular call site. If this rule is violated, it is
// possible the data will be written to the wrong histogram.
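// For illustration only (hypothetical name and variable):
//   constexpr char kLoadHistogram[] = "MyFeature.LoadSucceeded";
//   UMA_HISTOGRAM_BOOLEAN(kLoadHistogram, load_succeeded);
// (In debug builds the macros DCHECK that the name passed at a call site
// matches the name of the histogram cached there.)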
//------------------------------------------------------------------------------
// Enumeration histograms.
// These macros create histograms for enumerated data. Ideally, the data should
// be of the form of "event occurs, log the result". We recommend not putting
// related but not directly connected data as enums within the same histogram.
// You should be defining an associated Enum, and the input sample should be
// an element of the Enum.
// All of these macros must be called with |name| as a runtime constant.
// The first variant of UMA_HISTOGRAM_ENUMERATION accepts two arguments: the
// histogram name and the enum sample. It deduces the correct boundary value to
// use by looking for an enumerator with the name kMaxValue. kMaxValue should
// share the value of the highest enumerator: this avoids switch statements
// having to handle a sentinel no-op value.
//
// Sample usage:
// // These values are logged to UMA. Entries should not be renumbered and
// // numeric values should never be reused. Please keep in sync with "MyEnum"
// // in src/tools/metrics/histograms/enums.xml.
// enum class MyEnum {
// kFirstValue = 0,
// kSecondValue = 1,
// ...
// kFinalValue = N,
// kMaxValue = kFinalValue,
// };
// UMA_HISTOGRAM_ENUMERATION("My.Enumeration", MyEnum::kSomeValue);
//
// The second variant requires three arguments: the first two are the same as
// before, and the third argument is the enum boundary: this must be strictly
// greater than any other enumerator that will be sampled.
//
// Sample usage:
// // These values are logged to UMA. Entries should not be renumbered and
// // numeric values should never be reused. Please keep in sync with "MyEnum"
// // in src/tools/metrics/histograms/enums.xml.
// enum class MyEnum {
// FIRST_VALUE = 0,
// SECOND_VALUE = 1,
// ...
// FINAL_VALUE = N,
// COUNT
// };
// UMA_HISTOGRAM_ENUMERATION("My.Enumeration",
// MyEnum::SOME_VALUE, MyEnum::COUNT);
//
// Note: If the enum is used in a switch, it is often desirable to avoid writing
// a case statement to handle an unused sentinel value (i.e. COUNT in the above
// example). For scoped enums, this is awkward since it requires casting the
// enum to an arithmetic type and adding one. Instead, prefer the two argument
// version of the macro which automatically deduces the boundary from kMaxValue.
#define UMA_HISTOGRAM_ENUMERATION(name, ...) \
INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO( \
__VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY) \
(name, __VA_ARGS__, base::HistogramBase::kUmaTargetedHistogramFlag)
// As above but "scaled" count to avoid overflows caused by increments of
// large amounts. See UMA_HISTOGRAM_SCALED_EXACT_LINEAR for more information.
// Only the new format utilizing an internal kMaxValue is supported.
// It'll be necessary to #include "base/lazy_instance.h" to use this macro.
// name: Full constant name of the histogram (must not change between calls).
// sample: Bucket to be incremented.
// count: Amount by which to increment.
// scale: Amount by which |count| is divided.
// Sample usage:
// UMA_HISTOGRAM_SCALED_ENUMERATION("FooKiB", kEnumValue, byte_count, 1024)
#define UMA_HISTOGRAM_SCALED_ENUMERATION(name, sample, count, scale) \
INTERNAL_HISTOGRAM_SCALED_ENUMERATION_WITH_FLAG( \
name, sample, count, scale, \
base::HistogramBase::kUmaTargetedHistogramFlag)
// Histogram for boolean values.
// Sample usage:
// UMA_HISTOGRAM_BOOLEAN("Histogram.Boolean", bool);
#define UMA_HISTOGRAM_BOOLEAN(name, sample) \
STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
base::BooleanHistogram::FactoryGet(name, \
base::HistogramBase::kUmaTargetedHistogramFlag))
//------------------------------------------------------------------------------
// Linear histograms.
// All of these macros must be called with |name| as a runtime constant.
// Used for capturing integer data with a linear bucketing scheme. This can be
// used when you want the exact value of some small numeric count, with a max of
// 100 or less. If you need to capture a range of greater than 100, we recommend
// the use of the COUNT histograms below.
// Sample usage:
// UMA_HISTOGRAM_EXACT_LINEAR("Histogram.Linear", count, 10);
#define UMA_HISTOGRAM_EXACT_LINEAR(name, sample, value_max) \
INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG( \
name, sample, value_max, base::HistogramBase::kUmaTargetedHistogramFlag)
// Used for capturing basic percentages. This will be 100 buckets of size 1.
// Sample usage:
// UMA_HISTOGRAM_PERCENTAGE("Histogram.Percent", percent_as_int);
#define UMA_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
UMA_HISTOGRAM_EXACT_LINEAR(name, percent_as_int, 101)
//------------------------------------------------------------------------------
// Scaled linear histograms.
// These take |count| and |scale| parameters to allow cumulative reporting of
// large numbers. For example, code might pass a count of 1825 bytes and a scale
// of 1024 bytes to report values in kilobytes. Only the scaled count is
// reported, but the remainder is tracked between calls, so that multiple calls
// will accumulate correctly. Only "exact linear" is supported.
// It'll be necessary to #include "base/lazy_instance.h" to use this macro.
// name: Full constant name of the histogram (must not change between calls).
// sample: Bucket to be incremented.
// count: Amount by which to increment.
// sample_max: Maximum (exclusive) allowed sample value.
// scale: Amount by which |count| is divided.
// Sample usage:
// UMA_HISTOGRAM_SCALED_EXACT_LINEAR("FooKiB", bucket_no, byte_count,
// kBucketsMax+1, 1024)
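// Worked example (illustrative numbers): with scale = 1024, three calls that
// each add count = 700 bytes to the same bucket report 2 scaled units in total
// ((700 * 3) / 1024) and carry the remaining 52 bytes forward for later calls.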
#define UMA_HISTOGRAM_SCALED_EXACT_LINEAR(name, sample, count, sample_max, \
scale) \
INTERNAL_HISTOGRAM_SCALED_EXACT_LINEAR_WITH_FLAG( \
name, sample, count, sample_max, scale, \
base::HistogramBase::kUmaTargetedHistogramFlag)
//------------------------------------------------------------------------------
// Count histograms. These are used for collecting numeric data. Note that we
// have macros for more specialized use cases below (memory, time, percentages).
// The number suffixes here refer to the max size of the sample, i.e. COUNT_1000
// will be able to collect samples of counts up to 1000. The default number of
// buckets in all default macros is 50. We recommend erring on the side of too
// large a range versus too short a range.
// These macros default to exponential histograms - i.e. the lengths of the
// bucket ranges exponentially increase as the sample range increases.
// These should *not* be used if you are interested in exact counts, i.e. a
// bucket range of 1. In these cases, you should use the ENUMERATION macros
// defined later. These should also not be used to capture the number of some
// event, i.e. "button X was clicked N times". In these cases, an enum should be
// used, ideally with an appropriate baseline enum entry included.
// All of these macros must be called with |name| as a runtime constant.
// Sample usage:
// UMA_HISTOGRAM_COUNTS_1M("My.Histogram", sample);
#define UMA_HISTOGRAM_COUNTS_100(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 100, 50)
#define UMA_HISTOGRAM_COUNTS_1000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 1000, 50)
#define UMA_HISTOGRAM_COUNTS_10000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 10000, 50)
#define UMA_HISTOGRAM_COUNTS_100000(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 100000, 50)
#define UMA_HISTOGRAM_COUNTS_1M(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 1000000, 50)
#define UMA_HISTOGRAM_COUNTS_10M(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 10000000, 50)
// This can be used when the default ranges are not sufficient. This macro lets
// the metric developer customize the min and max of the sampled range, as well
// as the number of buckets recorded.
// Any data outside the range here will be put in underflow and overflow
// buckets. Min values should be >=1 as emitted 0s will still go into the
// underflow bucket.
// Sample usage:
// UMA_HISTOGRAM_CUSTOM_COUNTS("My.Histogram", sample, 1, 100000000, 50);
#define UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG( \
name, sample, min, max, bucket_count, \
base::HistogramBase::kUmaTargetedHistogramFlag)
//------------------------------------------------------------------------------
// Timing histograms. These are used for collecting timing data (generally
// latencies).
// These macros create exponentially sized histograms (lengths of the bucket
// ranges exponentially increase as the sample range increases). The input
// sample is a base::TimeDelta. The output data is measured in ms granularity.
// All of these macros must be called with |name| as a runtime constant.
// Sample usage:
// UMA_HISTOGRAM_TIMES("My.Timing.Histogram", time_delta);
// Short timings - up to 10 seconds. For high-resolution (microseconds) timings,
// see UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES.
#define UMA_HISTOGRAM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
name, sample, base::TimeDelta::FromMilliseconds(1), \
base::TimeDelta::FromSeconds(10), 50)
// Medium timings - up to 3 minutes. Note this starts at 10ms (no good reason,
// but not worth changing).
#define UMA_HISTOGRAM_MEDIUM_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
name, sample, base::TimeDelta::FromMilliseconds(10), \
base::TimeDelta::FromMinutes(3), 50)
// Long timings - up to an hour.
#define UMA_HISTOGRAM_LONG_TIMES(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
name, sample, base::TimeDelta::FromMilliseconds(1), \
base::TimeDelta::FromHours(1), 50)
// Long timings with higher granularity - up to an hour with 100 buckets.
#define UMA_HISTOGRAM_LONG_TIMES_100(name, sample) UMA_HISTOGRAM_CUSTOM_TIMES( \
name, sample, base::TimeDelta::FromMilliseconds(1), \
base::TimeDelta::FromHours(1), 100)
// This can be used when the default ranges are not sufficient. This macro lets
// the metric developer customize the min and max of the sampled range, as well
// as the number of buckets recorded.
// Sample usage:
// UMA_HISTOGRAM_CUSTOM_TIMES("Very.Long.Timing.Histogram", time_delta,
// base::TimeDelta::FromSeconds(1), base::TimeDelta::FromDays(1), 100);
#define UMA_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
STATIC_HISTOGRAM_POINTER_BLOCK( \
name, AddTimeMillisecondsGranularity(sample), \
base::Histogram::FactoryTimeGet( \
name, min, max, bucket_count, \
base::HistogramBase::kUmaTargetedHistogramFlag))
// Same as UMA_HISTOGRAM_CUSTOM_TIMES but reports |sample| in microseconds,
// dropping the report if this client doesn't have a high-resolution clock.
//
// Note: dropping reports on clients with low-resolution clocks means these
// reports will be biased to a portion of the population on Windows. See
// Windows.HasHighResolutionTimeTicks for the affected sample.
//
// Sample usage:
// UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(
// "High.Resolution.TimingMicroseconds.Histogram", time_delta,
// base::TimeDelta::FromMicroseconds(1),
// base::TimeDelta::FromMilliseconds(10), 100);
#define UMA_HISTOGRAM_CUSTOM_MICROSECONDS_TIMES(name, sample, min, max, \
bucket_count) \
STATIC_HISTOGRAM_POINTER_BLOCK( \
name, AddTimeMicrosecondsGranularity(sample), \
base::Histogram::FactoryMicrosecondsTimeGet( \
name, min, max, bucket_count, \
base::HistogramBase::kUmaTargetedHistogramFlag))
// Scoped class which logs its time on this earth in milliseconds as a UMA
// statistic. This is recommended for when you want a histogram which measures
// the time it takes for a method to execute. This measures up to 10 seconds. It
// uses UMA_HISTOGRAM_TIMES under the hood.
// Sample usage:
// void Function() {
// SCOPED_UMA_HISTOGRAM_TIMER("Component.FunctionTime");
// ...
// }
#define SCOPED_UMA_HISTOGRAM_TIMER(name) \
INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, false, __COUNTER__)
// Similar scoped histogram timer, but this uses UMA_HISTOGRAM_LONG_TIMES_100,
// which measures up to an hour, and uses 100 buckets. This is more expensive
// to store, so only use if this often takes >10 seconds.
#define SCOPED_UMA_HISTOGRAM_LONG_TIMER(name) \
INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, true, __COUNTER__)
//------------------------------------------------------------------------------
// Memory histograms.
// These macros create exponentially sized histograms (lengths of the bucket
// ranges exponentially increase as the sample range increases). The input
// sample must be a number measured in kilobytes.
// All of these macros must be called with |name| as a runtime constant.
// Sample usage:
// UMA_HISTOGRAM_MEMORY_KB("My.Memory.Histogram", memory_in_kb);
// Used to measure common KB-granularity memory stats. Range is up to 500000KB -
// approximately 500M.
#define UMA_HISTOGRAM_MEMORY_KB(name, sample) \
UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1000, 500000, 50)
// Used to measure common MB-granularity memory stats. Range is up to ~64G.
#define UMA_HISTOGRAM_MEMORY_LARGE_MB(name, sample) \
UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 64000, 100)
//------------------------------------------------------------------------------
// Stability-specific histograms.
// Histograms logged in as stability histograms will be included in the initial
// stability log. See comments by declaration of
// MetricsService::PrepareInitialStabilityLog().
// All of these macros must be called with |name| as a runtime constant.
// For details on usage, see the documentation on the non-stability equivalents.
#define UMA_STABILITY_HISTOGRAM_BOOLEAN(name, sample) \
STATIC_HISTOGRAM_POINTER_BLOCK( \
name, AddBoolean(sample), \
base::BooleanHistogram::FactoryGet( \
name, base::HistogramBase::kUmaStabilityHistogramFlag))
#define UMA_STABILITY_HISTOGRAM_COUNTS_100(name, sample) \
UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
#define UMA_STABILITY_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, \
bucket_count) \
INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG( \
name, sample, min, max, bucket_count, \
base::HistogramBase::kUmaStabilityHistogramFlag)
#define UMA_STABILITY_HISTOGRAM_ENUMERATION(name, ...) \
INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO( \
__VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY) \
(name, __VA_ARGS__, base::HistogramBase::kUmaStabilityHistogramFlag)
#define UMA_STABILITY_HISTOGRAM_LONG_TIMES(name, sample) \
STATIC_HISTOGRAM_POINTER_BLOCK( \
name, AddTimeMillisecondsGranularity(sample), \
base::Histogram::FactoryTimeGet( \
name, base::TimeDelta::FromMilliseconds(1), \
base::TimeDelta::FromHours(1), 50, \
base::HistogramBase::kUmaStabilityHistogramFlag))
#define UMA_STABILITY_HISTOGRAM_PERCENTAGE(name, percent_as_int) \
INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG( \
name, percent_as_int, 101, \
base::HistogramBase::kUmaStabilityHistogramFlag)
//------------------------------------------------------------------------------
// Histogram instantiation helpers.
// Support a collection of histograms, perhaps one for each entry in an
// enumeration. This macro manages a block of pointers, adding to a specific
// one by its index.
//
// A typical instantiation looks something like this:
// STATIC_HISTOGRAM_POINTER_GROUP(
// GetHistogramNameForIndex(histogram_index),
// histogram_index, MAXIMUM_HISTOGRAM_INDEX, Add(some_delta),
// base::Histogram::FactoryGet(
// GetHistogramNameForIndex(histogram_index),
// MINIMUM_SAMPLE, MAXIMUM_SAMPLE, BUCKET_COUNT,
// base::HistogramBase::kUmaTargetedHistogramFlag));
//
// Though it seems inefficient to generate the name twice, the first
// instance will be used only for DCHECK builds and the second will
// execute only during the first access to the given index, after which
// the pointer is cached and the name is never needed again.
#define STATIC_HISTOGRAM_POINTER_GROUP( \
constant_histogram_name, index, constant_maximum, \
histogram_add_method_invocation, histogram_factory_get_invocation) \
do { \
static std::atomic_uintptr_t atomic_histograms[constant_maximum]; \
DCHECK_LE(0, index); \
DCHECK_LT(index, constant_maximum); \
HISTOGRAM_POINTER_USE( \
std::addressof(atomic_histograms[index]), constant_histogram_name, \
histogram_add_method_invocation, histogram_factory_get_invocation); \
} while (0)
//------------------------------------------------------------------------------
// Deprecated histogram macros. Not recommended for current use.
// Legacy name for UMA_HISTOGRAM_COUNTS_1M. Suggest using explicit naming
// and not using this macro going forward.
#define UMA_HISTOGRAM_COUNTS(name, sample) UMA_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1, 1000000, 50)
// MB-granularity memory metric. This has a short max (1G).
#define UMA_HISTOGRAM_MEMORY_MB(name, sample) \
UMA_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000, 50)
// For an enum with customized range. In general, sparse histograms should be
// used instead.
// Samples should be one of the std::vector<int> list provided via
// |custom_ranges|. See comments above CustomRanges::FactoryGet about the
// requirement of |custom_ranges|. You can use the helper function
// CustomHistogram::ArrayToCustomEnumRanges to transform a C-style array of
// valid sample values to a std::vector<int>.
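// Sample usage (the values below are purely illustrative):
//   static const int kCustomValues[] = {1, 5, 10, 20, 30};
//   UMA_HISTOGRAM_CUSTOM_ENUMERATION(
//       "My.Custom.Enumeration", sample,
//       base::CustomHistogram::ArrayToCustomEnumRanges(kCustomValues));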
#define UMA_HISTOGRAM_CUSTOM_ENUMERATION(name, sample, custom_ranges) \
STATIC_HISTOGRAM_POINTER_BLOCK(name, Add(sample), \
base::CustomHistogram::FactoryGet(name, custom_ranges, \
base::HistogramBase::kUmaTargetedHistogramFlag))
#endif // BASE_METRICS_HISTOGRAM_MACROS_H_

View file

@ -0,0 +1,261 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
#define BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_
#include <stdint.h>
#include <atomic>
#include <limits>
#include <memory>
#include <type_traits>
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/metrics/sparse_histogram.h"
#include "base/time/time.h"
// This is for macros and helpers internal to base/metrics. They should not be
// used outside of this directory. For writing to UMA histograms, see
// histogram_macros.h.
namespace base {
namespace internal {
// Helper traits for deducing the boundary value for enums.
template <typename Enum, typename SFINAE = void>
struct EnumSizeTraits {
static constexpr Enum Count() {
static_assert(
sizeof(Enum) == 0,
"enumerator must define kMaxValue enumerator to use this macro!");
return Enum();
}
};
// Since the UMA histogram macros expect a value one larger than the max defined
// enumerator value, add one.
template <typename Enum>
struct EnumSizeTraits<
Enum,
std::enable_if_t<std::is_enum<decltype(Enum::kMaxValue)>::value>> {
static constexpr Enum Count() {
return static_cast<Enum>(
static_cast<std::underlying_type_t<Enum>>(Enum::kMaxValue) + 1);
}
};
} // namespace internal
} // namespace base
// TODO(rkaplow): Improve commenting of these methods.
//------------------------------------------------------------------------------
// Histograms are often put in areas where they are called many many times, and
// performance is critical. As a result, they are designed to have a very low
// recurring cost of executing (adding additional samples). Toward that end,
// the macros declare a static pointer to the histogram in question, and only
// take a "slow path" to construct (or find) the histogram on the first run
// through the macro. We leak the histograms at shutdown time so that we don't
// have to validate using the pointers at any time during the running of the
// process.
// In some cases (integration into 3rd party code), it's useful to separate the
// definition of |atomic_histogram_pointer| from its use. To achieve this we
// define HISTOGRAM_POINTER_USE, which uses an |atomic_histogram_pointer|, and
// STATIC_HISTOGRAM_POINTER_BLOCK, which defines an |atomic_histogram_pointer|
// and forwards to HISTOGRAM_POINTER_USE.
#define HISTOGRAM_POINTER_USE( \
atomic_histogram_pointer, constant_histogram_name, \
histogram_add_method_invocation, histogram_factory_get_invocation) \
do { \
base::HistogramBase* histogram_pointer( \
reinterpret_cast<base::HistogramBase*>( \
atomic_histogram_pointer->load(std::memory_order_acquire))); \
if (!histogram_pointer) { \
/* \
* This is the slow path, which will construct OR find the \
* matching histogram. |histogram_factory_get_invocation| includes \
* locks on a global histogram name map and is completely thread \
* safe. \
*/ \
histogram_pointer = histogram_factory_get_invocation; \
\
/* \
* We could do this without any barrier, since FactoryGet() \
* entered and exited a lock after construction, but this barrier \
* makes things clear. \
*/ \
atomic_histogram_pointer->store( \
reinterpret_cast<uintptr_t>(histogram_pointer), \
std::memory_order_release); \
} \
if (DCHECK_IS_ON()) \
histogram_pointer->CheckName(constant_histogram_name); \
histogram_pointer->histogram_add_method_invocation; \
} while (0)
// This is a helper macro used by other macros and shouldn't be used directly.
// Defines the static |atomic_histogram_pointer| and forwards to
// HISTOGRAM_POINTER_USE.
#define STATIC_HISTOGRAM_POINTER_BLOCK(constant_histogram_name, \
histogram_add_method_invocation, \
histogram_factory_get_invocation) \
do { \
/* \
* The pointer's presence indicates that the initialization is complete. \
* Initialization is idempotent, so it can safely be atomically repeated. \
*/ \
static std::atomic_uintptr_t atomic_histogram_pointer; \
HISTOGRAM_POINTER_USE( \
std::addressof(atomic_histogram_pointer), constant_histogram_name, \
histogram_add_method_invocation, histogram_factory_get_invocation); \
} while (0)
// This is a helper macro used by other macros and shouldn't be used directly.
#define INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG(name, sample, min, max, \
bucket_count, flag) \
STATIC_HISTOGRAM_POINTER_BLOCK( \
name, Add(sample), \
base::Histogram::FactoryGet(name, min, max, bucket_count, flag))
// This is a helper macro used by other macros and shouldn't be used directly.
// The bucketing scheme is linear with a bucket size of 1. For N items,
// recording values in the range [0, N - 1] creates a linear histogram with N +
// 1 buckets:
// [0, 1), [1, 2), ..., [N - 1, N)
// and an overflow bucket [N, infinity).
//
// Code should never emit to the overflow bucket; only to the other N buckets.
// This allows future versions of Chrome to safely increase the boundary size.
// Otherwise, the histogram would have [N - 1, infinity) as its overflow bucket,
// and so the maximal value (N - 1) would be emitted to this overflow bucket.
// But, if an additional value were later added, the bucket label for
// the value (N - 1) would change to [N - 1, N), which would result in different
// versions of Chrome using different bucket labels for identical data.
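// For example, with N = 3 the buckets are [0, 1), [1, 2), [2, 3), plus the
// overflow bucket [3, infinity); emitting only 0, 1, or 2 keeps bucket labels
// stable if a fourth value is added later.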
#define INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG(name, sample, boundary, \
flag) \
do { \
static_assert(!std::is_enum<std::decay_t<decltype(sample)>>::value, \
"|sample| should not be an enum type!"); \
static_assert(!std::is_enum<std::decay_t<decltype(boundary)>>::value, \
"|boundary| should not be an enum type!"); \
STATIC_HISTOGRAM_POINTER_BLOCK( \
name, Add(sample), \
base::LinearHistogram::FactoryGet(name, 1, boundary, boundary + 1, \
flag)); \
} while (0)
// While this behaves the same as the above macro, the wrapping of a linear
// histogram with another object to do the scaling means the POINTER_BLOCK
// macro can't be used, as it is tied to HistogramBase.
#define INTERNAL_HISTOGRAM_SCALED_EXACT_LINEAR_WITH_FLAG( \
name, sample, count, boundary, scale, flag) \
do { \
static_assert(!std::is_enum<std::decay_t<decltype(sample)>>::value, \
"|sample| should not be an enum type!"); \
static_assert(!std::is_enum<std::decay_t<decltype(boundary)>>::value, \
"|boundary| should not be an enum type!"); \
class ScaledLinearHistogramInstance : public base::ScaledLinearHistogram { \
public: \
ScaledLinearHistogramInstance() \
: ScaledLinearHistogram(name, \
1, \
boundary, \
boundary + 1, \
scale, \
flag) {} \
}; \
static base::LazyInstance<ScaledLinearHistogramInstance>::Leaky scaled; \
scaled.Get().AddScaledCount(sample, count); \
} while (0)
// Helper for 'overloading' UMA_HISTOGRAM_ENUMERATION with a variable number of
// arguments.
#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO(_1, _2, NAME, ...) NAME
#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY(name, sample, \
flags) \
INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG( \
name, sample, \
base::internal::EnumSizeTraits<std::decay_t<decltype(sample)>>::Count(), \
flags)
// Note: The value in |sample| must be strictly less than |enum_size|.
#define INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY(name, sample, \
enum_size, flags) \
INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, enum_size, flags)
// Similar to the previous macro but intended for enumerations. This delegates
// the work to the previous macro, but supports scoped enumerations as well by
// forcing an explicit cast to the HistogramBase::Sample integral type.
//
// Note the range checks verify two separate issues:
// - that the declared enum size isn't out of range of HistogramBase::Sample
// - that the declared enum size is > 0
//
// TODO(dcheng): This should assert that the passed in types are actually enum
// types.
#define INTERNAL_HISTOGRAM_ENUMERATION_WITH_FLAG(name, sample, boundary, flag) \
do { \
using decayed_sample = std::decay<decltype(sample)>::type; \
using decayed_boundary = std::decay<decltype(boundary)>::type; \
static_assert(!std::is_enum<decayed_boundary>::value || \
std::is_enum<decayed_sample>::value, \
"Unexpected: |boundary| is enum, but |sample| is not."); \
static_assert(!std::is_enum<decayed_sample>::value || \
!std::is_enum<decayed_boundary>::value || \
std::is_same<decayed_sample, decayed_boundary>::value, \
"|sample| and |boundary| shouldn't be of different enums"); \
static_assert( \
static_cast<uintmax_t>(boundary) < \
static_cast<uintmax_t>( \
std::numeric_limits<base::HistogramBase::Sample>::max()), \
"|boundary| is out of range of HistogramBase::Sample"); \
INTERNAL_HISTOGRAM_EXACT_LINEAR_WITH_FLAG( \
name, static_cast<base::HistogramBase::Sample>(sample), \
static_cast<base::HistogramBase::Sample>(boundary), flag); \
} while (0)
#define INTERNAL_HISTOGRAM_SCALED_ENUMERATION_WITH_FLAG(name, sample, count, \
scale, flag) \
do { \
using decayed_sample = std::decay<decltype(sample)>::type; \
static_assert(std::is_enum<decayed_sample>::value, \
"Unexpected: |sample| is not at enum."); \
constexpr auto boundary = base::internal::EnumSizeTraits< \
std::decay_t<decltype(sample)>>::Count(); \
static_assert( \
static_cast<uintmax_t>(boundary) < \
static_cast<uintmax_t>( \
std::numeric_limits<base::HistogramBase::Sample>::max()), \
"|boundary| is out of range of HistogramBase::Sample"); \
INTERNAL_HISTOGRAM_SCALED_EXACT_LINEAR_WITH_FLAG( \
name, static_cast<base::HistogramBase::Sample>(sample), count, \
static_cast<base::HistogramBase::Sample>(boundary), scale, flag); \
} while (0)
// This is a helper macro used by other macros and shouldn't be used directly.
// This is necessary to expand __COUNTER__ to an actual value.
#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_EXPANDER(name, is_long, key) \
INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key)
// This is a helper macro used by other macros and shouldn't be used directly.
#define INTERNAL_SCOPED_UMA_HISTOGRAM_TIMER_UNIQUE(name, is_long, key) \
class ScopedHistogramTimer##key { \
public: \
ScopedHistogramTimer##key() : constructed_(base::TimeTicks::Now()) {} \
~ScopedHistogramTimer##key() { \
base::TimeDelta elapsed = base::TimeTicks::Now() - constructed_; \
if (is_long) { \
UMA_HISTOGRAM_LONG_TIMES_100(name, elapsed); \
} else { \
UMA_HISTOGRAM_TIMES(name, elapsed); \
} \
} \
private: \
base::TimeTicks constructed_; \
} scoped_histogram_timer_##key
#endif // BASE_METRICS_HISTOGRAM_MACROS_INTERNAL_H_

View file

@ -0,0 +1,90 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
#define BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_
#include "base/logging.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_macros_internal.h"
#include "base/time/time.h"
// TODO(rkaplow): Migrate all LOCAL_* usage within Chromium to include this
// file instead of the histogram_macros.h file.
//------------------------------------------------------------------------------
// Enumeration histograms.
//
// For usage details, see the equivalents in histogram_macros.h.
#define LOCAL_HISTOGRAM_ENUMERATION(name, ...) \
INTERNAL_UMA_HISTOGRAM_ENUMERATION_GET_MACRO( \
__VA_ARGS__, INTERNAL_UMA_HISTOGRAM_ENUMERATION_SPECIFY_BOUNDARY, \
INTERNAL_UMA_HISTOGRAM_ENUMERATION_DEDUCE_BOUNDARY) \
(name, __VA_ARGS__, base::HistogramBase::kNoFlags)
#define LOCAL_HISTOGRAM_BOOLEAN(name, sample) \
STATIC_HISTOGRAM_POINTER_BLOCK(name, AddBoolean(sample), \
base::BooleanHistogram::FactoryGet(name, base::Histogram::kNoFlags))
//------------------------------------------------------------------------------
// Percentage histograms.
//
// For usage details, see the equivalents in histogram_macros.h
#define LOCAL_HISTOGRAM_PERCENTAGE(name, under_one_hundred) \
LOCAL_HISTOGRAM_ENUMERATION(name, under_one_hundred, 101)
//------------------------------------------------------------------------------
// Count histograms. These are used for collecting numeric data. Note that we
// have macros for more specialized use cases below (memory, time, percentages).
// For usage details, see the equivalents in histogram_macros.h.
#define LOCAL_HISTOGRAM_COUNTS_100(name, sample) \
LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 100, 50)
#define LOCAL_HISTOGRAM_COUNTS_10000(name, sample) \
LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 10000, 50)
#define LOCAL_HISTOGRAM_COUNTS_1000000(name, sample) \
LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000000, 50)
#define LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, min, max, bucket_count) \
INTERNAL_HISTOGRAM_CUSTOM_COUNTS_WITH_FLAG( \
name, sample, min, max, bucket_count, base::HistogramBase::kNoFlags)
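// Illustrative usage (not from the original source); the histogram names,
// |queue_size| and |bytes| are hypothetical:
//   LOCAL_HISTOGRAM_COUNTS_100("My.Queue.Size", queue_size);
//   LOCAL_HISTOGRAM_CUSTOM_COUNTS("My.Queue.Bytes", bytes, 1, 1000000, 50);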
//------------------------------------------------------------------------------
// Timing histograms. These are used for collecting timing data (generally
// latencies).
//
// For usage details, see the equivalents in histogram_macros.h.
#define LOCAL_HISTOGRAM_TIMES(name, sample) LOCAL_HISTOGRAM_CUSTOM_TIMES( \
name, sample, base::TimeDelta::FromMilliseconds(1), \
base::TimeDelta::FromSeconds(10), 50)
#define LOCAL_HISTOGRAM_CUSTOM_TIMES(name, sample, min, max, bucket_count) \
STATIC_HISTOGRAM_POINTER_BLOCK( \
name, AddTimeMillisecondsGranularity(sample), \
base::Histogram::FactoryTimeGet(name, min, max, bucket_count, \
base::HistogramBase::kNoFlags))
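// Illustrative usage (not from the original source); |start_time| is a
// hypothetical base::TimeTicks captured earlier:
//   LOCAL_HISTOGRAM_TIMES("My.Operation.Duration",
//                         base::TimeTicks::Now() - start_time);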
//------------------------------------------------------------------------------
// Memory histograms.
//
// For usage details, see the equivalents in histogram_macros.h.
#define LOCAL_HISTOGRAM_MEMORY_KB(name, sample) LOCAL_HISTOGRAM_CUSTOM_COUNTS( \
name, sample, 1000, 500000, 50)
//------------------------------------------------------------------------------
// Deprecated histograms. Not recommended for current use.
// TODO(rkaplow): See if we can clean up this macro and usage.
// Legacy non-explicit version. We suggest using LOCAL_HISTOGRAM_COUNTS_1000000
// instead.
#define LOCAL_HISTOGRAM_COUNTS(name, sample) \
LOCAL_HISTOGRAM_CUSTOM_COUNTS(name, sample, 1, 1000000, 50)
#endif // BASE_METRICS_HISTOGRAM_MACROS_LOCAL_H_

View file

@@ -0,0 +1,315 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/histogram_samples.h"
#include <limits>
#include "base/compiler_specific.h"
#include "base/metrics/histogram_functions.h"
#include "base/metrics/histogram_macros.h"
#include "base/numerics/safe_conversions.h"
#include "base/numerics/safe_math.h"
#include "base/pickle.h"
namespace base {
namespace {
// A shorthand constant for the max value of size_t.
constexpr size_t kSizeMax = std::numeric_limits<size_t>::max();
// A constant stored in an AtomicSingleSample (as_atomic) to indicate that the
// sample is "disabled" and no further accumulation should be done with it. The
// value is chosen such that it will be MAX_UINT16 for both |bucket| & |count|,
// and thus less likely to conflict with real use. Conflicts are explicitly
// handled in the code but it's worth making them as unlikely as possible.
constexpr int32_t kDisabledSingleSample = -1;
class SampleCountPickleIterator : public SampleCountIterator {
public:
explicit SampleCountPickleIterator(PickleIterator* iter);
bool Done() const override;
void Next() override;
void Get(HistogramBase::Sample* min,
int64_t* max,
HistogramBase::Count* count) const override;
private:
PickleIterator* const iter_;
HistogramBase::Sample min_;
int64_t max_;
HistogramBase::Count count_;
bool is_done_;
};
SampleCountPickleIterator::SampleCountPickleIterator(PickleIterator* iter)
: iter_(iter),
is_done_(false) {
Next();
}
bool SampleCountPickleIterator::Done() const {
return is_done_;
}
void SampleCountPickleIterator::Next() {
DCHECK(!Done());
if (!iter_->ReadInt(&min_) || !iter_->ReadInt64(&max_) ||
!iter_->ReadInt(&count_)) {
is_done_ = true;
}
}
void SampleCountPickleIterator::Get(HistogramBase::Sample* min,
int64_t* max,
HistogramBase::Count* count) const {
DCHECK(!Done());
*min = min_;
*max = max_;
*count = count_;
}
} // namespace
static_assert(sizeof(HistogramSamples::AtomicSingleSample) ==
sizeof(subtle::Atomic32),
"AtomicSingleSample isn't 32 bits");
HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Load()
const {
AtomicSingleSample single_sample = subtle::Acquire_Load(&as_atomic);
// If the sample was extracted/disabled, it's still zero to the outside.
if (single_sample.as_atomic == kDisabledSingleSample)
single_sample.as_atomic = 0;
return single_sample.as_parts;
}
HistogramSamples::SingleSample HistogramSamples::AtomicSingleSample::Extract(
bool disable) {
AtomicSingleSample single_sample = subtle::NoBarrier_AtomicExchange(
&as_atomic, disable ? kDisabledSingleSample : 0);
if (single_sample.as_atomic == kDisabledSingleSample)
single_sample.as_atomic = 0;
return single_sample.as_parts;
}
bool HistogramSamples::AtomicSingleSample::Accumulate(
size_t bucket,
HistogramBase::Count count) {
if (count == 0)
return true;
// Convert the parameters to 16-bit variables because it's all 16-bit below.
// To support decrements/subtractions, divide the |count| into sign/value and
// do the proper operation below. The alternative is to change the single-
// sample's count to be a signed integer (int16_t) and just add an int16_t
// |count16| but that is somewhat wasteful given that the single-sample is
// never expected to have a count less than zero.
if (count < -std::numeric_limits<uint16_t>::max() ||
count > std::numeric_limits<uint16_t>::max() ||
bucket > std::numeric_limits<uint16_t>::max()) {
return false;
}
bool count_is_negative = count < 0;
uint16_t count16 = static_cast<uint16_t>(count_is_negative ? -count : count);
uint16_t bucket16 = static_cast<uint16_t>(bucket);
// A local, unshared copy of the single-sample is necessary so the parts
// can be manipulated without worrying about atomicity.
AtomicSingleSample single_sample;
bool sample_updated;
do {
subtle::Atomic32 original = subtle::Acquire_Load(&as_atomic);
if (original == kDisabledSingleSample)
return false;
single_sample.as_atomic = original;
if (single_sample.as_atomic != 0) {
// Only the same bucket (parameter and stored) can be counted multiple
// times.
if (single_sample.as_parts.bucket != bucket16)
return false;
} else {
      // The |single_sample| was zero so it becomes the |bucket| parameter, the
// contents of which were checked above to fit in 16 bits.
single_sample.as_parts.bucket = bucket16;
}
// Update count, making sure that it doesn't overflow.
CheckedNumeric<uint16_t> new_count(single_sample.as_parts.count);
if (count_is_negative)
new_count -= count16;
else
new_count += count16;
if (!new_count.AssignIfValid(&single_sample.as_parts.count))
return false;
// Don't let this become equivalent to the "disabled" value.
if (single_sample.as_atomic == kDisabledSingleSample)
return false;
// Store the updated single-sample back into memory. |existing| is what
// was in that memory location at the time of the call; if it doesn't
// match |original| then the swap didn't happen so loop again.
subtle::Atomic32 existing = subtle::Release_CompareAndSwap(
&as_atomic, original, single_sample.as_atomic);
sample_updated = (existing == original);
} while (!sample_updated);
return true;
}
bool HistogramSamples::AtomicSingleSample::IsDisabled() const {
return subtle::Acquire_Load(&as_atomic) == kDisabledSingleSample;
}
HistogramSamples::LocalMetadata::LocalMetadata() {
// This is the same way it's done for persistent metadata since no ctor
// is called for the data members in that case.
memset(this, 0, sizeof(*this));
}
HistogramSamples::HistogramSamples(uint64_t id, Metadata* meta)
: meta_(meta) {
DCHECK(meta_->id == 0 || meta_->id == id);
// It's possible that |meta| is contained in initialized, read-only memory
// so it's essential that no write be done in that case.
if (!meta_->id)
meta_->id = id;
}
// This mustn't do anything with |meta_|. It was passed to the ctor and may
// be invalid by the time this dtor gets called.
HistogramSamples::~HistogramSamples() = default;
void HistogramSamples::Add(const HistogramSamples& other) {
IncreaseSumAndCount(other.sum(), other.redundant_count());
std::unique_ptr<SampleCountIterator> it = other.Iterator();
bool success = AddSubtractImpl(it.get(), ADD);
DCHECK(success);
}
bool HistogramSamples::AddFromPickle(PickleIterator* iter) {
int64_t sum;
HistogramBase::Count redundant_count;
if (!iter->ReadInt64(&sum) || !iter->ReadInt(&redundant_count))
return false;
IncreaseSumAndCount(sum, redundant_count);
SampleCountPickleIterator pickle_iter(iter);
return AddSubtractImpl(&pickle_iter, ADD);
}
void HistogramSamples::Subtract(const HistogramSamples& other) {
IncreaseSumAndCount(-other.sum(), -other.redundant_count());
std::unique_ptr<SampleCountIterator> it = other.Iterator();
bool success = AddSubtractImpl(it.get(), SUBTRACT);
DCHECK(success);
}
void HistogramSamples::Serialize(Pickle* pickle) const {
pickle->WriteInt64(sum());
pickle->WriteInt(redundant_count());
HistogramBase::Sample min;
int64_t max;
HistogramBase::Count count;
for (std::unique_ptr<SampleCountIterator> it = Iterator(); !it->Done();
it->Next()) {
it->Get(&min, &max, &count);
pickle->WriteInt(min);
pickle->WriteInt64(max);
pickle->WriteInt(count);
}
}
bool HistogramSamples::AccumulateSingleSample(HistogramBase::Sample value,
HistogramBase::Count count,
size_t bucket) {
if (single_sample().Accumulate(bucket, count)) {
// Success. Update the (separate) sum and redundant-count.
IncreaseSumAndCount(strict_cast<int64_t>(value) * count, count);
return true;
}
return false;
}
void HistogramSamples::IncreaseSumAndCount(int64_t sum,
HistogramBase::Count count) {
#ifdef ARCH_CPU_64_BITS
subtle::NoBarrier_AtomicIncrement(&meta_->sum, sum);
#else
meta_->sum += sum;
#endif
subtle::NoBarrier_AtomicIncrement(&meta_->redundant_count, count);
}
void HistogramSamples::RecordNegativeSample(NegativeSampleReason reason,
HistogramBase::Count increment) {
UMA_HISTOGRAM_ENUMERATION("UMA.NegativeSamples.Reason", reason,
MAX_NEGATIVE_SAMPLE_REASONS);
UMA_HISTOGRAM_CUSTOM_COUNTS("UMA.NegativeSamples.Increment", increment, 1,
1 << 30, 100);
UmaHistogramSparse("UMA.NegativeSamples.Histogram",
static_cast<int32_t>(id()));
}
SampleCountIterator::~SampleCountIterator() = default;
bool SampleCountIterator::GetBucketIndex(size_t* index) const {
DCHECK(!Done());
return false;
}
SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
int64_t max,
HistogramBase::Count count)
: SingleSampleIterator(min, max, count, kSizeMax) {}
SingleSampleIterator::SingleSampleIterator(HistogramBase::Sample min,
int64_t max,
HistogramBase::Count count,
size_t bucket_index)
: min_(min), max_(max), bucket_index_(bucket_index), count_(count) {}
SingleSampleIterator::~SingleSampleIterator() = default;
bool SingleSampleIterator::Done() const {
return count_ == 0;
}
void SingleSampleIterator::Next() {
DCHECK(!Done());
count_ = 0;
}
void SingleSampleIterator::Get(HistogramBase::Sample* min,
int64_t* max,
HistogramBase::Count* count) const {
DCHECK(!Done());
if (min != nullptr)
*min = min_;
if (max != nullptr)
*max = max_;
if (count != nullptr)
*count = count_;
}
bool SingleSampleIterator::GetBucketIndex(size_t* index) const {
DCHECK(!Done());
if (bucket_index_ == kSizeMax)
return false;
*index = bucket_index_;
return true;
}
} // namespace base

View file

@@ -0,0 +1,267 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_HISTOGRAM_SAMPLES_H_
#define BASE_METRICS_HISTOGRAM_SAMPLES_H_
#include <stddef.h>
#include <stdint.h>
#include <limits>
#include <memory>
#include "base/atomicops.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
namespace base {
class Pickle;
class PickleIterator;
class SampleCountIterator;
// HistogramSamples is a container storing all samples of a histogram. All
// elements must be of a fixed width to ensure 32/64-bit interoperability.
// If this structure changes, bump the version number for kTypeIdHistogram
// in persistent_histogram_allocator.cc.
//
// Note that though these samples are individually consistent (through the use
// of atomic operations on the counts), there is only "eventual consistency"
// overall when multiple threads are accessing this data. That means that the
// sum, redundant-count, etc. could be momentarily out-of-sync with the stored
// counts but will settle to a consistent "steady state" once all threads have
// exited this code.
class BASE_EXPORT HistogramSamples {
public:
// A single bucket and count. To fit within a single atomic on 32-bit build
// architectures, both |bucket| and |count| are limited in size to 16 bits.
// This limits the functionality somewhat but if an entry can't fit then
// the full array of samples can be allocated and used.
struct SingleSample {
uint16_t bucket;
uint16_t count;
};
// A structure for managing an atomic single sample. Because this is generally
// used in association with other atomic values, the defined methods use
// acquire/release operations to guarantee ordering with outside values.
union BASE_EXPORT AtomicSingleSample {
AtomicSingleSample() : as_atomic(0) {}
AtomicSingleSample(subtle::Atomic32 rhs) : as_atomic(rhs) {}
// Returns the single sample in an atomic manner. This is an "acquire"
// load. The returned sample isn't shared and thus its fields can be safely
// accessed.
SingleSample Load() const;
// Extracts the single sample in an atomic manner. If |disable| is true
// then this object will be set so it will never accumulate another value.
// This is "no barrier" so doesn't enforce ordering with other atomic ops.
SingleSample Extract(bool disable);
// Adds a given count to the held bucket. If not possible, it returns false
// and leaves the parts unchanged. Once extracted/disabled, this always
// returns false. This is an "acquire/release" operation.
bool Accumulate(size_t bucket, HistogramBase::Count count);
// Returns if the sample has been "disabled" (via Extract) and thus not
// allowed to accept further accumulation.
bool IsDisabled() const;
private:
// union field: The actual sample bucket and count.
SingleSample as_parts;
// union field: The sample as an atomic value. Atomic64 would provide
// more flexibility but isn't available on all builds. This can hold a
// special, internal "disabled" value indicating that it must not accept
// further accumulation.
subtle::Atomic32 as_atomic;
};
// A structure of information about the data, common to all sample containers.
// Because of how this is used in persistent memory, it must be a POD object
// that makes sense when initialized to all zeros.
struct Metadata {
// Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize = 24;
// Initialized when the sample-set is first created with a value provided
// by the caller. It is generally used to identify the sample-set across
// threads and processes, though not necessarily uniquely as it is possible
// to have multiple sample-sets representing subsets of the data.
uint64_t id;
// The sum of all the entries, effectively the sum(sample * count) for
// all samples. Despite being atomic, no guarantees are made on the
// accuracy of this value; there may be races during histogram
// accumulation and snapshotting that we choose to accept. It should
// be treated as approximate.
#ifdef ARCH_CPU_64_BITS
subtle::Atomic64 sum;
#else
// 32-bit systems don't have atomic 64-bit operations. Use a basic type
// and don't worry about "shearing".
int64_t sum;
#endif
// A "redundant" count helps identify memory corruption. It redundantly
// stores the total number of samples accumulated in the histogram. We
// can compare this count to the sum of the counts (TotalCount() function),
// and detect problems. Note, depending on the implementation of different
// histogram types, there might be races during histogram accumulation
// and snapshotting that we choose to accept. In this case, the tallies
// might mismatch even when no memory corruption has happened.
HistogramBase::AtomicCount redundant_count;
// A single histogram value and associated count. This allows histograms
// that typically report only a single value to not require full storage
// to be allocated.
AtomicSingleSample single_sample; // 32 bits
};
// Because structures held in persistent memory must be POD, there can be no
// default constructor to clear the fields. This derived class exists just
// to clear them when being allocated on the heap.
struct BASE_EXPORT LocalMetadata : Metadata {
LocalMetadata();
};
HistogramSamples(uint64_t id, Metadata* meta);
virtual ~HistogramSamples();
virtual void Accumulate(HistogramBase::Sample value,
HistogramBase::Count count) = 0;
virtual HistogramBase::Count GetCount(HistogramBase::Sample value) const = 0;
virtual HistogramBase::Count TotalCount() const = 0;
virtual void Add(const HistogramSamples& other);
// Add from serialized samples.
virtual bool AddFromPickle(PickleIterator* iter);
virtual void Subtract(const HistogramSamples& other);
virtual std::unique_ptr<SampleCountIterator> Iterator() const = 0;
virtual void Serialize(Pickle* pickle) const;
// Accessor functions.
uint64_t id() const { return meta_->id; }
int64_t sum() const {
#ifdef ARCH_CPU_64_BITS
return subtle::NoBarrier_Load(&meta_->sum);
#else
return meta_->sum;
#endif
}
HistogramBase::Count redundant_count() const {
return subtle::NoBarrier_Load(&meta_->redundant_count);
}
protected:
enum NegativeSampleReason {
SAMPLES_HAVE_LOGGED_BUT_NOT_SAMPLE,
SAMPLES_SAMPLE_LESS_THAN_LOGGED,
SAMPLES_ADDED_NEGATIVE_COUNT,
SAMPLES_ADD_WENT_NEGATIVE,
SAMPLES_ADD_OVERFLOW,
SAMPLES_ACCUMULATE_NEGATIVE_COUNT,
SAMPLES_ACCUMULATE_WENT_NEGATIVE,
DEPRECATED_SAMPLES_ACCUMULATE_OVERFLOW,
SAMPLES_ACCUMULATE_OVERFLOW,
MAX_NEGATIVE_SAMPLE_REASONS
};
// Based on |op| type, add or subtract sample counts data from the iterator.
enum Operator { ADD, SUBTRACT };
virtual bool AddSubtractImpl(SampleCountIterator* iter, Operator op) = 0;
// Accumulates to the embedded single-sample field if possible. Returns true
// on success, false otherwise. Sum and redundant-count are also updated in
// the success case.
bool AccumulateSingleSample(HistogramBase::Sample value,
HistogramBase::Count count,
size_t bucket);
// Atomically adjust the sum and redundant-count.
void IncreaseSumAndCount(int64_t sum, HistogramBase::Count count);
// Record a negative-sample observation and the reason why.
void RecordNegativeSample(NegativeSampleReason reason,
HistogramBase::Count increment);
AtomicSingleSample& single_sample() { return meta_->single_sample; }
const AtomicSingleSample& single_sample() const {
return meta_->single_sample;
}
Metadata* meta() { return meta_; }
private:
// Depending on the derived class, meta values can come from local storage or
// from external storage, in which case the HistogramSamples class cannot take
// ownership of the Metadata*.
Metadata* meta_;
DISALLOW_COPY_AND_ASSIGN(HistogramSamples);
};
class BASE_EXPORT SampleCountIterator {
public:
virtual ~SampleCountIterator();
virtual bool Done() const = 0;
virtual void Next() = 0;
// Get the sample and count at current position.
// |min|, |max| and |count| can be NULL if the value is not of interest.
// Note: |max| is int64_t because histograms support logged values in the
// full int32_t range and bucket max is exclusive, so it needs to support
// values up to MAXINT32+1.
// Requires: !Done();
virtual void Get(HistogramBase::Sample* min,
int64_t* max,
HistogramBase::Count* count) const = 0;
static_assert(std::numeric_limits<HistogramBase::Sample>::max() <
std::numeric_limits<int64_t>::max(),
"Get() |max| must be able to hold Histogram::Sample max + 1");
// Get the index of current histogram bucket.
// For histograms that don't use predefined buckets, it returns false.
// Requires: !Done();
virtual bool GetBucketIndex(size_t* index) const;
};
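// Illustrative iteration sketch (not part of the original source); assumes
// |samples| is a std::unique_ptr<HistogramSamples> obtained from a histogram:
//   HistogramBase::Sample min;
//   int64_t max;
//   HistogramBase::Count count;
//   for (std::unique_ptr<SampleCountIterator> it = samples->Iterator();
//        !it->Done(); it->Next()) {
//     it->Get(&min, &max, &count);
//     // Each [min, max) pair delimits one bucket holding |count| samples.
//   }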
class BASE_EXPORT SingleSampleIterator : public SampleCountIterator {
public:
SingleSampleIterator(HistogramBase::Sample min,
int64_t max,
HistogramBase::Count count);
SingleSampleIterator(HistogramBase::Sample min,
int64_t max,
HistogramBase::Count count,
size_t bucket_index);
~SingleSampleIterator() override;
// SampleCountIterator:
bool Done() const override;
void Next() override;
void Get(HistogramBase::Sample* min,
int64_t* max,
HistogramBase::Count* count) const override;
// SampleVector uses predefined buckets so iterator can return bucket index.
bool GetBucketIndex(size_t* index) const override;
private:
// Information about the single value to return.
const HistogramBase::Sample min_;
const int64_t max_;
const size_t bucket_index_;
HistogramBase::Count count_;
};
} // namespace base
#endif // BASE_METRICS_HISTOGRAM_SAMPLES_H_

View file

@@ -0,0 +1,123 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/histogram_snapshot_manager.h"
#include <memory>
#include "base/debug/alias.h"
#include "base/metrics/histogram_flattener.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/statistics_recorder.h"
#include "base/stl_util.h"
namespace base {
namespace {
// A simple object to set an "active" flag and clear it upon destruction. It is
// an error if the flag is already set.
class MakeActive {
public:
MakeActive(std::atomic<bool>* is_active) : is_active_(is_active) {
bool was_active = is_active_->exchange(true, std::memory_order_relaxed);
CHECK(!was_active);
}
~MakeActive() { is_active_->store(false, std::memory_order_relaxed); }
private:
std::atomic<bool>* is_active_;
DISALLOW_COPY_AND_ASSIGN(MakeActive);
};
} // namespace
HistogramSnapshotManager::HistogramSnapshotManager(
HistogramFlattener* histogram_flattener)
: histogram_flattener_(histogram_flattener) {
DCHECK(histogram_flattener_);
is_active_.store(false, std::memory_order_relaxed);
}
HistogramSnapshotManager::~HistogramSnapshotManager() = default;
void HistogramSnapshotManager::PrepareDeltas(
const std::vector<HistogramBase*>& histograms,
HistogramBase::Flags flags_to_set,
HistogramBase::Flags required_flags) {
for (HistogramBase* const histogram : histograms) {
histogram->SetFlags(flags_to_set);
if ((histogram->flags() & required_flags) == required_flags)
PrepareDelta(histogram);
}
}
void HistogramSnapshotManager::PrepareDelta(HistogramBase* histogram) {
histogram->ValidateHistogramContents();
PrepareSamples(histogram, histogram->SnapshotDelta());
}
void HistogramSnapshotManager::PrepareFinalDelta(
const HistogramBase* histogram) {
histogram->ValidateHistogramContents();
PrepareSamples(histogram, histogram->SnapshotFinalDelta());
}
void HistogramSnapshotManager::PrepareSamples(
const HistogramBase* histogram,
std::unique_ptr<HistogramSamples> samples) {
DCHECK(histogram_flattener_);
// Ensure that there is no concurrent access going on while accessing the
// set of known histograms. The flag will be reset when this object goes
// out of scope.
MakeActive make_active(&is_active_);
// Get information known about this histogram. If it did not previously
// exist, one will be created and initialized.
SampleInfo* sample_info = &known_histograms_[histogram->name_hash()];
// Crash if we detect that our histograms have been overwritten. This may be
// a fair distance from the memory smasher, but we hope to correlate these
// crashes with other events, such as plugins, or usage patterns, etc.
uint32_t corruption = histogram->FindCorruption(*samples);
if (HistogramBase::BUCKET_ORDER_ERROR & corruption) {
// Extract fields useful during debug.
const BucketRanges* ranges =
static_cast<const Histogram*>(histogram)->bucket_ranges();
uint32_t ranges_checksum = ranges->checksum();
uint32_t ranges_calc_checksum = ranges->CalculateChecksum();
int32_t flags = histogram->flags();
// The checksum should have caught this, so crash separately if it didn't.
CHECK_NE(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
CHECK(false); // Crash for the bucket order corruption.
// Ensure that the compiler keeps around pointers to |histogram| and its
// internal |bucket_ranges_| for any minidumps.
base::debug::Alias(&ranges_checksum);
base::debug::Alias(&ranges_calc_checksum);
base::debug::Alias(&flags);
}
// Checksum corruption might not have caused order corruption.
CHECK_EQ(0U, HistogramBase::RANGE_CHECKSUM_ERROR & corruption);
// Note, at this point corruption can only be COUNT_HIGH_ERROR or
// COUNT_LOW_ERROR and they never arise together, so we don't need to extract
// bits from corruption.
if (corruption) {
DLOG(ERROR) << "Histogram: \"" << histogram->histogram_name()
<< "\" has data corruption: " << corruption;
// Don't record corrupt data to metrics services.
const uint32_t old_corruption = sample_info->inconsistencies;
if (old_corruption == (corruption | old_corruption))
return; // We've already seen this corruption for this histogram.
sample_info->inconsistencies |= corruption;
return;
}
if (samples->TotalCount() > 0)
histogram_flattener_->RecordDelta(*histogram, *samples);
}
} // namespace base

View file

@@ -0,0 +1,90 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
#define BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_
#include <stdint.h>
#include <atomic>
#include <map>
#include <string>
#include <vector>
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
namespace base {
class HistogramSamples;
class HistogramFlattener;
// HistogramSnapshotManager handles the logistics of gathering up available
// histograms for recording either to disk or for transmission (such as from
// renderer to browser, or from browser to UMA upload). Since histograms can sit
// in memory for an extended period of time, and are vulnerable to memory
// corruption, this class also validates as much redundancy as it can before
// calling for the marginal change (a.k.a., delta) in a histogram to be
// recorded.
class BASE_EXPORT HistogramSnapshotManager final {
public:
explicit HistogramSnapshotManager(HistogramFlattener* histogram_flattener);
~HistogramSnapshotManager();
// Snapshot all histograms, and ask |histogram_flattener_| to record the
// delta. |flags_to_set| is used to set flags for each histogram.
// |required_flags| is used to select histograms to be recorded.
// Only histograms that have all the flags specified by the argument will be
// chosen. If all histograms should be recorded, set it to
// |Histogram::kNoFlags|.
void PrepareDeltas(const std::vector<HistogramBase*>& histograms,
HistogramBase::Flags flags_to_set,
HistogramBase::Flags required_flags);
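// Illustrative usage sketch (not from the original source); assumes
// |flattener| is a caller-provided HistogramFlattener implementation and that
// StatisticsRecorder::GetHistograms() returns the registered histograms:
//   HistogramSnapshotManager manager(&flattener);
//   manager.PrepareDeltas(StatisticsRecorder::GetHistograms(),
//                         HistogramBase::kNoFlags,
//                         HistogramBase::kUmaTargetedHistogramFlag);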
// When the collection is not simple enough to be done using a single
// iterator, the steps can be performed separately. Call PrepareDelta()
// as many times as necessary. PrepareFinalDelta() works like PrepareDelta()
// except that it does not update the previous logged values and can thus
// be used with read-only files.
void PrepareDelta(HistogramBase* histogram);
void PrepareFinalDelta(const HistogramBase* histogram);
private:
FRIEND_TEST_ALL_PREFIXES(HistogramSnapshotManagerTest, CheckMerge);
// During a snapshot, samples are acquired and aggregated. This structure
// contains all the information for a given histogram that persists between
// collections.
struct SampleInfo {
// The set of inconsistencies (flags) already seen for the histogram.
// See HistogramBase::Inconsistency for values.
uint32_t inconsistencies = 0;
};
// Capture and hold samples from a histogram. This does all the heavy
// lifting for PrepareDelta() and PrepareFinalDelta().
void PrepareSamples(const HistogramBase* histogram,
std::unique_ptr<HistogramSamples> samples);
// |histogram_flattener_| handles the logistics of recording the histogram
// deltas.
HistogramFlattener* const histogram_flattener_; // Weak.
// For histograms, track what has been previously seen, indexed
// by the hash of the histogram name.
std::map<uint64_t, SampleInfo> known_histograms_;
// A flag indicating if a thread is currently doing an operation. This is
// used to check against concurrent access which is not supported. A Thread-
// Checker is not sufficient because it may be guarded by an outside lock
// (as is the case with cronet).
std::atomic<bool> is_active_;
DISALLOW_COPY_AND_ASSIGN(HistogramSnapshotManager);
};
} // namespace base
#endif // BASE_METRICS_HISTOGRAM_SNAPSHOT_MANAGER_H_

View file

@@ -0,0 +1,31 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/metrics_hashes.h"
#include "base/hash/md5.h"
#include "base/logging.h"
#include "base/sys_byteorder.h"
namespace base {
namespace {
// Converts the 8-byte prefix of an MD5 hash into a uint64_t value.
inline uint64_t DigestToUInt64(const base::MD5Digest& digest) {
uint64_t value;
DCHECK_GE(sizeof(digest.a), sizeof(value));
memcpy(&value, digest.a, sizeof(value));
return base::NetToHost64(value);
}
} // namespace
uint64_t HashMetricName(base::StringPiece name) {
base::MD5Digest digest;
base::MD5Sum(name.data(), name.size(), &digest);
return DigestToUInt64(digest);
}
} // namespace base

View file

@@ -0,0 +1,21 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_METRICS_HASHES_H_
#define BASE_METRICS_METRICS_HASHES_H_
#include <stdint.h>
#include "base/base_export.h"
#include "base/strings/string_piece.h"
namespace base {
// Computes a uint64_t hash of a given string based on its MD5 hash. Suitable
// for metric names.
BASE_EXPORT uint64_t HashMetricName(base::StringPiece name);
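// Illustrative usage (not from the original source):
//   uint64_t name_hash = base::HashMetricName("My.Example.Metric");
// The same input string always yields the same 64-bit value, which is what
// makes the hash usable as a stable metric identifier across processes.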
} // namespace base
#endif // BASE_METRICS_METRICS_HASHES_H_

File diff suppressed because it is too large

View file

@@ -0,0 +1,507 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_PERSISTENT_HISTOGRAM_ALLOCATOR_H_
#define BASE_METRICS_PERSISTENT_HISTOGRAM_ALLOCATOR_H_
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/feature_list.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/persistent_memory_allocator.h"
#include "base/process/process_handle.h"
#include "base/strings/string_piece.h"
#include "base/synchronization/lock.h"
namespace base {
class BucketRanges;
class FilePath;
class PersistentSampleMapRecords;
class PersistentSparseHistogramDataManager;
class WritableSharedMemoryRegion;
// Feature definition for enabling histogram persistence.
BASE_EXPORT extern const Feature kPersistentHistogramsFeature;
// A data manager for sparse histograms so each instance of such doesn't have
// to separately iterate over the entire memory segment. Though this class
// will generally be accessed through the PersistentHistogramAllocator below,
// it can be used independently on any PersistentMemoryAllocator (making it
// usable for testing). This object supports only one instance of a sparse
// histogram for a given id. Tests that create multiple identical histograms,
// perhaps to simulate multiple processes, should create a separate manager
// for each.
class BASE_EXPORT PersistentSparseHistogramDataManager {
public:
// Constructs the data manager. The allocator must live longer than any
// managers that reference it.
explicit PersistentSparseHistogramDataManager(
PersistentMemoryAllocator* allocator);
~PersistentSparseHistogramDataManager();
// Returns the object that manages the persistent-sample-map records for a
// given |id|. Only one |user| of this data is allowed at a time. This does
// an automatic Acquire() on the records. The user must call Release() on
// the returned object when it is finished with it. Ownership of the records
// object stays with this manager.
PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
const void* user);
// Convenience method that gets the object for a given reference so callers
// don't have to also keep their own pointer to the appropriate allocator.
template <typename T>
T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
return allocator_->GetAsObject<T>(ref);
}
private:
friend class PersistentSampleMapRecords;
// Gets the object holding records for a given sample-map id.
PersistentSampleMapRecords* GetSampleMapRecordsWhileLocked(uint64_t id)
EXCLUSIVE_LOCKS_REQUIRED(lock_);
// Loads sample-map records looking for those belonging to the specified
// |load_id|. Records found for other sample-maps are held for later use
// without having to iterate again. This should be called only from a
// PersistentSampleMapRecords object because those objects have a contract
// that there are no other threads accessing the internal records_ field
// of the object that is passed in.
bool LoadRecords(PersistentSampleMapRecords* sample_map_records);
// Weak-pointer to the allocator used by the sparse histograms.
PersistentMemoryAllocator* allocator_;
// Iterator within the allocator for finding sample records.
PersistentMemoryAllocator::Iterator record_iterator_ GUARDED_BY(lock_);
// Mapping of sample-map IDs to their sample records.
std::map<uint64_t, std::unique_ptr<PersistentSampleMapRecords>>
sample_records_ GUARDED_BY(lock_);
base::Lock lock_;
DISALLOW_COPY_AND_ASSIGN(PersistentSparseHistogramDataManager);
};
// This class manages sample-records used by a PersistentSampleMap container
// that underlies a persistent SparseHistogram object. It is broken out into a
// top-level class so that it can be forward-declared in other header files
// rather than include this entire file as would be necessary if it were
// declared within the PersistentSparseHistogramDataManager class above.
class BASE_EXPORT PersistentSampleMapRecords {
public:
// Constructs an instance of this class. The manager object must live longer
// than all instances of this class that reference it, which is not usually
// a problem since these objects are generally managed from within that
// manager instance.
PersistentSampleMapRecords(PersistentSparseHistogramDataManager* data_manager,
uint64_t sample_map_id);
~PersistentSampleMapRecords();
// Resets the internal state for a new object using this data. The return
// value is "this" as a convenience.
PersistentSampleMapRecords* Acquire(const void* user);
// Indicates that the using object is done with this data.
void Release(const void* user);
// Gets the next reference to a persistent sample-map record. The type and
// layout of the data being referenced is defined entirely within the
// PersistentSampleMap class.
PersistentMemoryAllocator::Reference GetNext();
// Creates a new persistent sample-map record for sample |value| and returns
// a reference to it.
PersistentMemoryAllocator::Reference CreateNew(HistogramBase::Sample value);
// Convenience method that gets the object for a given reference so callers
// don't have to also keep their own pointer to the appropriate allocator.
// This is expected to be used with the SampleRecord structure defined inside
// the persistent_sample_map.cc file but since that isn't exported (for
// cleanliness of the interface), a template is defined that will be
// resolved when used inside that file.
template <typename T>
T* GetAsObject(PersistentMemoryAllocator::Reference ref) {
return data_manager_->GetAsObject<T>(ref);
}
private:
friend PersistentSparseHistogramDataManager;
// Weak-pointer to the parent data-manager object.
PersistentSparseHistogramDataManager* data_manager_;
// ID of PersistentSampleMap to which these records apply.
const uint64_t sample_map_id_;
// The current user of this set of records. It is used to ensure that no
// more than one object is using these records at a given time.
const void* user_ = nullptr;
// This is the count of how many "records" have already been read by the
// owning sample-map.
size_t seen_ = 0;
// This is the set of records previously found for a sample map. Because
// there is ever only one object with a given ID (typically a hash of a
// histogram name) and because the parent SparseHistogram has acquired
// its own lock before accessing the PersistentSampleMap it controls, this
// list can be accessed without acquiring any additional lock.
std::vector<PersistentMemoryAllocator::Reference> records_;
// This is the set of records found during iteration through memory. It
// is appended in bulk to "records". Access to this vector can be done
// only while holding the parent manager's lock.
std::vector<PersistentMemoryAllocator::Reference> found_;
DISALLOW_COPY_AND_ASSIGN(PersistentSampleMapRecords);
};
// This class manages histograms created within a PersistentMemoryAllocator.
class BASE_EXPORT PersistentHistogramAllocator {
public:
// A reference to a histogram. While this is implemented as PMA::Reference,
// it is not conceptually the same thing. Outside callers should always use
// a Reference matching the class it is for and not mix the two.
using Reference = PersistentMemoryAllocator::Reference;
// Iterator used for fetching persistent histograms from an allocator.
// It is lock-free and thread-safe.
// See PersistentMemoryAllocator::Iterator for more information.
class BASE_EXPORT Iterator {
public:
// Constructs an iterator on a given |allocator|, starting at the beginning.
// The allocator must live beyond the lifetime of the iterator.
explicit Iterator(PersistentHistogramAllocator* allocator);
// Gets the next histogram from persistent memory; returns null if there
// are no more histograms to be found. This may still be called again
// later to retrieve any new histograms added in the meantime.
std::unique_ptr<HistogramBase> GetNext() { return GetNextWithIgnore(0); }
// Gets the next histogram from persistent memory, ignoring one particular
// reference in the process. Pass |ignore| of zero (0) to ignore nothing.
std::unique_ptr<HistogramBase> GetNextWithIgnore(Reference ignore);
private:
// Weak-pointer to histogram allocator being iterated over.
PersistentHistogramAllocator* allocator_;
// The iterator used for stepping through objects in persistent memory.
// It is lock-free and thread-safe which is why this class is also such.
PersistentMemoryAllocator::Iterator memory_iter_;
DISALLOW_COPY_AND_ASSIGN(Iterator);
};
// A PersistentHistogramAllocator is constructed from a PersistentMemory-
// Allocator object of which it takes ownership.
explicit PersistentHistogramAllocator(
std::unique_ptr<PersistentMemoryAllocator> memory);
virtual ~PersistentHistogramAllocator();
// Direct access to underlying memory allocator. If the segment is shared
// across threads or processes, reading data through these values does
// not guarantee consistency. Use with care. Do not write.
PersistentMemoryAllocator* memory_allocator() {
return memory_allocator_.get();
}
// Implement the "metadata" API of a PersistentMemoryAllocator, forwarding
// those requests to the real one.
uint64_t Id() const { return memory_allocator_->Id(); }
const char* Name() const { return memory_allocator_->Name(); }
const void* data() const { return memory_allocator_->data(); }
size_t length() const { return memory_allocator_->length(); }
size_t size() const { return memory_allocator_->size(); }
size_t used() const { return memory_allocator_->used(); }
// Recreate a Histogram from data held in persistent memory. Though this
// object will be local to the current process, the sample data will be
// shared with all other threads referencing it. This method takes a |ref|
// to where the top-level histogram data may be found in this allocator.
// This method will return null if any problem is detected with the data.
std::unique_ptr<HistogramBase> GetHistogram(Reference ref);
// Allocate a new persistent histogram. The returned histogram will not
// be able to be located by other allocators until it is "finalized".
std::unique_ptr<HistogramBase> AllocateHistogram(
HistogramType histogram_type,
const std::string& name,
int minimum,
int maximum,
const BucketRanges* bucket_ranges,
int32_t flags,
Reference* ref_ptr);
// Finalize the creation of the histogram, making it available to other
// processes if |registered| (as in: added to the StatisticsRecorder) is
// true, forgetting it otherwise.
void FinalizeHistogram(Reference ref, bool registered);
// Merges the data in a persistent histogram with one held globally by the
// StatisticsRecorder, updating the "logged" samples within the passed
// object so that repeated merges are allowed. Don't call this on a "global"
// allocator because histograms created there will already be in the SR.
void MergeHistogramDeltaToStatisticsRecorder(HistogramBase* histogram);
// As above but merge the "final" delta. No update of "logged" samples is
// done which means it can operate on read-only objects. It's essential,
// however, not to call this more than once or those final samples will
// get recorded again.
void MergeHistogramFinalDeltaToStatisticsRecorder(
const HistogramBase* histogram);
// Returns the object that manages the persistent-sample-map records for a
// given |id|. Only one |user| of this data is allowed at a time. This does
// an automatic Acquire() on the records. The user must call Release() on
// the returned object when it is finished with it. Ownership stays with
// this allocator.
PersistentSampleMapRecords* UseSampleMapRecords(uint64_t id,
const void* user);
// Create internal histograms for tracking memory use and allocation sizes
// for allocator of |name| (which can simply be the result of Name()). This
// is done separately from construction for situations such as when the
// histograms will be backed by memory provided by this very allocator.
//
// IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
// with the following histograms:
// UMA.PersistentAllocator.name.Allocs
// UMA.PersistentAllocator.name.UsedPct
void CreateTrackingHistograms(StringPiece name);
void UpdateTrackingHistograms();
// Clears the internal |last_created_| reference so testing can validate
// operation without that optimization.
void ClearLastCreatedReferenceForTesting();
protected:
// The structure used to hold histogram data in persistent memory. It is
// defined and used entirely within the .cc file.
struct PersistentHistogramData;
// Gets the reference of the last histogram created, used to avoid
// trying to import what was just created.
PersistentHistogramAllocator::Reference last_created() {
return subtle::NoBarrier_Load(&last_created_);
}
// Gets the next histogram in persistent data based on iterator while
// ignoring a particular reference if it is found.
std::unique_ptr<HistogramBase> GetNextHistogramWithIgnore(Iterator* iter,
Reference ignore);
private:
// Create a histogram based on saved (persistent) information about it.
std::unique_ptr<HistogramBase> CreateHistogram(
PersistentHistogramData* histogram_data_ptr);
// Gets or creates an object in the global StatisticsRecorder matching
// the |histogram| passed. Null is returned if one was not found and
// one could not be created.
HistogramBase* GetOrCreateStatisticsRecorderHistogram(
const HistogramBase* histogram);
// The memory allocator that provides the actual histogram storage.
std::unique_ptr<PersistentMemoryAllocator> memory_allocator_;
// The data-manager used to improve performance of sparse histograms.
PersistentSparseHistogramDataManager sparse_histogram_data_manager_;
// A reference to the last-created histogram in the allocator, used to avoid
// trying to import what was just created.
// TODO(bcwhite): Change this to std::atomic<PMA::Reference> when available.
subtle::Atomic32 last_created_ = 0;
DISALLOW_COPY_AND_ASSIGN(PersistentHistogramAllocator);
};
// A special case of the PersistentHistogramAllocator that operates on a
// global scale, collecting histograms created through standard macros and
// the FactoryGet() method.
class BASE_EXPORT GlobalHistogramAllocator
: public PersistentHistogramAllocator {
public:
~GlobalHistogramAllocator() override;
// Create a global allocator using the passed-in memory |base|, |size|, and
// other parameters. Ownership of the memory segment remains with the caller.
static void CreateWithPersistentMemory(void* base,
size_t size,
size_t page_size,
uint64_t id,
StringPiece name);
// Create a global allocator using an internal block of memory of the
// specified |size| taken from the heap.
static void CreateWithLocalMemory(size_t size, uint64_t id, StringPiece name);
#if !defined(OS_NACL)
// Create a global allocator by memory-mapping a |file|. If the file does
// not exist, it will be created with the specified |size|. If the file does
// exist, the allocator will use and add to its contents, ignoring the passed
// size in favor of the existing size. Returns whether the global allocator
// was set.
static bool CreateWithFile(const FilePath& file_path,
size_t size,
uint64_t id,
StringPiece name);
// Creates a new file at |active_path|. If it already exists, it will first be
// moved to |base_path|. In all cases, any old file at |base_path| will be
// removed. If |spare_path| is non-empty and exists, that will be renamed and
// used as the active file. Otherwise, the file will be created using the
// given size, id, and name. Returns whether the global allocator was set.
static bool CreateWithActiveFile(const FilePath& base_path,
const FilePath& active_path,
const FilePath& spare_path,
size_t size,
uint64_t id,
StringPiece name);
// Uses ConstructFilePaths() to build a pair of file names which
// are then used for CreateWithActiveFile(). |name| is used for both the
// internal name for the allocator and also for the name of the file inside
// |dir|.
static bool CreateWithActiveFileInDir(const FilePath& dir,
size_t size,
uint64_t id,
StringPiece name);
// Constructs a filename using a name.
static FilePath ConstructFilePath(const FilePath& dir, StringPiece name);
// Like above but with timestamp and pid for use in upload directories.
static FilePath ConstructFilePathForUploadDir(const FilePath& dir,
StringPiece name,
base::Time stamp,
ProcessId pid);
// Parses a filename to extract name, timestamp, and pid.
static bool ParseFilePath(const FilePath& path,
std::string* out_name,
Time* out_stamp,
ProcessId* out_pid);
// Constructs a set of names in |dir| based on name that can be used for a
// base + active persistent memory mapped location for CreateWithActiveFile().
// The spare path is a file that can be pre-created and moved to be active
// without any startup penalty that comes from constructing the file. |name|
// will be used as the basename of the file inside |dir|. |out_base_path|,
// |out_active_path|, or |out_spare_path| may be null if not needed.
static void ConstructFilePaths(const FilePath& dir,
StringPiece name,
FilePath* out_base_path,
FilePath* out_active_path,
FilePath* out_spare_path);
// As above but puts the base files in a different "upload" directory. This
// is useful when moving all completed files into a single directory for easy
// upload management.
static void ConstructFilePathsForUploadDir(const FilePath& active_dir,
const FilePath& upload_dir,
const std::string& name,
FilePath* out_upload_path,
FilePath* out_active_path,
FilePath* out_spare_path);
// Create a "spare" file that can later be made the "active" file. This
// should be done on a background thread if possible.
static bool CreateSpareFile(const FilePath& spare_path, size_t size);
// Same as above but uses standard names. |name| is the name of the allocator
// and is also used to create the correct filename.
static bool CreateSpareFileInDir(const FilePath& dir_path,
size_t size,
StringPiece name);
#endif
// Create a global allocator using a block of shared memory accessed
// through the given |region|. The allocator maps the shared memory into
// current process's virtual address space and frees it upon destruction.
// The memory will continue to live if other processes have access to it.
static void CreateWithSharedMemoryRegion(
const WritableSharedMemoryRegion& region);
// Sets a GlobalHistogramAllocator for globally storing histograms in
// a space that can be persisted or shared between processes. There is only
// ever one allocator for all such histograms created by a single process.
// This takes ownership of the object and should be called as soon as
// possible during startup to capture as many histograms as possible and
// while operating single-threaded so there are no race-conditions.
static void Set(std::unique_ptr<GlobalHistogramAllocator> allocator);
// Gets a pointer to the global histogram allocator. Returns null if none
// exists.
static GlobalHistogramAllocator* Get();
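// Illustrative startup sketch (not part of the original source); the
// allocator size and name are hypothetical:
//   GlobalHistogramAllocator::CreateWithLocalMemory(1 << 20, /*id=*/0,
//                                                   "MyProcessMetrics");
//   GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
//   // Histograms created from here on are backed by |allocator|'s memory.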
// This access to the persistent allocator is only for testing; it extracts
// the current allocator completely. This allows easy creation of histograms
// within persistent memory segments which can then be extracted and used in
// other ways.
static std::unique_ptr<GlobalHistogramAllocator> ReleaseForTesting();
// Stores a pathname to which the contents of this allocator should be saved
// in order to persist the data for a later use.
void SetPersistentLocation(const FilePath& location);
// Retrieves a previously set pathname to which the contents of this allocator
// are to be saved.
const FilePath& GetPersistentLocation() const;
// Writes the internal data to a previously set location. This is generally
// called when a process is exiting from a section of code that may not know
// the filesystem. The data is written in an atomic manner. The return value
// indicates success.
bool WriteToPersistentLocation();
// If there is a global metrics file being updated on disk, mark it to be
// deleted when the process exits.
void DeletePersistentLocation();
private:
friend class StatisticsRecorder;
// Creates a new global histogram allocator.
explicit GlobalHistogramAllocator(
std::unique_ptr<PersistentMemoryAllocator> memory);
// Import new histograms from the global histogram allocator. It's possible
// for other processes to create histograms in the active memory segment;
// this adds those to the internal list of known histograms to avoid creating
// duplicates that would have to be merged during reporting. Every call to
// this method resumes from the last entry it saw; it costs nothing if
// nothing new has been added.
void ImportHistogramsToStatisticsRecorder();
// Builds a FilePath for a metrics file.
static FilePath MakeMetricsFilePath(const FilePath& dir, StringPiece name);
// Import always continues from where it left off, making use of a single
// iterator to continue the work.
Iterator import_iterator_;
// The location to which the data should be persisted.
FilePath persistent_location_;
DISALLOW_COPY_AND_ASSIGN(GlobalHistogramAllocator);
};
} // namespace base
#endif // BASE_METRICS_PERSISTENT_HISTOGRAM_ALLOCATOR_H_

View file

@@ -0,0 +1,151 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/persistent_histogram_storage.h"
#include "base/files/file_util.h"
#include "base/files/important_file_writer.h"
#include "base/logging.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/persistent_memory_allocator.h"
#include "base/process/memory.h"
#include "base/strings/string_util.h"
#include "base/strings/stringprintf.h"
#include "base/time/time.h"
#include "build/build_config.h"
#if defined(OS_WIN)
#include <windows.h>
// Dummy line to stop `git cl format` from reordering these includes.
#include <memoryapi.h>
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
#include <sys/mman.h>
#endif
namespace {
constexpr size_t kAllocSize = 1 << 20; // 1 MiB
void* AllocateLocalMemory(size_t size) {
void* address;
#if defined(OS_WIN)
address =
::VirtualAlloc(nullptr, size, MEM_RESERVE | MEM_COMMIT, PAGE_READWRITE);
if (address)
return address;
#elif defined(OS_POSIX) || defined(OS_FUCHSIA)
// MAP_ANON is deprecated on Linux but MAP_ANONYMOUS is not universal on Mac.
// MAP_SHARED is not available on Linux <2.4 but required on Mac.
address = ::mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_SHARED,
-1, 0);
if (address != MAP_FAILED)
return address;
#else
#error This architecture is not (yet) supported.
#endif
// As a last resort, just allocate the memory from the heap. This will
// achieve the same basic result but the acquired memory has to be
// explicitly zeroed and thus realized immediately (i.e. all pages are
// added to the process now instead of only when first accessed).
if (!base::UncheckedMalloc(size, &address))
return nullptr;
DCHECK(address);
memset(address, 0, size);
return address;
}
} // namespace
namespace base {
PersistentHistogramStorage::PersistentHistogramStorage(
StringPiece allocator_name,
StorageDirManagement storage_dir_management)
: storage_dir_management_(storage_dir_management) {
DCHECK(!allocator_name.empty());
DCHECK(IsStringASCII(allocator_name));
// This code may be executed before crash handling and/or OOM handling has
// been initialized for the process. Silently ignore a failed allocation
// (no metric persistence) rather than generating a crash that won't be
// caught/reported.
void* memory = AllocateLocalMemory(kAllocSize);
if (!memory)
return;
GlobalHistogramAllocator::CreateWithPersistentMemory(memory, kAllocSize, 0,
0, // No identifier.
allocator_name);
GlobalHistogramAllocator::Get()->CreateTrackingHistograms(allocator_name);
}
PersistentHistogramStorage::~PersistentHistogramStorage() {
PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
if (!allocator)
return;
allocator->UpdateTrackingHistograms();
if (disabled_)
return;
// Stop if the storage base directory has not been properly set.
if (storage_base_dir_.empty()) {
LOG(ERROR)
<< "Could not write \"" << allocator->Name()
<< "\" persistent histograms to file as the storage base directory "
"is not properly set.";
return;
}
FilePath storage_dir = storage_base_dir_.AppendASCII(allocator->Name());
switch (storage_dir_management_) {
case StorageDirManagement::kCreate:
if (!CreateDirectory(storage_dir)) {
LOG(ERROR)
<< "Could not write \"" << allocator->Name()
<< "\" persistent histograms to file as the storage directory "
"cannot be created.";
return;
}
break;
case StorageDirManagement::kUseExisting:
if (!DirectoryExists(storage_dir)) {
// When the consumer of this class decides to use an existing storage
// directory, it should ensure the directory's existence if it's
// essential.
LOG(ERROR)
<< "Could not write \"" << allocator->Name()
<< "\" persistent histograms to file as the storage directory "
"does not exist.";
return;
}
break;
}
// Save data using the current time as the filename. The actual filename
// doesn't matter (so long as it ends with the correct extension) but this
// works as well as anything.
Time::Exploded exploded;
Time::Now().LocalExplode(&exploded);
const FilePath file_path =
storage_dir
.AppendASCII(StringPrintf("%04d%02d%02d%02d%02d%02d", exploded.year,
exploded.month, exploded.day_of_month,
exploded.hour, exploded.minute,
exploded.second))
.AddExtension(PersistentMemoryAllocator::kFileExtension);
StringPiece contents(static_cast<const char*>(allocator->data()),
allocator->used());
if (!ImportantFileWriter::WriteFileAtomically(file_path, contents)) {
LOG(ERROR) << "Persistent histograms fail to write to file: "
<< file_path.value();
}
}
} // namespace base

View file

@@ -0,0 +1,68 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_
#define BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_
#include "base/base_export.h"
#include "base/files/file_path.h"
#include "base/macros.h"
#include "base/strings/string_piece.h"
namespace base {
// This class creates a fixed-size persistent memory block to allow histograms
// to be
// stored in it. When a PersistentHistogramStorage is destructed, histograms
// recorded during its lifetime are persisted in the directory
// |storage_base_dir_|/|allocator_name| (see the ctor for allocator_name).
// Histograms are not persisted if the storage directory does not exist on
// destruction. PersistentHistogramStorage should be instantiated as early as
// possible in the process lifetime and should never be instantiated again.
// Persisted histograms will eventually be reported by Chrome.
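//
// Illustrative usage sketch (not part of the original source); the allocator
// name and |metrics_dir| are hypothetical:
//   {
//     PersistentHistogramStorage storage(
//         "MyToolMetrics",
//         PersistentHistogramStorage::StorageDirManagement::kCreate);
//     storage.set_storage_base_dir(metrics_dir);
//     // ...run the tool and record histograms...
//   }  // Histograms are written to |metrics_dir|/MyToolMetrics on destruction.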
class BASE_EXPORT PersistentHistogramStorage {
public:
enum class StorageDirManagement { kCreate, kUseExisting };
// Creates a process-wide storage location for histograms that will be written
// to a file within a directory provided by |set_storage_base_dir()| on
// destruction.
// The |allocator_name| is used both as an internal name for the allocator
// and as the leaf directory name for the file to which the histograms are
// persisted. The string must be ASCII.
// |storage_dir_management| specifies if this instance reuses an existing
// storage directory, or is responsible for creating one.
PersistentHistogramStorage(StringPiece allocator_name,
StorageDirManagement storage_dir_management);
~PersistentHistogramStorage();
// The storage directory isn't always known during initial construction so
// it's set separately. The last one wins if there are multiple calls to this
// method.
void set_storage_base_dir(const FilePath& storage_base_dir) {
storage_base_dir_ = storage_base_dir;
}
// Disables histogram storage.
void Disable() { disabled_ = true; }
private:
// Metrics files are written into directory
// |storage_base_dir_|/|allocator_name| (see the ctor for allocator_name).
FilePath storage_base_dir_;
// The setting of the storage directory management.
const StorageDirManagement storage_dir_management_;
// A flag indicating if histogram storage is disabled. It starts with false,
// but can be set to true by the caller who decides to throw away its
// histogram data.
bool disabled_ = false;
DISALLOW_COPY_AND_ASSIGN(PersistentHistogramStorage);
};
} // namespace base
#endif // BASE_METRICS_PERSISTENT_HISTOGRAM_STORAGE_H_

File diff suppressed because it is too large

View file

@ -0,0 +1,901 @@
// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
#define BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_
#include <stdint.h>
#include <atomic>
#include <memory>
#include <type_traits>
#include "base/atomicops.h"
#include "base/base_export.h"
#include "base/files/file_path.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/memory/shared_memory_mapping.h"
#include "base/strings/string_piece.h"
namespace base {
class HistogramBase;
class MemoryMappedFile;
// Simple allocator for pieces of a memory block that may be persistent
// to some storage or shared across multiple processes. This class resides
// under base/metrics because it was written for that purpose. It is,
// however, fully general-purpose and can be freely moved to base/memory
// if other uses are found.
//
// This class provides for thread-secure (i.e. safe against other threads
// or processes that may be compromised and thus have malicious intent)
// allocation of memory within a designated block and also a mechanism by
// which other threads can learn of these allocations.
//
// There is (currently) no way to release an allocated block of data because
// doing so would risk invalidating pointers held by other processes and
// greatly complicate the allocation algorithm.
//
// Construction of this object can accept new, clean (i.e. zeroed) memory
// or previously initialized memory. In the first case, construction must
// be allowed to complete before letting other allocators attach to the same
// segment. In other words, don't share the segment until at least one
// allocator has been attached to it.
//
// Note that memory not in active use is not accessed so it is possible to
// use virtual memory, including memory-mapped files, as backing storage with
// the OS "pinning" new (zeroed) physical RAM pages only as they are needed.
//
// OBJECTS: Although the allocator can be used in a "malloc" sense, fetching
// character arrays and manipulating that memory manually, the better way is
// generally to use the "object" methods to create and manage allocations. In
// this way the sizing, type-checking, and construction are all automatic. For
// this to work, however, every type of stored object must define two public
// "constexpr" values, kPersistentTypeId and kExpectedInstanceSize, as such:
//
// struct MyPersistentObjectType {
// // SHA1(MyPersistentObjectType): Increment this if structure changes!
// static constexpr uint32_t kPersistentTypeId = 0x3E15F6DE + 1;
//
// // Expected size for 32/64-bit check. Update this if structure changes!
// static constexpr size_t kExpectedInstanceSize = 20;
//
// ...
// };
//
// kPersistentTypeId: This value is an arbitrary identifier that allows the
// identification of these objects in the allocator, including the ability
// to find them via iteration. The number is arbitrary but using the first
// four bytes of the SHA1 hash of the type name means that there shouldn't
// be any conflicts with other types that may also be stored in the memory.
// The fully qualified name (e.g. base::debug::MyPersistentObjectType) could
// be used to generate the hash if the type name seems common. Use a command
// like this to get the hash: echo -n "MyPersistentObjectType" | sha1sum
// If the structure layout changes, ALWAYS increment this number so that
// newer versions of the code don't try to interpret persistent data written
// by older versions with a different layout.
//
// kExpectedInstanceSize: This value is the hard-coded number that matches
// what sizeof(T) would return. By providing it explicitly, the allocator can
// verify that the structure is compatible between both 32-bit and 64-bit
// versions of the code.
//
// Using New manages the memory and then calls the default constructor for the
// object. Given that objects are persistent, no destructor is ever called
// automatically though a caller can explicitly call Delete to destruct it and
// change the type to something indicating it is no longer in use.
//
// Though persistent memory segments are transferrable between programs built
// for different natural word widths, they CANNOT be exchanged between CPUs
// of different endianness. Attempts to do so will simply see the existing data
// as corrupt and refuse to access any of it.
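//
// A minimal usage sketch (a rough illustration only; the segment size, id,
// and name are arbitrary, and MyPersistentObjectType is the example type
// described above):
//
//   char* base = new char[1 << 16]();  // Zeroed block, owned by the caller.
//   PersistentMemoryAllocator allocator(base, 1 << 16, 0, /*id=*/1234,
//                                       "demo", /*readonly=*/false);
//   MyPersistentObjectType* obj = allocator.New<MyPersistentObjectType>();
//   if (obj)
//     allocator.MakeIterable(obj);  // Now discoverable via iteration.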
class BASE_EXPORT PersistentMemoryAllocator {
public:
typedef uint32_t Reference;
// These states are used to indicate the overall condition of the memory
// segment irrespective of what is stored within it. Because the data is
// often persistent and thus needs to be readable by different versions of
// a program, these values are fixed and can never change.
enum MemoryState : uint8_t {
// Persistent memory starts all zeros and so shows "uninitialized".
MEMORY_UNINITIALIZED = 0,
// The header has been written and the memory is ready for use.
MEMORY_INITIALIZED = 1,
// The data should be considered deleted. This would be set when the
// allocator is being cleaned up. If file-backed, the file is likely
// to be deleted but since deletion can fail for a variety of reasons,
// having this extra status means a future reader can realize what
// should have happened.
MEMORY_DELETED = 2,
// Outside code can create states starting with this number; these, too,
// must never change between code versions.
MEMORY_USER_DEFINED = 100,
};
// Iterator for going through all iterable memory records in an allocator.
// Like the allocator itself, iterators are lock-free and thread-secure.
// That means that multiple threads can share an iterator and the same
// reference will not be returned twice.
//
// The order of the items returned by an iterator matches the order in which
// MakeIterable() was called on them. Once an allocation is made iterable,
// it is always such so the only possible difference between successive
// iterations is for more to be added to the end.
//
// Iteration, in general, is tolerant of corrupted memory. It will return
// what it can and stop only when corruption forces it to. Bad corruption
// could cause the same object to be returned many times but it will
// eventually quit.
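//
// A short sketch of walking every iterable object of one type (using the
// MyPersistentObjectType example from the file comment; illustrative only):
//
//   PersistentMemoryAllocator::Iterator iter(&allocator);
//   uint32_t type;
//   for (Reference ref = iter.GetNext(&type); ref != kReferenceNull;
//        ref = iter.GetNext(&type)) {
//     if (type != MyPersistentObjectType::kPersistentTypeId)
//       continue;
//     const MyPersistentObjectType* obj =
//         iter.GetAsObject<MyPersistentObjectType>(ref);
//     if (obj) {
//       // ... read from |obj| ...
//     }
//   }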
class BASE_EXPORT Iterator {
public:
// Constructs an iterator on a given |allocator|, starting at the beginning.
// The allocator must live beyond the lifetime of the iterator. This class
// has read-only access to the allocator (hence "const") but the returned
// references can be used on a read/write version, too.
explicit Iterator(const PersistentMemoryAllocator* allocator);
// As above but resuming from the |starting_after| reference. The first call
// to GetNext() will return the next object found after that reference. The
// reference must be to an "iterable" object; references to non-iterable
// objects (those that never had MakeIterable() called for them) will cause
// a run-time error.
Iterator(const PersistentMemoryAllocator* allocator,
Reference starting_after);
// Resets the iterator back to the beginning.
void Reset();
// Resets the iterator, resuming from the |starting_after| reference.
void Reset(Reference starting_after);
// Returns the previously retrieved reference, or kReferenceNull if none.
// If constructed or reset with a |starting_after| location, this will return
// that value.
Reference GetLast();
// Gets the next iterable, storing that type in |type_return|. The actual
// return value is a reference to the allocation inside the allocator or
// zero if there are no more. GetNext() may still be called again at a
// later time to retrieve any new allocations that have been added.
Reference GetNext(uint32_t* type_return);
// Similar to above but gets the next iterable of a specific |type_match|.
// This should not be mixed with calls to GetNext() because any allocations
// skipped here due to a type mismatch will never be returned by later
// calls to GetNext(), meaning it's possible to completely miss entries.
Reference GetNextOfType(uint32_t type_match);
// As above but works using object type.
template <typename T>
Reference GetNextOfType() {
return GetNextOfType(T::kPersistentTypeId);
}
// As above but works using objects and returns null if not found.
template <typename T>
const T* GetNextOfObject() {
return GetAsObject<T>(GetNextOfType<T>());
}
// Converts references to objects. This is a convenience method so that
// users of the iterator don't need to also have their own pointer to the
// allocator over which the iterator runs in order to retrieve objects.
// Because the iterator is not read/write, only "const" objects can be
// fetched. Non-const objects can be fetched using the reference on a
// non-const (external) pointer to the same allocator (or use const_cast
// to remove the qualifier).
template <typename T>
const T* GetAsObject(Reference ref) const {
return allocator_->GetAsObject<T>(ref);
}
// Similar to GetAsObject() but converts references to arrays of things.
template <typename T>
const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
return allocator_->GetAsArray<T>(ref, type_id, count);
}
// Convert a generic pointer back into a reference. A null reference will
// be returned if |memory| is not inside the persistent segment or does not
// point to an object of the specified |type_id|.
Reference GetAsReference(const void* memory, uint32_t type_id) const {
return allocator_->GetAsReference(memory, type_id);
}
// As above but convert an object back into a reference.
template <typename T>
Reference GetAsReference(const T* obj) const {
return allocator_->GetAsReference(obj);
}
private:
// Weak-pointer to memory allocator being iterated over.
const PersistentMemoryAllocator* allocator_;
// The last record that was returned.
std::atomic<Reference> last_record_;
// The number of records found; used for detecting loops.
std::atomic<uint32_t> record_count_;
DISALLOW_COPY_AND_ASSIGN(Iterator);
};
// Returned information about the internal state of the heap.
struct MemoryInfo {
size_t total;
size_t free;
};
enum : Reference {
// A common "null" reference value.
kReferenceNull = 0,
};
enum : uint32_t {
// A value that will match any type when doing lookups.
kTypeIdAny = 0x00000000,
// A value indicating that the type is in transition. Work is being done
// on the contents to prepare it for a new type to come.
kTypeIdTransitioning = 0xFFFFFFFF,
};
enum : size_t {
kSizeAny = 1 // Constant indicating that any array size is acceptable.
};
// This is the standard file extension (suitable for being passed to the
// AddExtension() method of base::FilePath) for dumps of persistent memory.
static const base::FilePath::CharType kFileExtension[];
// The allocator operates on any arbitrary block of memory. Creation and
// persisting or sharing of that block with another process is the
// responsibility of the caller. The allocator needs to know only the
// block's |base| address, the total |size| of the block, and any internal
// |page| size (zero if not paged) across which allocations should not span.
// The |id| is an arbitrary value the caller can use to identify a
// particular memory segment. It will only be loaded during the initial
// creation of the segment and can be checked by the caller for consistency.
// The |name|, if provided, is used to distinguish histograms for this
// allocator. Only the primary owner of the segment should define this value;
// other processes can learn it from the shared state. If the underlying
// memory is |readonly| then no changes will be made to it. The resulting
// object should be stored as a "const" pointer.
//
// PersistentMemoryAllocator does NOT take ownership of the memory block.
// The caller must manage it and ensure it stays available throughout the
// lifetime of this object.
//
// Memory segments for sharing must have had an allocator attached to them
// before actually being shared. If the memory segment was just created, it
// should be zeroed before being passed here. If it was an existing segment,
// the values here will be compared to copies stored in the shared segment
// as a guard against corruption.
//
// Make sure that the memory segment is acceptable (see IsMemoryAcceptable()
// method below) before construction if the definition of the segment can
// vary in any way at run-time. Invalid memory segments will cause a crash.
PersistentMemoryAllocator(void* base, size_t size, size_t page_size,
uint64_t id, base::StringPiece name,
bool readonly);
virtual ~PersistentMemoryAllocator();
// Check if memory segment is acceptable for creation of an Allocator. This
// doesn't do any analysis of the data and so doesn't guarantee that the
// contents are valid, just that the parameters won't cause the program to
// abort. The IsCorrupt() method will report detection of data problems
// found during construction and general operation.
static bool IsMemoryAcceptable(const void* data, size_t size,
size_t page_size, bool readonly);
// Get the internal identifier for this persistent memory segment.
uint64_t Id() const;
// Get the internal name of this allocator (possibly an empty string).
const char* Name() const;
// Is this segment open only for read?
bool IsReadonly() const { return readonly_; }
// Manage the saved state of the memory.
void SetMemoryState(uint8_t memory_state);
uint8_t GetMemoryState() const;
// Create internal histograms for tracking memory use and allocation sizes
// for allocator of |name| (which can simply be the result of Name()). This
// is done separately from construction for situations such as when the
// histograms will be backed by memory provided by this very allocator.
//
// IMPORTANT: Callers must update tools/metrics/histograms/histograms.xml
// with the following histograms:
// UMA.PersistentAllocator.name.Errors
// UMA.PersistentAllocator.name.UsedPct
void CreateTrackingHistograms(base::StringPiece name);
// Flushes the persistent memory to any backing store. This typically does
// nothing but is used by the FilePersistentMemoryAllocator to inform the
// OS that all the data should be sent to the disk immediately. This is
// useful in the rare case where something has just been stored that needs
// to survive a hard shutdown of the machine like from a power failure.
// The |sync| parameter indicates if this call should block until the flush
// is complete but is only advisory and may or may not have an effect
// depending on the capabilities of the OS. Synchronous flushes are allowed
// only from threads that are allowed to do I/O but since |sync| is only
// advisory, all flushes should be done on IO-capable threads.
void Flush(bool sync);
// Direct access to underlying memory segment. If the segment is shared
// across threads or processes, reading data through these values does
// not guarantee consistency. Use with care. Do not write.
const void* data() const { return const_cast<const char*>(mem_base_); }
size_t length() const { return mem_size_; }
size_t size() const { return mem_size_; }
size_t used() const;
// Get an object referenced by a |ref|. For safety reasons, the |type_id|
// code and size-of(|T|) are compared to ensure the reference is valid
// and cannot return an object outside of the memory segment. A |type_id| of
// kTypeIdAny (zero) will match any though the size is still checked. NULL is
// returned if any problem is detected, such as corrupted storage or incorrect
// parameters. Callers MUST check that the returned value is not-null EVERY
// TIME before accessing it or risk crashing! Once dereferenced, the pointer
// is safe to reuse forever.
//
// It is essential that the object be of a fixed size. All fields must be of
// a defined type that does not change based on the compiler or the CPU
// natural word size. Acceptable are char, float, double, and (u)intXX_t.
// Unacceptable are int, bool, and wchar_t, which are implementation-defined
// with regard to their size.
//
// Alignment must also be consistent. A uint64_t after a uint32_t will pad
// differently between 32 and 64 bit architectures. Either put the bigger
// elements first, group smaller elements into blocks the size of larger
// elements, or manually insert padding fields as appropriate for the
// largest architecture, including at the end.
//
// To protect against mistakes, all objects must have the attribute
// |kExpectedInstanceSize| (static constexpr size_t) that is a hard-coded
// numerical value -- NNN, not sizeof(T) -- that can be tested. If the
// instance size is not fixed, at least one build will fail.
//
// If the size of a structure changes, the type-ID used to recognize it
// should also change so later versions of the code don't try to read
// incompatible structures from earlier versions.
//
// NOTE: Though this method will guarantee that an object of the specified
// type can be accessed without going outside the bounds of the memory
// segment, it makes no guarantees of the validity of the data within the
// object itself. If it is expected that the contents of the segment could
// be compromised with malicious intent, the object must be hardened as well.
//
// Though the persistent data may be "volatile" if it is shared with
// other processes, such is not necessarily the case. The internal
// "volatile" designation is discarded so as to not propagate the viral
// nature of that keyword to the caller. It can add it back, if necessary,
// based on knowledge of how the allocator is being used.
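//
// For example, a layout that is identical in 32-bit and 64-bit builds might
// look like this (a sketch only, not a type Chrome actually stores):
//
//   struct MyCounters {
//     static constexpr uint32_t kPersistentTypeId = 0x0A0B0C0D;  // Arbitrary.
//     static constexpr size_t kExpectedInstanceSize = 16;
//     uint64_t big_total;    // 8-byte members first...
//     uint32_t small_count;  // ...then 4-byte members...
//     uint32_t padding;      // ...with explicit padding to a multiple of 8.
//   };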
template <typename T>
T* GetAsObject(Reference ref) {
static_assert(std::is_standard_layout<T>::value, "only standard objects");
static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
return const_cast<T*>(reinterpret_cast<volatile T*>(
GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
}
template <typename T>
const T* GetAsObject(Reference ref) const {
static_assert(std::is_standard_layout<T>::value, "only standard objects");
static_assert(!std::is_array<T>::value, "use GetAsArray<>()");
static_assert(T::kExpectedInstanceSize == sizeof(T), "inconsistent size");
return const_cast<const T*>(reinterpret_cast<const volatile T*>(
GetBlockData(ref, T::kPersistentTypeId, sizeof(T))));
}
// Like GetAsObject but get an array of simple, fixed-size types.
//
// Use a |count| of the required number of array elements, or kSizeAny.
// GetAllocSize() can be used to calculate the upper bound but isn't reliable
// because padding can make space for extra elements that were not written.
//
// Remember that an array of char is a string but may not be NUL terminated.
//
// There are no compile-time or run-time checks to ensure 32/64-bit size
// compatibility when using these accessors. Only use fixed-size types such
// as char, float, double, or (u)intXX_t.
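//
// For example (a sketch; the type-id and length are arbitrary and the caller
// must still null-check the result):
//
//   Reference ref = allocator.Allocate(100 * sizeof(char), /*type_id=*/0x99);
//   char* text = allocator.GetAsArray<char>(ref, 0x99, 100);
//   if (text)
//     memcpy(text, "hello", 5);  // Not NUL-terminated unless written so.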
template <typename T>
T* GetAsArray(Reference ref, uint32_t type_id, size_t count) {
static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
return const_cast<T*>(reinterpret_cast<volatile T*>(
GetBlockData(ref, type_id, count * sizeof(T))));
}
template <typename T>
const T* GetAsArray(Reference ref, uint32_t type_id, size_t count) const {
static_assert(std::is_fundamental<T>::value, "use GetAsObject<>()");
return const_cast<const T*>(reinterpret_cast<const volatile T*>(
GetBlockData(ref, type_id, count * sizeof(T))));
}
// Get the corresponding reference for an object held in persistent memory.
// If the |memory| is not valid or the type does not match, a kReferenceNull
// result will be returned.
Reference GetAsReference(const void* memory, uint32_t type_id) const;
// Get the number of bytes allocated to a block. This is useful when storing
// arrays in order to validate the ending boundary. The returned value will
// include any padding added to achieve the required alignment and so could
// be larger than given in the original Allocate() request.
size_t GetAllocSize(Reference ref) const;
// Access the internal "type" of an object. This generally isn't necessary
// but can be used to "clear" the type and so effectively mark it as deleted
// even though the memory stays valid and allocated. Changing the type is
// an atomic compare/exchange and so requires knowing the existing value.
// It will return false if the existing type is not what is expected.
//
// Changing the type doesn't mean the data is compatible with the new type.
// Passing true for |clear| will zero the memory after the type has been
// changed away from |from_type_id| but before it becomes |to_type_id| meaning
// that it is done in a manner that is thread-safe. Memory is guaranteed to
// be zeroed atomically by machine-word in a monotonically increasing order.
//
// It will likely be necessary to reconstruct the type before it can be used.
// Changing the type WILL NOT invalidate existing pointers to the data, either
// in this process or others, so changing the data structure could have
// unpredictable results. USE WITH CARE!
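//
// For example, effectively "freeing" an object by clearing its type (the
// type values here are illustrative only):
//
//   uint32_t old_type = allocator.GetType(ref);
//   if (old_type == MyPersistentObjectType::kPersistentTypeId)
//     allocator.ChangeType(ref, /*to_type_id=*/0, old_type, /*clear=*/true);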
uint32_t GetType(Reference ref) const;
bool ChangeType(Reference ref,
uint32_t to_type_id,
uint32_t from_type_id,
bool clear);
// Allocated objects can be added to an internal list that can then be
// iterated over by other processes. If an allocated object can be found
// another way, such as by having its reference within a different object
// that will be made iterable, then this call is not necessary. This always
// succeeds unless corruption is detected; check IsCorrupt() to find out.
// Once an object is made iterable, its position in iteration can never
// change; new iterable objects will always be added after it in the series.
// Changing the type does not alter its "iterable" status.
void MakeIterable(Reference ref);
// Get the information about the amount of free space in the allocator. The
// amount of free space should be treated as approximate due to extras from
// alignment and metadata. Concurrent allocations from other threads will
// also make the true amount less than what is reported.
void GetMemoryInfo(MemoryInfo* meminfo) const;
// If there is some indication that the memory has become corrupted,
// calling this will attempt to prevent further damage by indicating to
// all processes that something is not as expected.
void SetCorrupt() const;
// This can be called to determine if corruption has been detected in the
// segment, possibly by a malicious actor. Once detected, future allocations
// will fail and iteration may not locate all objects.
bool IsCorrupt() const;
// Flag set if an allocation has failed because the memory segment was full.
bool IsFull() const;
// Update those "tracking" histograms which do not get updates during regular
// operation, such as how much memory is currently used. This should be
// called before such information is to be displayed or uploaded.
void UpdateTrackingHistograms();
// While the above works much like malloc & free, these next methods provide
// an "object" interface similar to new and delete.
// Reserve space in the memory segment of the desired |size| and |type_id|.
// A return value of zero indicates the allocation failed, otherwise the
// returned reference can be used by any process to get a real pointer via
// the GetAsObject() or GetAsArray() calls. The actual allocated size may be
// larger and will always be a multiple of 8 bytes (64 bits).
Reference Allocate(size_t size, uint32_t type_id);
// Allocate and construct an object in persistent memory. The type must have
// both (size_t) kExpectedInstanceSize and (uint32_t) kPersistentTypeId
// static constexpr fields that are used to ensure compatibility between
// software versions. An optional size parameter can be specified to force
// the allocation to be bigger than the size of the object; this is useful
// when the last field is actually variable length.
template <typename T>
T* New(size_t size) {
if (size < sizeof(T))
size = sizeof(T);
Reference ref = Allocate(size, T::kPersistentTypeId);
void* mem =
const_cast<void*>(GetBlockData(ref, T::kPersistentTypeId, size));
if (!mem)
return nullptr;
DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
return new (mem) T();
}
template <typename T>
T* New() {
return New<T>(sizeof(T));
}
// Similar to New, above, but construct the object out of an existing memory
// block and of an expected type. If |clear| is true, memory will be zeroed
// before construction. Though this is not standard object behavior, it
// is present to match with new allocations that always come from zeroed
// memory. Anything previously present simply ceases to exist; no destructor
// is called for it so explicitly Delete() the old object first if need be.
// Calling this will not invalidate existing pointers to the object, either
// in this process or others, so changing the object could have unpredictable
// results. USE WITH CARE!
template <typename T>
T* New(Reference ref, uint32_t from_type_id, bool clear) {
DCHECK_LE(sizeof(T), GetAllocSize(ref)) << "alloc not big enough for obj";
// Make sure the memory is appropriate. This won't be used until after
// the type is changed but checking first avoids the possibility of having
// to change the type back.
void* mem = const_cast<void*>(GetBlockData(ref, 0, sizeof(T)));
if (!mem)
return nullptr;
// Ensure the allocator's internal alignment is sufficient for this object.
// This protects against coding errors in the allocator.
DCHECK_EQ(0U, reinterpret_cast<uintptr_t>(mem) & (alignof(T) - 1));
// Change the type, clearing the memory if so desired. The new type is
// "transitioning" so that there is no race condition with the construction
// of the object should another thread be simultaneously iterating over
// data. This will "acquire" the memory so no changes get reordered before
// it.
if (!ChangeType(ref, kTypeIdTransitioning, from_type_id, clear))
return nullptr;
// Construct an object of the desired type on this memory, just as if
// New() had been called to create it.
T* obj = new (mem) T();
// Finally change the type to the desired one. This will "release" all of
// the changes above and so provide a consistent view to other threads.
bool success =
ChangeType(ref, T::kPersistentTypeId, kTypeIdTransitioning, false);
DCHECK(success);
return obj;
}
// Deletes an object by destructing it and then changing the type to a
// different value (default 0).
template <typename T>
void Delete(T* obj, uint32_t new_type) {
// Get the reference for the object.
Reference ref = GetAsReference<T>(obj);
// First change the type to "transitioning" so there is no race condition
// where another thread could find the object through iteration while it
// is being destructed. This will "acquire" the memory so no changes get
// reordered before it. It will fail if |ref| is invalid.
if (!ChangeType(ref, kTypeIdTransitioning, T::kPersistentTypeId, false))
return;
// Destruct the object.
obj->~T();
// Finally change the type to the desired value. This will "release" all
// the changes above.
bool success = ChangeType(ref, new_type, kTypeIdTransitioning, false);
DCHECK(success);
}
template <typename T>
void Delete(T* obj) {
Delete<T>(obj, 0);
}
// As above but works with objects allocated from persistent memory.
template <typename T>
Reference GetAsReference(const T* obj) const {
return GetAsReference(obj, T::kPersistentTypeId);
}
// As above but works with an object allocated from persistent memory.
template <typename T>
void MakeIterable(const T* obj) {
MakeIterable(GetAsReference<T>(obj));
}
protected:
enum MemoryType {
MEM_EXTERNAL,
MEM_MALLOC,
MEM_VIRTUAL,
MEM_SHARED,
MEM_FILE,
};
struct Memory {
Memory(void* b, MemoryType t) : base(b), type(t) {}
void* base;
MemoryType type;
};
// Constructs the allocator. Everything is the same as the public allocator
// except |memory| which is a structure with additional information besides
// the base address.
PersistentMemoryAllocator(Memory memory, size_t size, size_t page_size,
uint64_t id, base::StringPiece name,
bool readonly);
// Implementation of Flush that accepts how much to flush.
virtual void FlushPartial(size_t length, bool sync);
volatile char* const mem_base_; // Memory base. (char so sizeof guaranteed 1)
const MemoryType mem_type_; // Type of memory allocation.
const uint32_t mem_size_; // Size of entire memory segment.
const uint32_t mem_page_; // Page size allocations shouldn't cross.
const size_t vm_page_size_; // The page size used by the OS.
private:
struct SharedMetadata;
struct BlockHeader;
static const uint32_t kAllocAlignment;
static const Reference kReferenceQueue;
// The shared metadata is always located at the top of the memory segment.
// These convenience functions eliminate constant casting of the base
// pointer within the code.
const SharedMetadata* shared_meta() const {
return reinterpret_cast<const SharedMetadata*>(
const_cast<const char*>(mem_base_));
}
SharedMetadata* shared_meta() {
return reinterpret_cast<SharedMetadata*>(const_cast<char*>(mem_base_));
}
// Actual method for doing the allocation.
Reference AllocateImpl(size_t size, uint32_t type_id);
// Get the block header associated with a specific reference.
const volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id,
uint32_t size, bool queue_ok,
bool free_ok) const;
volatile BlockHeader* GetBlock(Reference ref, uint32_t type_id, uint32_t size,
bool queue_ok, bool free_ok) {
return const_cast<volatile BlockHeader*>(
const_cast<const PersistentMemoryAllocator*>(this)->GetBlock(
ref, type_id, size, queue_ok, free_ok));
}
// Get the actual data within a block associated with a specific reference.
const volatile void* GetBlockData(Reference ref, uint32_t type_id,
uint32_t size) const;
volatile void* GetBlockData(Reference ref, uint32_t type_id,
uint32_t size) {
return const_cast<volatile void*>(
const_cast<const PersistentMemoryAllocator*>(this)->GetBlockData(
ref, type_id, size));
}
// Record an error in the internal histogram.
void RecordError(int error) const;
const bool readonly_; // Indicates access to read-only memory.
mutable std::atomic<bool> corrupt_; // Local version of "corrupted" flag.
HistogramBase* allocs_histogram_; // Histogram recording allocs.
HistogramBase* used_histogram_; // Histogram recording used space.
HistogramBase* errors_histogram_; // Histogram recording errors.
friend class PersistentMemoryAllocatorTest;
FRIEND_TEST_ALL_PREFIXES(PersistentMemoryAllocatorTest, AllocateAndIterate);
DISALLOW_COPY_AND_ASSIGN(PersistentMemoryAllocator);
};
// This allocator uses a local memory block it allocates from the general
// heap. It is generally used when some kind of "death rattle" handler will
// save the contents to persistent storage during process shutdown. It is
// also useful for testing.
class BASE_EXPORT LocalPersistentMemoryAllocator
: public PersistentMemoryAllocator {
public:
LocalPersistentMemoryAllocator(size_t size, uint64_t id,
base::StringPiece name);
~LocalPersistentMemoryAllocator() override;
private:
// Allocates a block of local memory of the specified |size|, ensuring that
// the memory will not be physically allocated until accessed and will read
// as zero when that happens.
static Memory AllocateLocalMemory(size_t size);
// Deallocates a block of local |memory| of the specified |size|.
static void DeallocateLocalMemory(void* memory, size_t size, MemoryType type);
DISALLOW_COPY_AND_ASSIGN(LocalPersistentMemoryAllocator);
};
// This allocator takes a writable shared memory mapping object and performs
// allocation from it. The allocator takes ownership of the mapping object.
class BASE_EXPORT WritableSharedPersistentMemoryAllocator
: public PersistentMemoryAllocator {
public:
WritableSharedPersistentMemoryAllocator(
base::WritableSharedMemoryMapping memory,
uint64_t id,
base::StringPiece name);
~WritableSharedPersistentMemoryAllocator() override;
// Ensure that the memory isn't so invalid that it would crash when passing it
// to the allocator. This doesn't guarantee the data is valid, just that it
// won't cause the program to abort. The existing IsCorrupt() call will handle
// the rest.
static bool IsSharedMemoryAcceptable(
const base::WritableSharedMemoryMapping& memory);
private:
base::WritableSharedMemoryMapping shared_memory_;
DISALLOW_COPY_AND_ASSIGN(WritableSharedPersistentMemoryAllocator);
};
// This allocator takes a read-only shared memory mapping object and performs
// allocation from it. The allocator takes ownership of the mapping object.
class BASE_EXPORT ReadOnlySharedPersistentMemoryAllocator
: public PersistentMemoryAllocator {
public:
ReadOnlySharedPersistentMemoryAllocator(
base::ReadOnlySharedMemoryMapping memory,
uint64_t id,
base::StringPiece name);
~ReadOnlySharedPersistentMemoryAllocator() override;
// Ensure that the memory isn't so invalid that it would crash when passing it
// to the allocator. This doesn't guarantee the data is valid, just that it
// won't cause the program to abort. The existing IsCorrupt() call will handle
// the rest.
static bool IsSharedMemoryAcceptable(
const base::ReadOnlySharedMemoryMapping& memory);
private:
base::ReadOnlySharedMemoryMapping shared_memory_;
DISALLOW_COPY_AND_ASSIGN(ReadOnlySharedPersistentMemoryAllocator);
};
#if !defined(OS_NACL) // NACL doesn't support any kind of file access in build.
// This allocator takes a memory-mapped file object and performs allocation
// from it. The allocator takes ownership of the file object.
class BASE_EXPORT FilePersistentMemoryAllocator
: public PersistentMemoryAllocator {
public:
// A |max_size| of zero will use the length of the file as the maximum
// size. The |file| object must have been already created with sufficient
// permissions (read, read/write, or read/write/extend).
FilePersistentMemoryAllocator(std::unique_ptr<MemoryMappedFile> file,
size_t max_size,
uint64_t id,
base::StringPiece name,
bool read_only);
~FilePersistentMemoryAllocator() override;
// Ensure that the file isn't so invalid that it would crash when passing it
// to the allocator. This doesn't guarantee the file is valid, just that it
// won't cause the program to abort. The existing IsCorrupt() call will handle
// the rest.
static bool IsFileAcceptable(const MemoryMappedFile& file, bool read_only);
// Load all or a portion of the file into memory for fast access. This can
// be used to force the disk access to be done on a background thread and
// then have the data available to be read on the main thread with a greatly
// reduced risk of blocking due to I/O. The risk isn't eliminated completely
// because the system could always release the memory when under pressure
// but this can happen to any block of memory (i.e. swapped out).
void Cache();
protected:
// PersistentMemoryAllocator:
void FlushPartial(size_t length, bool sync) override;
private:
std::unique_ptr<MemoryMappedFile> mapped_file_;
DISALLOW_COPY_AND_ASSIGN(FilePersistentMemoryAllocator);
};
#endif // !defined(OS_NACL)
// An allocation that is defined but not executed until required at a later
// time. This allows for potential users of an allocation to be decoupled
// from the logic that defines it. In addition, there can be multiple users
// of the same allocation or any region thereof that are guaranteed to always
// use the same space. It's okay to copy/move these objects.
//
// This is a top-level class instead of an inner class of the PMA so that it
// can be forward-declared in other header files without the need to include
// the full contents of this file.
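//
// A minimal usage sketch (the type value, size, and storage for the shared
// reference are illustrative only):
//
//   std::atomic<PersistentMemoryAllocator::Reference> stored_ref(0);
//   DelayedPersistentAllocation delayed(&allocator, &stored_ref,
//                                       /*type=*/0x5A5A5A5A, /*size=*/256,
//                                       /*make_iterable=*/true);
//   void* mem = delayed.Get();  // The allocation happens on the first Get().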
class BASE_EXPORT DelayedPersistentAllocation {
public:
using Reference = PersistentMemoryAllocator::Reference;
// Creates a delayed allocation using the specified |allocator|. When
// needed, the memory will be allocated using the specified |type| and
// |size|. If |offset| is given, the returned pointer will be at that
// offset into the segment; this allows combining allocations into a
// single persistent segment to reduce overhead and means an "all or
// nothing" request. Note that |size| is always the total memory size
// and |offset| is just indicating the start of a block within it. If
// |make_iterable| was true, the allocation will be made iterable when it
// is created; already existing allocations are not changed.
//
// Once allocated, a reference to the segment will be stored at |ref|.
// This shared location must be initialized to zero (0); it is checked
// with every Get() request to see if the allocation has already been
// done. If reading |ref| outside of this object, be sure to do an
// "acquire" load. Don't write to it -- leave that to this object.
//
// For convenience, methods taking both Atomic32 and std::atomic<Reference>
// are defined.
DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
subtle::Atomic32* ref,
uint32_t type,
size_t size,
bool make_iterable);
DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
subtle::Atomic32* ref,
uint32_t type,
size_t size,
size_t offset,
bool make_iterable);
DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
std::atomic<Reference>* ref,
uint32_t type,
size_t size,
bool make_iterable);
DelayedPersistentAllocation(PersistentMemoryAllocator* allocator,
std::atomic<Reference>* ref,
uint32_t type,
size_t size,
size_t offset,
bool make_iterable);
~DelayedPersistentAllocation();
// Gets a pointer to the defined allocation. This will realize the request
// and update the reference provided during construction. The memory will
// be zeroed the first time it is returned; after that it is shared with
// all other Get() requests and so shows any changes made to it elsewhere.
//
// If the allocation fails for any reason, null will be returned. This works
// even on "const" objects because the allocation is already defined, just
// delayed.
void* Get() const;
// Gets the internal reference value. If this returns a non-zero value then
// a subsequent call to Get() will do nothing but convert that reference into
// a memory location -- useful for accessing an existing allocation without
// creating one unnecessarily.
Reference reference() const {
return reference_->load(std::memory_order_relaxed);
}
private:
// The underlying object that does the actual allocation of memory. Its
// lifetime must exceed that of all DelayedPersistentAllocation objects
// that use it.
PersistentMemoryAllocator* const allocator_;
// The desired type and size of the allocated segment plus the offset
// within it for the defined request.
const uint32_t type_;
const uint32_t size_;
const uint32_t offset_;
// Flag indicating if allocation should be made iterable when done.
const bool make_iterable_;
// The location at which a reference to the allocated segment is to be
// stored once the allocation is complete. If multiple delayed allocations
// share the same pointer then an allocation on one will amount to an
// allocation for all.
volatile std::atomic<Reference>* const reference_;
// No DISALLOW_COPY_AND_ASSIGN as it's okay to copy/move these objects.
};
} // namespace base
#endif // BASE_METRICS_PERSISTENT_MEMORY_ALLOCATOR_H_

View file

@ -0,0 +1,304 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/persistent_sample_map.h"
#include "base/logging.h"
#include "base/metrics/histogram_macros.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/numerics/safe_conversions.h"
#include "base/stl_util.h"
namespace base {
typedef HistogramBase::Count Count;
typedef HistogramBase::Sample Sample;
namespace {
// An iterator for going through a PersistentSampleMap. The logic here is
// identical to that of SampleMapIterator but with different data structures.
// Changes here likely need to be duplicated there.
class PersistentSampleMapIterator : public SampleCountIterator {
public:
typedef std::map<HistogramBase::Sample, HistogramBase::Count*>
SampleToCountMap;
explicit PersistentSampleMapIterator(const SampleToCountMap& sample_counts);
~PersistentSampleMapIterator() override;
// SampleCountIterator:
bool Done() const override;
void Next() override;
void Get(HistogramBase::Sample* min,
int64_t* max,
HistogramBase::Count* count) const override;
private:
void SkipEmptyBuckets();
SampleToCountMap::const_iterator iter_;
const SampleToCountMap::const_iterator end_;
};
PersistentSampleMapIterator::PersistentSampleMapIterator(
const SampleToCountMap& sample_counts)
: iter_(sample_counts.begin()),
end_(sample_counts.end()) {
SkipEmptyBuckets();
}
PersistentSampleMapIterator::~PersistentSampleMapIterator() = default;
bool PersistentSampleMapIterator::Done() const {
return iter_ == end_;
}
void PersistentSampleMapIterator::Next() {
DCHECK(!Done());
++iter_;
SkipEmptyBuckets();
}
void PersistentSampleMapIterator::Get(Sample* min,
int64_t* max,
Count* count) const {
DCHECK(!Done());
if (min)
*min = iter_->first;
if (max)
*max = strict_cast<int64_t>(iter_->first) + 1;
if (count)
*count = *iter_->second;
}
void PersistentSampleMapIterator::SkipEmptyBuckets() {
while (!Done() && *iter_->second == 0) {
++iter_;
}
}
// This structure holds an entry for a PersistentSampleMap within a persistent
// memory allocator. The "id" must be unique across all maps held by an
// allocator or they will get attached to the wrong sample map.
struct SampleRecord {
// SHA1(SampleRecord): Increment this if structure changes!
static constexpr uint32_t kPersistentTypeId = 0x8FE6A69F + 1;
// Expected size for 32/64-bit check.
static constexpr size_t kExpectedInstanceSize = 16;
uint64_t id; // Unique identifier of owner.
Sample value; // The value for which this record holds a count.
Count count; // The count associated with the above value.
};
} // namespace
PersistentSampleMap::PersistentSampleMap(
uint64_t id,
PersistentHistogramAllocator* allocator,
Metadata* meta)
: HistogramSamples(id, meta), allocator_(allocator) {}
PersistentSampleMap::~PersistentSampleMap() {
if (records_)
records_->Release(this);
}
void PersistentSampleMap::Accumulate(Sample value, Count count) {
#if 0 // TODO(bcwhite) Re-enable efficient version after crbug.com/682680.
*GetOrCreateSampleCountStorage(value) += count;
#else
Count* local_count_ptr = GetOrCreateSampleCountStorage(value);
if (count < 0) {
if (*local_count_ptr < -count)
RecordNegativeSample(SAMPLES_ACCUMULATE_WENT_NEGATIVE, -count);
else
RecordNegativeSample(SAMPLES_ACCUMULATE_NEGATIVE_COUNT, -count);
*local_count_ptr += count;
} else {
Sample old_value = *local_count_ptr;
Sample new_value = old_value + count;
*local_count_ptr = new_value;
if ((new_value >= 0) != (old_value >= 0))
RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
}
#endif
IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
}
Count PersistentSampleMap::GetCount(Sample value) const {
// Have to override "const" to make sure all samples have been loaded before
// being able to know what value to return.
Count* count_pointer =
const_cast<PersistentSampleMap*>(this)->GetSampleCountStorage(value);
return count_pointer ? *count_pointer : 0;
}
Count PersistentSampleMap::TotalCount() const {
// Have to override "const" in order to make sure all samples have been
// loaded before trying to iterate over the map.
const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
Count count = 0;
for (const auto& entry : sample_counts_) {
count += *entry.second;
}
return count;
}
std::unique_ptr<SampleCountIterator> PersistentSampleMap::Iterator() const {
// Have to override "const" in order to make sure all samples have been
// loaded before trying to iterate over the map.
const_cast<PersistentSampleMap*>(this)->ImportSamples(-1, true);
return std::make_unique<PersistentSampleMapIterator>(sample_counts_);
}
// static
PersistentMemoryAllocator::Reference
PersistentSampleMap::GetNextPersistentRecord(
PersistentMemoryAllocator::Iterator& iterator,
uint64_t* sample_map_id) {
const SampleRecord* record = iterator.GetNextOfObject<SampleRecord>();
if (!record)
return 0;
*sample_map_id = record->id;
return iterator.GetAsReference(record);
}
// static
PersistentMemoryAllocator::Reference
PersistentSampleMap::CreatePersistentRecord(
PersistentMemoryAllocator* allocator,
uint64_t sample_map_id,
Sample value) {
SampleRecord* record = allocator->New<SampleRecord>();
if (!record) {
NOTREACHED() << "full=" << allocator->IsFull()
<< ", corrupt=" << allocator->IsCorrupt();
return 0;
}
record->id = sample_map_id;
record->value = value;
record->count = 0;
PersistentMemoryAllocator::Reference ref = allocator->GetAsReference(record);
allocator->MakeIterable(ref);
return ref;
}
bool PersistentSampleMap::AddSubtractImpl(SampleCountIterator* iter,
Operator op) {
Sample min;
int64_t max;
Count count;
for (; !iter->Done(); iter->Next()) {
iter->Get(&min, &max, &count);
if (count == 0)
continue;
if (strict_cast<int64_t>(min) + 1 != max)
return false;  // SparseHistogram only supports buckets of size 1.
*GetOrCreateSampleCountStorage(min) +=
(op == HistogramSamples::ADD) ? count : -count;
}
return true;
}
Count* PersistentSampleMap::GetSampleCountStorage(Sample value) {
// If |value| is already in the map, just return that.
auto it = sample_counts_.find(value);
if (it != sample_counts_.end())
return it->second;
// Import any new samples from persistent memory looking for the value.
return ImportSamples(value, false);
}
Count* PersistentSampleMap::GetOrCreateSampleCountStorage(Sample value) {
// Get any existing count storage.
Count* count_pointer = GetSampleCountStorage(value);
if (count_pointer)
return count_pointer;
// Create a new record in persistent memory for the value. |records_| will
// have been initialized by the GetSampleCountStorage() call above.
DCHECK(records_);
PersistentMemoryAllocator::Reference ref = records_->CreateNew(value);
if (!ref) {
// If a new record could not be created then the underlying allocator is
// full or corrupt. Instead, allocate the counter from the heap. This
// sample will not be persistent, will not be shared, and will leak...
// but it's better than crashing.
count_pointer = new Count(0);
sample_counts_[value] = count_pointer;
return count_pointer;
}
// A race condition between two independent processes (i.e. two independent
// histogram objects sharing the same sample data) could cause two of the
// above records to be created. The allocator, however, forces a strict
// ordering on iterable objects so use the import method to actually add the
// just-created record. This ensures that all PersistentSampleMap objects
// will always use the same record, whichever was first made iterable.
// Thread-safety within a process where multiple threads use the same
// histogram object is delegated to the controlling histogram object which,
// for sparse histograms, is a lock object.
count_pointer = ImportSamples(value, false);
DCHECK(count_pointer);
return count_pointer;
}
PersistentSampleMapRecords* PersistentSampleMap::GetRecords() {
// The |records_| pointer is lazily fetched from the |allocator_| only on
// first use. Sometimes duplicate histograms are created by race conditions
// and if both were to grab the records object, there would be a conflict.
// Use of a histogram, and thus a call to this method, won't occur until
// after the histogram has been de-dup'd.
if (!records_)
records_ = allocator_->UseSampleMapRecords(id(), this);
return records_;
}
Count* PersistentSampleMap::ImportSamples(Sample until_value,
bool import_everything) {
Count* found_count = nullptr;
PersistentMemoryAllocator::Reference ref;
PersistentSampleMapRecords* records = GetRecords();
while ((ref = records->GetNext()) != 0) {
SampleRecord* record = records->GetAsObject<SampleRecord>(ref);
if (!record)
continue;
DCHECK_EQ(id(), record->id);
// Check if the record's value is already known.
if (!Contains(sample_counts_, record->value)) {
// No: Add it to map of known values.
sample_counts_[record->value] = &record->count;
} else {
// Yes: Ignore it; it's a duplicate caused by a race condition -- see
// code & comment in GetOrCreateSampleCountStorage() for details.
// Check that nothing ever operated on the duplicate record.
DCHECK_EQ(0, record->count);
}
// Check if it's the value being searched for and, if so, keep a pointer
// to return later. Stop here unless everything is being imported.
// Because race conditions can cause multiple records for a single value,
// be sure to return the first one found.
if (record->value == until_value) {
if (!found_count)
found_count = &record->count;
if (!import_everything)
break;
}
}
return found_count;
}
} // namespace base

View file

@ -0,0 +1,109 @@
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// PersistentSampleMap implements the HistogramSamples interface. It is used
// by the SparseHistogram class to store samples in persistent memory which
// allows it to be shared between processes or live across restarts.
#ifndef BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
#define BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_
#include <stdint.h>
#include <map>
#include <memory>
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/persistent_memory_allocator.h"
namespace base {
class PersistentHistogramAllocator;
class PersistentSampleMapRecords;
// The logic here is similar to that of SampleMap but with different data
// structures. Changes here likely need to be duplicated there.
class BASE_EXPORT PersistentSampleMap : public HistogramSamples {
public:
// Constructs a persistent sample map using a PersistentHistogramAllocator
// as the data source for persistent records.
PersistentSampleMap(uint64_t id,
PersistentHistogramAllocator* allocator,
Metadata* meta);
~PersistentSampleMap() override;
// HistogramSamples:
void Accumulate(HistogramBase::Sample value,
HistogramBase::Count count) override;
HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
HistogramBase::Count TotalCount() const override;
std::unique_ptr<SampleCountIterator> Iterator() const override;
// Uses a persistent-memory |iterator| to locate and return information about
// the next record holding information for a PersistentSampleMap. The record
// could be for any map, so the |sample_map_id| is returned as well.
static PersistentMemoryAllocator::Reference GetNextPersistentRecord(
PersistentMemoryAllocator::Iterator& iterator,
uint64_t* sample_map_id);
// Creates a new record in an |allocator| storing count information for a
// specific sample |value| of a histogram with the given |sample_map_id|.
static PersistentMemoryAllocator::Reference CreatePersistentRecord(
PersistentMemoryAllocator* allocator,
uint64_t sample_map_id,
HistogramBase::Sample value);
protected:
// Performs arithmetic. |op| is ADD or SUBTRACT.
bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
// Gets a pointer to a "count" corresponding to a given |value|. Returns NULL
// if sample does not exist.
HistogramBase::Count* GetSampleCountStorage(HistogramBase::Sample value);
// Gets a pointer to a "count" corresponding to a given |value|, creating
// the sample (initialized to zero) if it does not already exist.
HistogramBase::Count* GetOrCreateSampleCountStorage(
HistogramBase::Sample value);
private:
// Gets the object that manages persistent records. This returns the
// |records_| member after first initializing it if necessary.
PersistentSampleMapRecords* GetRecords();
// Imports samples from persistent memory by iterating over all sample
// records found therein, adding them to the sample_counts_ map. If a
// count for the sample |until_value| is found, stop the import and return
// a pointer to that counter. If that value is not found, null will be
// returned after all currently available samples have been loaded. Pass
// true for |import_everything| to force the importing of all available
// samples even if a match is found.
HistogramBase::Count* ImportSamples(HistogramBase::Sample until_value,
bool import_everything);
// All created/loaded sample values and their associated counts. The storage
// for the actual Count numbers is owned by the |records_| object and its
// underlying allocator.
std::map<HistogramBase::Sample, HistogramBase::Count*> sample_counts_;
// The allocator that manages histograms inside persistent memory. This is
// owned externally and is expected to live beyond the life of this object.
PersistentHistogramAllocator* allocator_;
// The object that manages sample records inside persistent memory. This is
// owned by the |allocator_| object (above) and so, like it, is expected to
// live beyond the life of this object. This value is lazily-initialized on
// first use via the GetRecords() accessor method.
PersistentSampleMapRecords* records_ = nullptr;
DISALLOW_COPY_AND_ASSIGN(PersistentSampleMap);
};
} // namespace base
#endif // BASE_METRICS_PERSISTENT_SAMPLE_MAP_H_

View file

@ -0,0 +1,27 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_RECORD_HISTOGRAM_CHECKER_H_
#define BASE_METRICS_RECORD_HISTOGRAM_CHECKER_H_
#include <stdint.h>
#include "base/base_export.h"
namespace base {
// RecordHistogramChecker provides an interface for checking whether
// the given histogram should be recorded.
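//
// A sketch of a trivial implementation that records everything (illustrative
// only; Chrome's real checkers live elsewhere):
//
//   class RecordEverythingChecker : public RecordHistogramChecker {
//    public:
//     bool ShouldRecord(uint64_t histogram_hash) const override {
//       return true;
//     }
//   };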
class BASE_EXPORT RecordHistogramChecker {
public:
virtual ~RecordHistogramChecker() = default;
// Returns true iff the given histogram should be recorded.
// This method may be called on any thread, so it should not mutate any state.
virtual bool ShouldRecord(uint64_t histogram_hash) const = 0;
};
} // namespace base
#endif // BASE_METRICS_RECORD_HISTOGRAM_CHECKER_H_

View file

@ -0,0 +1,125 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/sample_map.h"
#include "base/logging.h"
#include "base/numerics/safe_conversions.h"
#include "base/stl_util.h"
namespace base {
typedef HistogramBase::Count Count;
typedef HistogramBase::Sample Sample;
namespace {
// An iterator for going through a SampleMap. The logic here is identical
// to that of PersistentSampleMapIterator but with different data structures.
// Changes here likely need to be duplicated there.
class SampleMapIterator : public SampleCountIterator {
public:
typedef std::map<HistogramBase::Sample, HistogramBase::Count>
SampleToCountMap;
explicit SampleMapIterator(const SampleToCountMap& sample_counts);
~SampleMapIterator() override;
// SampleCountIterator:
bool Done() const override;
void Next() override;
void Get(HistogramBase::Sample* min,
int64_t* max,
HistogramBase::Count* count) const override;
private:
void SkipEmptyBuckets();
SampleToCountMap::const_iterator iter_;
const SampleToCountMap::const_iterator end_;
};
SampleMapIterator::SampleMapIterator(const SampleToCountMap& sample_counts)
: iter_(sample_counts.begin()),
end_(sample_counts.end()) {
SkipEmptyBuckets();
}
SampleMapIterator::~SampleMapIterator() = default;
bool SampleMapIterator::Done() const {
return iter_ == end_;
}
void SampleMapIterator::Next() {
DCHECK(!Done());
++iter_;
SkipEmptyBuckets();
}
void SampleMapIterator::Get(Sample* min, int64_t* max, Count* count) const {
DCHECK(!Done());
if (min)
*min = iter_->first;
if (max)
*max = strict_cast<int64_t>(iter_->first) + 1;
if (count)
*count = iter_->second;
}
void SampleMapIterator::SkipEmptyBuckets() {
while (!Done() && iter_->second == 0) {
++iter_;
}
}
} // namespace
SampleMap::SampleMap() : SampleMap(0) {}
SampleMap::SampleMap(uint64_t id) : HistogramSamples(id, new LocalMetadata()) {}
SampleMap::~SampleMap() {
delete static_cast<LocalMetadata*>(meta());
}
void SampleMap::Accumulate(Sample value, Count count) {
sample_counts_[value] += count;
IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
}
Count SampleMap::GetCount(Sample value) const {
auto it = sample_counts_.find(value);
if (it == sample_counts_.end())
return 0;
return it->second;
}
Count SampleMap::TotalCount() const {
Count count = 0;
for (const auto& entry : sample_counts_) {
count += entry.second;
}
return count;
}
std::unique_ptr<SampleCountIterator> SampleMap::Iterator() const {
return std::make_unique<SampleMapIterator>(sample_counts_);
}
bool SampleMap::AddSubtractImpl(SampleCountIterator* iter, Operator op) {
Sample min;
int64_t max;
Count count;
for (; !iter->Done(); iter->Next()) {
iter->Get(&min, &max, &count);
if (strict_cast<int64_t>(min) + 1 != max)
return false; // SparseHistogram only supports buckets of size 1.
sample_counts_[min] += (op == HistogramSamples::ADD) ? count : -count;
}
return true;
}
} // namespace base

View file

@@ -0,0 +1,50 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// SampleMap implements HistogramSamples interface. It is used by the
// SparseHistogram class to store samples.
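//
// A minimal usage sketch (illustrative; the sample values are arbitrary):
//
//   base::SampleMap samples(/*id=*/1);
//   samples.Accumulate(/*value=*/42, /*count=*/2);
//   samples.Accumulate(/*value=*/7, /*count=*/1);
//   DCHECK_EQ(2, samples.GetCount(42));
//   DCHECK_EQ(3, samples.TotalCount());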
#ifndef BASE_METRICS_SAMPLE_MAP_H_
#define BASE_METRICS_SAMPLE_MAP_H_
#include <stdint.h>
#include <map>
#include <memory>
#include "base/compiler_specific.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
namespace base {
// The logic here is similar to that of PersistentSampleMap but with different
// data structures. Changes here likely need to be duplicated there.
class BASE_EXPORT SampleMap : public HistogramSamples {
public:
SampleMap();
explicit SampleMap(uint64_t id);
~SampleMap() override;
// HistogramSamples:
void Accumulate(HistogramBase::Sample value,
HistogramBase::Count count) override;
HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
HistogramBase::Count TotalCount() const override;
std::unique_ptr<SampleCountIterator> Iterator() const override;
protected:
// Performs arithmetic. |op| is ADD or SUBTRACT.
bool AddSubtractImpl(SampleCountIterator* iter, Operator op) override;
private:
std::map<HistogramBase::Sample, HistogramBase::Count> sample_counts_;
DISALLOW_COPY_AND_ASSIGN(SampleMap);
};
} // namespace base
#endif // BASE_METRICS_SAMPLE_MAP_H_

View file

@@ -0,0 +1,429 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/sample_vector.h"
#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/persistent_memory_allocator.h"
#include "base/numerics/safe_conversions.h"
#include "base/synchronization/lock.h"
#include "base/threading/platform_thread.h"
// This SampleVector makes use of the single-sample embedded in the base
// HistogramSamples class. If the count is non-zero then there is guaranteed
// (within the bounds of "eventual consistency") to be no allocated external
// storage. Once the full counts storage is allocated, the single-sample must
// be extracted and disabled.
namespace base {
typedef HistogramBase::Count Count;
typedef HistogramBase::Sample Sample;
SampleVectorBase::SampleVectorBase(uint64_t id,
Metadata* meta,
const BucketRanges* bucket_ranges)
: HistogramSamples(id, meta), bucket_ranges_(bucket_ranges) {
CHECK_GE(bucket_ranges_->bucket_count(), 1u);
}
SampleVectorBase::~SampleVectorBase() = default;
void SampleVectorBase::Accumulate(Sample value, Count count) {
const size_t bucket_index = GetBucketIndex(value);
// Handle the single-sample case.
if (!counts()) {
// Try to accumulate the parameters into the single-count entry.
if (AccumulateSingleSample(value, count, bucket_index)) {
// A race condition could lead to a new single-sample being accumulated
// above just after another thread executed the MountCountsStorage below.
// Since it is mounted, it could be mounted elsewhere and have values
// written to it. It's not allowed to have both a single-sample and
// entries in the counts array so move the single-sample.
if (counts())
MoveSingleSampleToCounts();
return;
}
// Need real storage to store both what was in the single-sample and the
// parameter information.
MountCountsStorageAndMoveSingleSample();
}
// Handle the multi-sample case.
Count new_value =
subtle::NoBarrier_AtomicIncrement(&counts()[bucket_index], count);
IncreaseSumAndCount(strict_cast<int64_t>(count) * value, count);
// TODO(bcwhite) Remove after crbug.com/682680.
Count old_value = new_value - count;
if ((new_value >= 0) != (old_value >= 0) && count > 0)
RecordNegativeSample(SAMPLES_ACCUMULATE_OVERFLOW, count);
}
Count SampleVectorBase::GetCount(Sample value) const {
return GetCountAtIndex(GetBucketIndex(value));
}
Count SampleVectorBase::TotalCount() const {
// Handle the single-sample case.
SingleSample sample = single_sample().Load();
if (sample.count != 0)
return sample.count;
// Handle the multi-sample case.
if (counts() || MountExistingCountsStorage()) {
Count count = 0;
size_t size = counts_size();
const HistogramBase::AtomicCount* counts_array = counts();
for (size_t i = 0; i < size; ++i) {
count += subtle::NoBarrier_Load(&counts_array[i]);
}
return count;
}
// And the no-value case.
return 0;
}
Count SampleVectorBase::GetCountAtIndex(size_t bucket_index) const {
DCHECK(bucket_index < counts_size());
// Handle the single-sample case.
SingleSample sample = single_sample().Load();
if (sample.count != 0)
return sample.bucket == bucket_index ? sample.count : 0;
// Handle the multi-sample case.
if (counts() || MountExistingCountsStorage())
return subtle::NoBarrier_Load(&counts()[bucket_index]);
// And the no-value case.
return 0;
}
std::unique_ptr<SampleCountIterator> SampleVectorBase::Iterator() const {
// Handle the single-sample case.
SingleSample sample = single_sample().Load();
if (sample.count != 0) {
return std::make_unique<SingleSampleIterator>(
bucket_ranges_->range(sample.bucket),
bucket_ranges_->range(sample.bucket + 1), sample.count, sample.bucket);
}
// Handle the multi-sample case.
if (counts() || MountExistingCountsStorage()) {
return std::make_unique<SampleVectorIterator>(counts(), counts_size(),
bucket_ranges_);
}
// And the no-value case.
return std::make_unique<SampleVectorIterator>(nullptr, 0, bucket_ranges_);
}
bool SampleVectorBase::AddSubtractImpl(SampleCountIterator* iter,
HistogramSamples::Operator op) {
// Stop now if there's nothing to do.
if (iter->Done())
return true;
// Get the first value and its index.
HistogramBase::Sample min;
int64_t max;
HistogramBase::Count count;
iter->Get(&min, &max, &count);
size_t dest_index = GetBucketIndex(min);
// The destination must be a superset of the source meaning that though the
// incoming ranges will find an exact match, the incoming bucket-index, if
// it exists, may be offset from the destination bucket-index. Calculate
// that offset for the passed iterator; there are no overflow checks
// because 2's complement math will work it out in the end.
//
// Because GetBucketIndex() always returns the same true or false result for
// a given iterator object, |index_offset| is either set here and used below,
// or never set and never used. The compiler doesn't know this, though, which
// is why it's necessary to initialize it to something.
size_t index_offset = 0;
size_t iter_index;
if (iter->GetBucketIndex(&iter_index))
index_offset = dest_index - iter_index;
if (dest_index >= counts_size())
return false;
// Post-increment. Information about the current sample is not available
// after this point.
iter->Next();
// Single-value storage is possible if there is no counts storage and the
// retrieved entry is the only one in the iterator.
if (!counts()) {
if (iter->Done()) {
// Don't call AccumulateSingleSample because that updates sum and count
// which was already done by the caller of this method.
if (single_sample().Accumulate(
dest_index, op == HistogramSamples::ADD ? count : -count)) {
// Handle race-condition that mounted counts storage between above and
// here.
if (counts())
MoveSingleSampleToCounts();
return true;
}
}
// The counts storage will be needed to hold the multiple incoming values.
MountCountsStorageAndMoveSingleSample();
}
// Go through the iterator and add the counts into correct bucket.
while (true) {
// Ensure that the sample's min/max match the ranges min/max.
if (min != bucket_ranges_->range(dest_index) ||
max != bucket_ranges_->range(dest_index + 1)) {
NOTREACHED() << "sample=" << min << "," << max
<< "; range=" << bucket_ranges_->range(dest_index) << ","
<< bucket_ranges_->range(dest_index + 1);
return false;
}
// Sample's bucket matches exactly. Adjust count.
subtle::NoBarrier_AtomicIncrement(
&counts()[dest_index], op == HistogramSamples::ADD ? count : -count);
// Advance to the next iterable sample. See comments above for how
// everything works.
if (iter->Done())
return true;
iter->Get(&min, &max, &count);
if (iter->GetBucketIndex(&iter_index)) {
// Destination bucket is a known offset from the source bucket.
dest_index = iter_index + index_offset;
} else {
// Destination bucket has to be determined anew each time.
dest_index = GetBucketIndex(min);
}
if (dest_index >= counts_size())
return false;
iter->Next();
}
}
// Use simple binary search. This is very general, but there are better
// approaches if we knew that the buckets were linearly distributed.
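// Worked example (derived from the code below): with ranges
// [0, 1, 3, 7, INT_MAX] (4 buckets) and value 5, the search visits mid=2
// (range 3 <= 5, so under=2) and mid=3 (range 7 > 5, so over=3), then stops
// at index 2, i.e. the bucket covering [3, 7).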
size_t SampleVectorBase::GetBucketIndex(Sample value) const {
size_t bucket_count = bucket_ranges_->bucket_count();
CHECK_GE(bucket_count, 1u);
CHECK_GE(value, bucket_ranges_->range(0));
CHECK_LT(value, bucket_ranges_->range(bucket_count));
size_t under = 0;
size_t over = bucket_count;
size_t mid;
do {
DCHECK_GE(over, under);
mid = under + (over - under)/2;
if (mid == under)
break;
if (bucket_ranges_->range(mid) <= value)
under = mid;
else
over = mid;
} while (true);
DCHECK_LE(bucket_ranges_->range(mid), value);
CHECK_GT(bucket_ranges_->range(mid + 1), value);
return mid;
}
void SampleVectorBase::MoveSingleSampleToCounts() {
DCHECK(counts());
// Disable the single-sample since there is now counts storage for the data.
SingleSample sample = single_sample().Extract(/*disable=*/true);
// Stop here if there is no "count" as trying to find the bucket index of
// an invalid (including zero) "value" will crash.
if (sample.count == 0)
return;
// Move the value into storage. Sum and redundant-count already account
// for this entry so no need to call IncreaseSumAndCount().
subtle::NoBarrier_AtomicIncrement(&counts()[sample.bucket], sample.count);
}
void SampleVectorBase::MountCountsStorageAndMoveSingleSample() {
// There are many SampleVector objects and the lock is needed very
// infrequently (just when advancing from single-sample to multi-sample) so
// define a single, global lock that all can use. This lock only prevents
// concurrent entry into the code below; access and updates to |counts_|
// still requires atomic operations.
static LazyInstance<Lock>::Leaky counts_lock = LAZY_INSTANCE_INITIALIZER;
if (subtle::NoBarrier_Load(&counts_) == 0) {
AutoLock lock(counts_lock.Get());
if (subtle::NoBarrier_Load(&counts_) == 0) {
// Create the actual counts storage while the above lock is acquired.
HistogramBase::Count* counts = CreateCountsStorageWhileLocked();
DCHECK(counts);
// Point |counts_| to the newly created storage. This is done while
// locked to prevent possible concurrent calls to CreateCountsStorage
// but, between that call and here, other threads could notice the
// existence of the storage and race with this to set_counts(). That's
// okay because (a) it's atomic and (b) it always writes the same value.
set_counts(counts);
}
}
// Move any single-sample into the newly mounted storage.
MoveSingleSampleToCounts();
}
SampleVector::SampleVector(const BucketRanges* bucket_ranges)
: SampleVector(0, bucket_ranges) {}
SampleVector::SampleVector(uint64_t id, const BucketRanges* bucket_ranges)
: SampleVectorBase(id, new LocalMetadata(), bucket_ranges) {}
SampleVector::~SampleVector() {
delete static_cast<LocalMetadata*>(meta());
}
bool SampleVector::MountExistingCountsStorage() const {
// There is never any existing storage other than what is already in use.
return counts() != nullptr;
}
HistogramBase::AtomicCount* SampleVector::CreateCountsStorageWhileLocked() {
local_counts_.resize(counts_size());
return &local_counts_[0];
}
PersistentSampleVector::PersistentSampleVector(
uint64_t id,
const BucketRanges* bucket_ranges,
Metadata* meta,
const DelayedPersistentAllocation& counts)
: SampleVectorBase(id, meta, bucket_ranges), persistent_counts_(counts) {
// Only mount the full storage if the single-sample has been disabled.
// Otherwise, it is possible for this object instance to start using (empty)
// storage that was created incidentally while another instance continues to
// update the single-sample. This "incidental creation" can happen because
// the memory is a DelayedPersistentAllocation which allows multiple memory
// blocks within it and applies an all-or-nothing approach to the allocation.
// Thus, a request elsewhere for one of the _other_ blocks would make _this_
// block available even though nothing has explicitly requested it.
//
// Note that it's not possible for the ctor to mount existing storage and
// move any single-sample to it because sometimes the persistent memory is
// read-only. Only non-const methods (which assume that memory is read/write)
// can do that.
if (single_sample().IsDisabled()) {
bool success = MountExistingCountsStorage();
DCHECK(success);
}
}
PersistentSampleVector::~PersistentSampleVector() = default;
bool PersistentSampleVector::MountExistingCountsStorage() const {
// There is no early exit if counts is not yet mounted because, given that
// this is a virtual function, it's more efficient to do that at the call-
// site. There is no danger, however, should this get called anyway (perhaps
// because of a race condition) because at worst the |counts_| value would
// be over-written (in an atomic manner) with the exact same address.
if (!persistent_counts_.reference())
return false; // Nothing to mount.
// Mount the counts array in position.
set_counts(
static_cast<HistogramBase::AtomicCount*>(persistent_counts_.Get()));
// The above shouldn't fail but can if the data is corrupt or incomplete.
return counts() != nullptr;
}
HistogramBase::AtomicCount*
PersistentSampleVector::CreateCountsStorageWhileLocked() {
void* mem = persistent_counts_.Get();
if (!mem) {
// The above shouldn't fail but can if Bad Things(tm) are occurring in the
// persistent allocator. Crashing isn't a good option so instead just
// allocate something from the heap and return that. There will be no
// sharing or persistence but worse things are already happening.
return new HistogramBase::AtomicCount[counts_size()];
}
return static_cast<HistogramBase::AtomicCount*>(mem);
}
SampleVectorIterator::SampleVectorIterator(
const std::vector<HistogramBase::AtomicCount>* counts,
const BucketRanges* bucket_ranges)
: counts_(&(*counts)[0]),
counts_size_(counts->size()),
bucket_ranges_(bucket_ranges),
index_(0) {
DCHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
SkipEmptyBuckets();
}
SampleVectorIterator::SampleVectorIterator(
const HistogramBase::AtomicCount* counts,
size_t counts_size,
const BucketRanges* bucket_ranges)
: counts_(counts),
counts_size_(counts_size),
bucket_ranges_(bucket_ranges),
index_(0) {
DCHECK_GE(bucket_ranges_->bucket_count(), counts_size_);
SkipEmptyBuckets();
}
SampleVectorIterator::~SampleVectorIterator() = default;
bool SampleVectorIterator::Done() const {
return index_ >= counts_size_;
}
void SampleVectorIterator::Next() {
DCHECK(!Done());
index_++;
SkipEmptyBuckets();
}
void SampleVectorIterator::Get(HistogramBase::Sample* min,
int64_t* max,
HistogramBase::Count* count) const {
DCHECK(!Done());
if (min != nullptr)
*min = bucket_ranges_->range(index_);
if (max != nullptr)
*max = strict_cast<int64_t>(bucket_ranges_->range(index_ + 1));
if (count != nullptr)
*count = subtle::NoBarrier_Load(&counts_[index_]);
}
bool SampleVectorIterator::GetBucketIndex(size_t* index) const {
DCHECK(!Done());
if (index != nullptr)
*index = index_;
return true;
}
void SampleVectorIterator::SkipEmptyBuckets() {
if (Done())
return;
while (index_ < counts_size_) {
if (subtle::NoBarrier_Load(&counts_[index_]) != 0)
return;
index_++;
}
}
} // namespace base

View file

@@ -0,0 +1,185 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// SampleVector implements HistogramSamples interface. It is used by all
// Histogram-based classes to store samples.
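//
// A minimal usage sketch (illustrative only; real histograms build and
// register their BucketRanges through the Histogram factory methods):
//
//   base::BucketRanges ranges(5);  // 5 range values -> 4 buckets.
//   ranges.set_range(0, 0);
//   ranges.set_range(1, 1);
//   ranges.set_range(2, 3);
//   ranges.set_range(3, 7);
//   ranges.set_range(4, INT_MAX);
//   ranges.ResetChecksum();
//   base::SampleVector samples(&ranges);  // |ranges| must outlive |samples|.
//   samples.Accumulate(/*value=*/5, /*count=*/1);  // Lands in bucket [3, 7).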
#ifndef BASE_METRICS_SAMPLE_VECTOR_H_
#define BASE_METRICS_SAMPLE_VECTOR_H_
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <vector>
#include "base/atomicops.h"
#include "base/compiler_specific.h"
#include "base/gtest_prod_util.h"
#include "base/macros.h"
#include "base/metrics/bucket_ranges.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/metrics/persistent_memory_allocator.h"
namespace base {
class BucketRanges;
class BASE_EXPORT SampleVectorBase : public HistogramSamples {
public:
SampleVectorBase(uint64_t id,
Metadata* meta,
const BucketRanges* bucket_ranges);
~SampleVectorBase() override;
// HistogramSamples:
void Accumulate(HistogramBase::Sample value,
HistogramBase::Count count) override;
HistogramBase::Count GetCount(HistogramBase::Sample value) const override;
HistogramBase::Count TotalCount() const override;
std::unique_ptr<SampleCountIterator> Iterator() const override;
// Get count of a specific bucket.
HistogramBase::Count GetCountAtIndex(size_t bucket_index) const;
// Access the bucket ranges held externally.
const BucketRanges* bucket_ranges() const { return bucket_ranges_; }
protected:
bool AddSubtractImpl(
SampleCountIterator* iter,
HistogramSamples::Operator op) override; // |op| is ADD or SUBTRACT.
virtual size_t GetBucketIndex(HistogramBase::Sample value) const;
// Moves the single-sample value to a mounted "counts" array.
void MoveSingleSampleToCounts();
// Mounts (creating if necessary) an array of "counts" for multi-value
// storage.
void MountCountsStorageAndMoveSingleSample();
// Mounts "counts" storage that already exists. This does not attempt to move
// any single-sample information to that storage as that would violate the
// "const" restriction that is often used to indicate read-only memory.
virtual bool MountExistingCountsStorage() const = 0;
// Creates "counts" storage and returns a pointer to it. Ownership of the
// array remains with the called method but will never change. This must be
// called while some sort of lock is held to prevent reentry.
virtual HistogramBase::Count* CreateCountsStorageWhileLocked() = 0;
HistogramBase::AtomicCount* counts() {
return reinterpret_cast<HistogramBase::AtomicCount*>(
subtle::Acquire_Load(&counts_));
}
const HistogramBase::AtomicCount* counts() const {
return reinterpret_cast<HistogramBase::AtomicCount*>(
subtle::Acquire_Load(&counts_));
}
void set_counts(const HistogramBase::AtomicCount* counts) const {
subtle::Release_Store(&counts_, reinterpret_cast<uintptr_t>(counts));
}
size_t counts_size() const { return bucket_ranges_->bucket_count(); }
private:
friend class SampleVectorTest;
FRIEND_TEST_ALL_PREFIXES(HistogramTest, CorruptSampleCounts);
FRIEND_TEST_ALL_PREFIXES(SharedHistogramTest, CorruptSampleCounts);
// |counts_| is actually a pointer to a HistogramBase::AtomicCount array but
// is held as an AtomicWord for concurrency reasons. When combined with the
// single_sample held in the metadata, there are four possible states:
// 1) single_sample == zero, counts_ == null
// 2) single_sample != zero, counts_ == null
// 3) single_sample != zero, counts_ != null BUT IS EMPTY
// 4) single_sample == zero, counts_ != null and may have data
// Once |counts_| is set, it can never revert and any existing single-sample
// must be moved to this storage. It is mutable because changing it doesn't
// change the (const) data but must adapt if a non-const object causes the
// storage to be allocated and updated.
mutable subtle::AtomicWord counts_ = 0;
// Shares the same BucketRanges with Histogram object.
const BucketRanges* const bucket_ranges_;
DISALLOW_COPY_AND_ASSIGN(SampleVectorBase);
};
// A sample vector that uses local memory for the counts array.
class BASE_EXPORT SampleVector : public SampleVectorBase {
public:
explicit SampleVector(const BucketRanges* bucket_ranges);
SampleVector(uint64_t id, const BucketRanges* bucket_ranges);
~SampleVector() override;
private:
// SampleVectorBase:
bool MountExistingCountsStorage() const override;
HistogramBase::Count* CreateCountsStorageWhileLocked() override;
// Simple local storage for counts.
mutable std::vector<HistogramBase::AtomicCount> local_counts_;
DISALLOW_COPY_AND_ASSIGN(SampleVector);
};
// A sample vector that uses persistent memory for the counts array.
class BASE_EXPORT PersistentSampleVector : public SampleVectorBase {
public:
PersistentSampleVector(uint64_t id,
const BucketRanges* bucket_ranges,
Metadata* meta,
const DelayedPersistentAllocation& counts);
~PersistentSampleVector() override;
private:
// SampleVectorBase:
bool MountExistingCountsStorage() const override;
HistogramBase::Count* CreateCountsStorageWhileLocked() override;
// Persistent storage for counts.
DelayedPersistentAllocation persistent_counts_;
DISALLOW_COPY_AND_ASSIGN(PersistentSampleVector);
};
// An iterator for sample vectors. This could be defined privately in the .cc
// file but is here for easy testing.
class BASE_EXPORT SampleVectorIterator : public SampleCountIterator {
public:
SampleVectorIterator(const std::vector<HistogramBase::AtomicCount>* counts,
const BucketRanges* bucket_ranges);
SampleVectorIterator(const HistogramBase::AtomicCount* counts,
size_t counts_size,
const BucketRanges* bucket_ranges);
~SampleVectorIterator() override;
// SampleCountIterator implementation:
bool Done() const override;
void Next() override;
void Get(HistogramBase::Sample* min,
int64_t* max,
HistogramBase::Count* count) const override;
// SampleVector uses predefined buckets, so the iterator can return the
// bucket index.
bool GetBucketIndex(size_t* index) const override;
private:
void SkipEmptyBuckets();
const HistogramBase::AtomicCount* counts_;
size_t counts_size_;
const BucketRanges* bucket_ranges_;
size_t index_;
};
} // namespace base
#endif // BASE_METRICS_SAMPLE_VECTOR_H_

View file

@@ -0,0 +1,77 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/single_sample_metrics.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
namespace base {
static SingleSampleMetricsFactory* g_factory = nullptr;
// static
SingleSampleMetricsFactory* SingleSampleMetricsFactory::Get() {
if (!g_factory)
g_factory = new DefaultSingleSampleMetricsFactory();
return g_factory;
}
// static
void SingleSampleMetricsFactory::SetFactory(
std::unique_ptr<SingleSampleMetricsFactory> factory) {
DCHECK(!g_factory);
g_factory = factory.release();
}
// static
void SingleSampleMetricsFactory::DeleteFactoryForTesting() {
DCHECK(g_factory);
delete g_factory;
g_factory = nullptr;
}
std::unique_ptr<SingleSampleMetric>
DefaultSingleSampleMetricsFactory::CreateCustomCountsMetric(
const std::string& histogram_name,
HistogramBase::Sample min,
HistogramBase::Sample max,
uint32_t bucket_count) {
return std::make_unique<DefaultSingleSampleMetric>(
histogram_name, min, max, bucket_count,
HistogramBase::kUmaTargetedHistogramFlag);
}
DefaultSingleSampleMetric::DefaultSingleSampleMetric(
const std::string& histogram_name,
HistogramBase::Sample min,
HistogramBase::Sample max,
uint32_t bucket_count,
int32_t flags)
: histogram_(Histogram::FactoryGet(histogram_name,
min,
max,
bucket_count,
flags)) {
// Bad construction parameters may lead to |histogram_| being null; DCHECK to
// find accidental errors in production. We must still handle the nullptr in
// destruction though since this construction may come from another untrusted
// process.
DCHECK(histogram_);
}
DefaultSingleSampleMetric::~DefaultSingleSampleMetric() {
// |histogram_| may be nullptr if bad construction parameters are given.
if (sample_ < 0 || !histogram_)
return;
histogram_->Add(sample_);
}
void DefaultSingleSampleMetric::SetSample(HistogramBase::Sample sample) {
DCHECK_GE(sample, 0);
sample_ = sample;
}
} // namespace base

View file

@@ -0,0 +1,104 @@
// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_SINGLE_SAMPLE_METRICS_H_
#define BASE_METRICS_SINGLE_SAMPLE_METRICS_H_
#include <memory>
#include <string>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
namespace base {
// See base/metrics/histogram.h for parameter definitions. Must only be used
// and destroyed from the same thread as construction.
class BASE_EXPORT SingleSampleMetric {
public:
virtual ~SingleSampleMetric() = default;
virtual void SetSample(HistogramBase::Sample sample) = 0;
};
// Factory for creating single sample metrics. A single sample metric only
// reports its sample once at destruction time. The sample may be changed prior
// to destruction using the SetSample() method as many times as desired.
//
// The metric creation methods are safe to call from any thread, however the
// returned class must only be used and destroyed from the same thread as
// construction.
//
// See base/metrics/histogram_macros.h for usage recommendations and
// base/metrics/histogram.h for full parameter definitions.
class BASE_EXPORT SingleSampleMetricsFactory {
public:
virtual ~SingleSampleMetricsFactory() = default;
// Returns the factory provided by SetFactory(), or a default factory if none
// has been set (future calls to SetFactory() will fail once the default
// factory has been vended).
static SingleSampleMetricsFactory* Get();
static void SetFactory(std::unique_ptr<SingleSampleMetricsFactory> factory);
// The factory normally persists until process shutdown, but in testing we
// should avoid leaking it since it sets a global.
static void DeleteFactoryForTesting();
// The methods below return a single sample metric for counts histograms; see
// method comments for the corresponding histogram macro.
// UMA_HISTOGRAM_CUSTOM_COUNTS()
virtual std::unique_ptr<SingleSampleMetric> CreateCustomCountsMetric(
const std::string& histogram_name,
HistogramBase::Sample min,
HistogramBase::Sample max,
uint32_t bucket_count) = 0;
};
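// A minimal usage sketch (illustrative; the histogram name and parameters are
// hypothetical):
//
//   std::unique_ptr<base::SingleSampleMetric> metric =
//       base::SingleSampleMetricsFactory::Get()->CreateCustomCountsMetric(
//           "MyMetric.Example", /*min=*/1, /*max=*/1000, /*bucket_count=*/50);
//   metric->SetSample(3);
//   metric->SetSample(42);  // Overwrites the previously set sample.
//   metric.reset();         // Only the last sample (42) is reported.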
// Default implementation for when no factory has been provided to the process.
// Samples are only recorded within the current process in this case, so samples
// will be lost in the event of sudden process termination.
class BASE_EXPORT DefaultSingleSampleMetricsFactory
: public SingleSampleMetricsFactory {
public:
DefaultSingleSampleMetricsFactory() = default;
~DefaultSingleSampleMetricsFactory() override = default;
// SingleSampleMetricsFactory:
std::unique_ptr<SingleSampleMetric> CreateCustomCountsMetric(
const std::string& histogram_name,
HistogramBase::Sample min,
HistogramBase::Sample max,
uint32_t bucket_count) override;
private:
DISALLOW_COPY_AND_ASSIGN(DefaultSingleSampleMetricsFactory);
};
class BASE_EXPORT DefaultSingleSampleMetric : public SingleSampleMetric {
public:
DefaultSingleSampleMetric(const std::string& histogram_name,
HistogramBase::Sample min,
HistogramBase::Sample max,
uint32_t bucket_count,
int32_t flags);
~DefaultSingleSampleMetric() override;
// SingleSampleMetric:
void SetSample(HistogramBase::Sample sample) override;
private:
HistogramBase* const histogram_;
// The last sample provided to SetSample(). We use -1 as a sentinel value to
// indicate no sample has been set.
HistogramBase::Sample sample_ = -1;
DISALLOW_COPY_AND_ASSIGN(DefaultSingleSampleMetric);
};
} // namespace base
#endif // BASE_METRICS_SINGLE_SAMPLE_METRICS_H_

View file

@@ -0,0 +1,303 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/sparse_histogram.h"
#include <utility>
#include "base/memory/ptr_util.h"
#include "base/metrics/dummy_histogram.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/persistent_sample_map.h"
#include "base/metrics/sample_map.h"
#include "base/metrics/statistics_recorder.h"
#include "base/pickle.h"
#include "base/strings/stringprintf.h"
#include "base/synchronization/lock.h"
namespace {
constexpr char kHtmlNewLine[] = "<br>";
constexpr char kAsciiNewLine[] = "\n";
} // namespace
namespace base {
typedef HistogramBase::Count Count;
typedef HistogramBase::Sample Sample;
// static
HistogramBase* SparseHistogram::FactoryGet(const std::string& name,
int32_t flags) {
HistogramBase* histogram = StatisticsRecorder::FindHistogram(name);
if (!histogram) {
// TODO(gayane): |HashMetricName| is called again in Histogram constructor.
// Refactor code to avoid the additional call.
bool should_record =
StatisticsRecorder::ShouldRecordHistogram(HashMetricName(name));
if (!should_record)
return DummyHistogram::GetInstance();
// Try to create the histogram using a "persistent" allocator. As of
// 2016-02-25, the availability of such is controlled by a base::Feature
// that is off by default. If the allocator doesn't exist or if
// allocating from it fails, code below will allocate the histogram from
// the process heap.
PersistentMemoryAllocator::Reference histogram_ref = 0;
std::unique_ptr<HistogramBase> tentative_histogram;
PersistentHistogramAllocator* allocator = GlobalHistogramAllocator::Get();
if (allocator) {
tentative_histogram = allocator->AllocateHistogram(
SPARSE_HISTOGRAM, name, 0, 0, nullptr, flags, &histogram_ref);
}
// Handle the case where no persistent allocator is present or the
// persistent allocation fails (perhaps because it is full).
if (!tentative_histogram) {
DCHECK(!histogram_ref); // Should never have been set.
DCHECK(!allocator); // Shouldn't have failed.
flags &= ~HistogramBase::kIsPersistent;
tentative_histogram.reset(new SparseHistogram(GetPermanentName(name)));
tentative_histogram->SetFlags(flags);
}
// Register this histogram with the StatisticsRecorder. Keep a copy of
// the pointer value to tell later whether the locally created histogram
// was registered or deleted. The type is "void" because it could point
// to released memory after the following line.
const void* tentative_histogram_ptr = tentative_histogram.get();
histogram = StatisticsRecorder::RegisterOrDeleteDuplicate(
tentative_histogram.release());
// Persistent histograms need some follow-up processing.
if (histogram_ref) {
allocator->FinalizeHistogram(histogram_ref,
histogram == tentative_histogram_ptr);
}
}
CHECK_EQ(SPARSE_HISTOGRAM, histogram->GetHistogramType());
return histogram;
}
// static
std::unique_ptr<HistogramBase> SparseHistogram::PersistentCreate(
PersistentHistogramAllocator* allocator,
const char* name,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta) {
return WrapUnique(
new SparseHistogram(allocator, name, meta, logged_meta));
}
SparseHistogram::~SparseHistogram() = default;
uint64_t SparseHistogram::name_hash() const {
return unlogged_samples_->id();
}
HistogramType SparseHistogram::GetHistogramType() const {
return SPARSE_HISTOGRAM;
}
bool SparseHistogram::HasConstructionArguments(
Sample expected_minimum,
Sample expected_maximum,
uint32_t expected_bucket_count) const {
// SparseHistogram never has min/max/bucket_count limit.
return false;
}
void SparseHistogram::Add(Sample value) {
AddCount(value, 1);
}
void SparseHistogram::AddCount(Sample value, int count) {
if (count <= 0) {
NOTREACHED();
return;
}
{
base::AutoLock auto_lock(lock_);
unlogged_samples_->Accumulate(value, count);
}
if (UNLIKELY(StatisticsRecorder::have_active_callbacks()))
FindAndRunCallback(value);
}
std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotSamples() const {
std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
base::AutoLock auto_lock(lock_);
snapshot->Add(*unlogged_samples_);
snapshot->Add(*logged_samples_);
return std::move(snapshot);
}
std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotDelta() {
DCHECK(!final_delta_created_);
std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
base::AutoLock auto_lock(lock_);
snapshot->Add(*unlogged_samples_);
unlogged_samples_->Subtract(*snapshot);
logged_samples_->Add(*snapshot);
return std::move(snapshot);
}
std::unique_ptr<HistogramSamples> SparseHistogram::SnapshotFinalDelta() const {
DCHECK(!final_delta_created_);
final_delta_created_ = true;
std::unique_ptr<SampleMap> snapshot(new SampleMap(name_hash()));
base::AutoLock auto_lock(lock_);
snapshot->Add(*unlogged_samples_);
return std::move(snapshot);
}
void SparseHistogram::AddSamples(const HistogramSamples& samples) {
base::AutoLock auto_lock(lock_);
unlogged_samples_->Add(samples);
}
bool SparseHistogram::AddSamplesFromPickle(PickleIterator* iter) {
base::AutoLock auto_lock(lock_);
return unlogged_samples_->AddFromPickle(iter);
}
void SparseHistogram::WriteHTMLGraph(std::string* output) const {
// Get a local copy of the data so we are consistent.
std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples();
output->append("<PRE>");
output->append("<h4>");
WriteAsciiHeader(*snapshot, output);
output->append("</h4>");
WriteAsciiBody(*snapshot, true, kHtmlNewLine, output);
output->append("</PRE>");
}
void SparseHistogram::WriteAscii(std::string* output) const {
// Get a local copy of the data so we are consistent.
std::unique_ptr<HistogramSamples> snapshot = SnapshotSamples();
WriteAsciiHeader(*snapshot, output);
output->append(kAsciiNewLine);
WriteAsciiBody(*snapshot, true, kAsciiNewLine, output);
}
void SparseHistogram::SerializeInfoImpl(Pickle* pickle) const {
pickle->WriteString(histogram_name());
pickle->WriteInt(flags());
}
SparseHistogram::SparseHistogram(const char* name)
: HistogramBase(name),
unlogged_samples_(new SampleMap(HashMetricName(name))),
logged_samples_(new SampleMap(unlogged_samples_->id())) {}
SparseHistogram::SparseHistogram(PersistentHistogramAllocator* allocator,
const char* name,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta)
: HistogramBase(name),
// While other histogram types maintain a static vector of values with
// sufficient space for both "active" and "logged" samples, with each
// SampleVector being given the appropriate half, sparse histograms
// have no such initial allocation. Each sample has its own record
// attached to a single PersistentSampleMap by a common 64-bit identifier.
// Since a sparse histogram has two sample maps (active and logged),
// there must be two sets of sample records with different IDs. The
// "active" samples use, for convenience purposes, an ID matching
// that of the histogram while the "logged" samples use that number
// plus 1.
unlogged_samples_(
new PersistentSampleMap(HashMetricName(name), allocator, meta)),
logged_samples_(new PersistentSampleMap(unlogged_samples_->id() + 1,
allocator,
logged_meta)) {}
HistogramBase* SparseHistogram::DeserializeInfoImpl(PickleIterator* iter) {
std::string histogram_name;
int flags;
if (!iter->ReadString(&histogram_name) || !iter->ReadInt(&flags)) {
DLOG(ERROR) << "Pickle error decoding Histogram: " << histogram_name;
return nullptr;
}
flags &= ~HistogramBase::kIPCSerializationSourceFlag;
return SparseHistogram::FactoryGet(histogram_name, flags);
}
void SparseHistogram::GetParameters(DictionaryValue* params) const {
// TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
}
void SparseHistogram::GetCountAndBucketData(Count* count,
int64_t* sum,
ListValue* buckets) const {
// TODO(kaiwang): Implement. (See HistogramBase::WriteJSON.)
}
void SparseHistogram::WriteAsciiBody(const HistogramSamples& snapshot,
bool graph_it,
const std::string& newline,
std::string* output) const {
Count total_count = snapshot.TotalCount();
double scaled_total_count = total_count / 100.0;
// Determine how wide the largest bucket range is (how many digits to print),
// so that we'll be able to right-align starts for the graphical bars.
// Determine which bucket has the largest sample count so that we can
// normalize the graphical bar-width relative to that sample count.
Count largest_count = 0;
Sample largest_sample = 0;
std::unique_ptr<SampleCountIterator> it = snapshot.Iterator();
while (!it->Done()) {
Sample min;
int64_t max;
Count count;
it->Get(&min, &max, &count);
if (min > largest_sample)
largest_sample = min;
if (count > largest_count)
largest_count = count;
it->Next();
}
size_t print_width = GetSimpleAsciiBucketRange(largest_sample).size() + 1;
// Iterate over each item and display it.
it = snapshot.Iterator();
while (!it->Done()) {
Sample min;
int64_t max;
Count count;
it->Get(&min, &max, &count);
// value is min, so display it
std::string range = GetSimpleAsciiBucketRange(min);
output->append(range);
for (size_t j = 0; range.size() + j < print_width + 1; ++j)
output->push_back(' ');
if (graph_it)
WriteAsciiBucketGraph(count, largest_count, output);
WriteAsciiBucketValue(count, scaled_total_count, output);
output->append(newline);
it->Next();
}
}
void SparseHistogram::WriteAsciiHeader(const HistogramSamples& snapshot,
std::string* output) const {
StringAppendF(output, "Histogram: %s recorded %d samples", histogram_name(),
snapshot.TotalCount());
if (flags())
StringAppendF(output, " (flags = 0x%x)", flags());
}
} // namespace base

View file

@@ -0,0 +1,109 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_SPARSE_HISTOGRAM_H_
#define BASE_METRICS_SPARSE_HISTOGRAM_H_
#include <stddef.h>
#include <stdint.h>
#include <map>
#include <memory>
#include <string>
#include "base/base_export.h"
#include "base/macros.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/histogram_samples.h"
#include "base/synchronization/lock.h"
namespace base {
class HistogramSamples;
class PersistentHistogramAllocator;
class Pickle;
class PickleIterator;
class BASE_EXPORT SparseHistogram : public HistogramBase {
public:
// If there's one with same name, return the existing one. If not, create a
// new one.
static HistogramBase* FactoryGet(const std::string& name, int32_t flags);
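//
// Example (illustrative; the histogram name and |sample_value| are
// hypothetical):
//   base::SparseHistogram::FactoryGet(
//       "MyCounts.Example", base::HistogramBase::kUmaTargetedHistogramFlag)
//       ->Add(sample_value);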
// Create a histogram using data in persistent storage. The allocator must
// live longer than the created sparse histogram.
static std::unique_ptr<HistogramBase> PersistentCreate(
PersistentHistogramAllocator* allocator,
const char* name,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
~SparseHistogram() override;
// HistogramBase implementation:
uint64_t name_hash() const override;
HistogramType GetHistogramType() const override;
bool HasConstructionArguments(Sample expected_minimum,
Sample expected_maximum,
uint32_t expected_bucket_count) const override;
void Add(Sample value) override;
void AddCount(Sample value, int count) override;
void AddSamples(const HistogramSamples& samples) override;
bool AddSamplesFromPickle(base::PickleIterator* iter) override;
std::unique_ptr<HistogramSamples> SnapshotSamples() const override;
std::unique_ptr<HistogramSamples> SnapshotDelta() override;
std::unique_ptr<HistogramSamples> SnapshotFinalDelta() const override;
void WriteHTMLGraph(std::string* output) const override;
void WriteAscii(std::string* output) const override;
protected:
// HistogramBase implementation:
void SerializeInfoImpl(base::Pickle* pickle) const override;
private:
// Clients should always use FactoryGet to create SparseHistogram.
explicit SparseHistogram(const char* name);
SparseHistogram(PersistentHistogramAllocator* allocator,
const char* name,
HistogramSamples::Metadata* meta,
HistogramSamples::Metadata* logged_meta);
friend BASE_EXPORT HistogramBase* DeserializeHistogramInfo(
base::PickleIterator* iter);
static HistogramBase* DeserializeInfoImpl(base::PickleIterator* iter);
void GetParameters(DictionaryValue* params) const override;
void GetCountAndBucketData(Count* count,
int64_t* sum,
ListValue* buckets) const override;
// Helpers for emitting ASCII graphics. Each method appends data to |output|.
void WriteAsciiBody(const HistogramSamples& snapshot,
bool graph_it,
const std::string& newline,
std::string* output) const;
// Write a common header message describing this histogram.
void WriteAsciiHeader(const HistogramSamples& snapshot,
std::string* output) const;
// For constructor calling.
friend class SparseHistogramTest;
// Protects access to |unlogged_samples_| and |logged_samples_|.
mutable base::Lock lock_;
// Flag to indicate if PrepareFinalDelta has been previously called.
mutable bool final_delta_created_ = false;
std::unique_ptr<HistogramSamples> unlogged_samples_;
std::unique_ptr<HistogramSamples> logged_samples_;
DISALLOW_COPY_AND_ASSIGN(SparseHistogram);
};
} // namespace base
#endif // BASE_METRICS_SPARSE_HISTOGRAM_H_

View file

@@ -0,0 +1,444 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/statistics_recorder.h"
#include <memory>
#include "base/at_exit.h"
#include "base/debug/leak_annotations.h"
#include "base/json/string_escape.h"
#include "base/logging.h"
#include "base/memory/ptr_util.h"
#include "base/metrics/histogram.h"
#include "base/metrics/histogram_snapshot_manager.h"
#include "base/metrics/metrics_hashes.h"
#include "base/metrics/persistent_histogram_allocator.h"
#include "base/metrics/record_histogram_checker.h"
#include "base/stl_util.h"
#include "base/strings/stringprintf.h"
#include "base/values.h"
namespace base {
namespace {
bool HistogramNameLesser(const base::HistogramBase* a,
const base::HistogramBase* b) {
return strcmp(a->histogram_name(), b->histogram_name()) < 0;
}
} // namespace
// static
LazyInstance<Lock>::Leaky StatisticsRecorder::lock_;
// static
StatisticsRecorder* StatisticsRecorder::top_ = nullptr;
// static
bool StatisticsRecorder::is_vlog_initialized_ = false;
// static
std::atomic<bool> StatisticsRecorder::have_active_callbacks_{false};
// static
std::atomic<StatisticsRecorder::GlobalSampleCallback>
StatisticsRecorder::global_sample_callback_{nullptr};
size_t StatisticsRecorder::BucketRangesHash::operator()(
const BucketRanges* const a) const {
return a->checksum();
}
bool StatisticsRecorder::BucketRangesEqual::operator()(
const BucketRanges* const a,
const BucketRanges* const b) const {
return a->Equals(b);
}
StatisticsRecorder::~StatisticsRecorder() {
const AutoLock auto_lock(lock_.Get());
DCHECK_EQ(this, top_);
top_ = previous_;
}
// static
void StatisticsRecorder::EnsureGlobalRecorderWhileLocked() {
lock_.Get().AssertAcquired();
if (top_)
return;
const StatisticsRecorder* const p = new StatisticsRecorder;
// The global recorder is never deleted.
ANNOTATE_LEAKING_OBJECT_PTR(p);
DCHECK_EQ(p, top_);
}
// static
void StatisticsRecorder::RegisterHistogramProvider(
const WeakPtr<HistogramProvider>& provider) {
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
top_->providers_.push_back(provider);
}
// static
HistogramBase* StatisticsRecorder::RegisterOrDeleteDuplicate(
HistogramBase* histogram) {
// Declared before |auto_lock| to ensure correct destruction order.
std::unique_ptr<HistogramBase> histogram_deleter;
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
const char* const name = histogram->histogram_name();
HistogramBase*& registered = top_->histograms_[name];
if (!registered) {
// |name| is guaranteed to never change or be deallocated so long
// as the histogram is alive (which is forever).
registered = histogram;
ANNOTATE_LEAKING_OBJECT_PTR(histogram); // see crbug.com/79322
// If there are callbacks for this histogram, we set the kCallbackExists
// flag.
const auto callback_iterator = top_->callbacks_.find(name);
if (callback_iterator != top_->callbacks_.end()) {
if (!callback_iterator->second.is_null())
histogram->SetFlags(HistogramBase::kCallbackExists);
else
histogram->ClearFlags(HistogramBase::kCallbackExists);
}
return histogram;
}
if (histogram == registered) {
// The histogram was registered before.
return histogram;
}
// We already have one histogram with this name.
histogram_deleter.reset(histogram);
return registered;
}
// static
const BucketRanges* StatisticsRecorder::RegisterOrDeleteDuplicateRanges(
const BucketRanges* ranges) {
DCHECK(ranges->HasValidChecksum());
// Declared before |auto_lock| to ensure correct destruction order.
std::unique_ptr<const BucketRanges> ranges_deleter;
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
const BucketRanges* const registered = *top_->ranges_.insert(ranges).first;
if (registered == ranges) {
ANNOTATE_LEAKING_OBJECT_PTR(ranges);
} else {
ranges_deleter.reset(ranges);
}
return registered;
}
// static
void StatisticsRecorder::WriteHTMLGraph(const std::string& query,
std::string* output) {
for (const HistogramBase* const histogram :
Sort(WithName(GetHistograms(), query))) {
histogram->WriteHTMLGraph(output);
*output += "<br><hr><br>";
}
}
// static
void StatisticsRecorder::WriteGraph(const std::string& query,
std::string* output) {
if (query.length())
StringAppendF(output, "Collections of histograms for %s\n", query.c_str());
else
output->append("Collections of all histograms\n");
for (const HistogramBase* const histogram :
Sort(WithName(GetHistograms(), query))) {
histogram->WriteAscii(output);
output->append("\n");
}
}
// static
std::string StatisticsRecorder::ToJSON(JSONVerbosityLevel verbosity_level) {
std::string output = "{\"histograms\":[";
const char* sep = "";
for (const HistogramBase* const histogram : Sort(GetHistograms())) {
output += sep;
sep = ",";
std::string json;
histogram->WriteJSON(&json, verbosity_level);
output += json;
}
output += "]}";
return output;
}
// static
std::vector<const BucketRanges*> StatisticsRecorder::GetBucketRanges() {
std::vector<const BucketRanges*> out;
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
out.reserve(top_->ranges_.size());
out.assign(top_->ranges_.begin(), top_->ranges_.end());
return out;
}
// static
HistogramBase* StatisticsRecorder::FindHistogram(base::StringPiece name) {
// This must be called *before* the lock is acquired below because it will
// call back into this object to register histograms. Those called methods
// will acquire the lock at that time.
ImportGlobalPersistentHistograms();
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
const HistogramMap::const_iterator it = top_->histograms_.find(name);
return it != top_->histograms_.end() ? it->second : nullptr;
}
// static
StatisticsRecorder::HistogramProviders
StatisticsRecorder::GetHistogramProviders() {
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
return top_->providers_;
}
// static
void StatisticsRecorder::ImportProvidedHistograms() {
// Merge histogram data from each provider in turn.
for (const WeakPtr<HistogramProvider>& provider : GetHistogramProviders()) {
// Weak-pointer may be invalid if the provider was destructed, though they
// generally never are.
if (provider)
provider->MergeHistogramDeltas();
}
}
// static
void StatisticsRecorder::PrepareDeltas(
bool include_persistent,
HistogramBase::Flags flags_to_set,
HistogramBase::Flags required_flags,
HistogramSnapshotManager* snapshot_manager) {
Histograms histograms = GetHistograms();
if (!include_persistent)
histograms = NonPersistent(std::move(histograms));
snapshot_manager->PrepareDeltas(Sort(std::move(histograms)), flags_to_set,
required_flags);
}
// static
void StatisticsRecorder::InitLogOnShutdown() {
const AutoLock auto_lock(lock_.Get());
InitLogOnShutdownWhileLocked();
}
// static
bool StatisticsRecorder::SetCallback(const std::string& name,
StatisticsRecorder::OnSampleCallback cb) {
DCHECK(!cb.is_null());
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
if (!top_->callbacks_.insert({name, std::move(cb)}).second)
return false;
const HistogramMap::const_iterator it = top_->histograms_.find(name);
if (it != top_->histograms_.end())
it->second->SetFlags(HistogramBase::kCallbackExists);
have_active_callbacks_.store(
global_sample_callback() || !top_->callbacks_.empty(),
std::memory_order_relaxed);
return true;
}
// static
void StatisticsRecorder::ClearCallback(const std::string& name) {
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
top_->callbacks_.erase(name);
// We also clear the flag from the histogram (if it exists).
const HistogramMap::const_iterator it = top_->histograms_.find(name);
if (it != top_->histograms_.end())
it->second->ClearFlags(HistogramBase::kCallbackExists);
have_active_callbacks_.store(
global_sample_callback() || !top_->callbacks_.empty(),
std::memory_order_relaxed);
}
// static
StatisticsRecorder::OnSampleCallback StatisticsRecorder::FindCallback(
const std::string& name) {
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
const auto it = top_->callbacks_.find(name);
return it != top_->callbacks_.end() ? it->second : OnSampleCallback();
}
// static
void StatisticsRecorder::SetGlobalSampleCallback(
const GlobalSampleCallback& new_global_sample_callback) {
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
DCHECK(!global_sample_callback() || !new_global_sample_callback);
global_sample_callback_.store(new_global_sample_callback);
have_active_callbacks_.store(
new_global_sample_callback || !top_->callbacks_.empty(),
std::memory_order_relaxed);
}
// static
size_t StatisticsRecorder::GetHistogramCount() {
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
return top_->histograms_.size();
}
// static
void StatisticsRecorder::ForgetHistogramForTesting(base::StringPiece name) {
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
const HistogramMap::iterator found = top_->histograms_.find(name);
if (found == top_->histograms_.end())
return;
HistogramBase* const base = found->second;
if (base->GetHistogramType() != SPARSE_HISTOGRAM) {
// When forgetting a histogram, it's likely that other information is
// also becoming invalid. Clear the persistent reference that may no
// longer be valid. There's no danger in this as, at worst, duplicates
// will be created in persistent memory.
static_cast<Histogram*>(base)->bucket_ranges()->set_persistent_reference(0);
}
top_->histograms_.erase(found);
}
// static
std::unique_ptr<StatisticsRecorder>
StatisticsRecorder::CreateTemporaryForTesting() {
const AutoLock auto_lock(lock_.Get());
return WrapUnique(new StatisticsRecorder());
}
// static
void StatisticsRecorder::SetRecordChecker(
std::unique_ptr<RecordHistogramChecker> record_checker) {
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
top_->record_checker_ = std::move(record_checker);
}
// static
bool StatisticsRecorder::ShouldRecordHistogram(uint64_t histogram_hash) {
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
return !top_->record_checker_ ||
top_->record_checker_->ShouldRecord(histogram_hash);
}
// static
StatisticsRecorder::Histograms StatisticsRecorder::GetHistograms() {
// This must be called *before* the lock is acquired below because it will
// call back into this object to register histograms. Those called methods
// will acquire the lock at that time.
ImportGlobalPersistentHistograms();
Histograms out;
const AutoLock auto_lock(lock_.Get());
EnsureGlobalRecorderWhileLocked();
out.reserve(top_->histograms_.size());
for (const auto& entry : top_->histograms_)
out.push_back(entry.second);
return out;
}
// static
StatisticsRecorder::Histograms StatisticsRecorder::Sort(Histograms histograms) {
std::sort(histograms.begin(), histograms.end(), &HistogramNameLesser);
return histograms;
}
// static
StatisticsRecorder::Histograms StatisticsRecorder::WithName(
Histograms histograms,
const std::string& query) {
// Need a C-string query for comparisons against C-string histogram name.
const char* const query_string = query.c_str();
histograms.erase(std::remove_if(histograms.begin(), histograms.end(),
[query_string](const HistogramBase* const h) {
return !strstr(h->histogram_name(),
query_string);
}),
histograms.end());
return histograms;
}
// static
StatisticsRecorder::Histograms StatisticsRecorder::NonPersistent(
Histograms histograms) {
histograms.erase(
std::remove_if(histograms.begin(), histograms.end(),
[](const HistogramBase* const h) {
return (h->flags() & HistogramBase::kIsPersistent) != 0;
}),
histograms.end());
return histograms;
}
// static
void StatisticsRecorder::ImportGlobalPersistentHistograms() {
// Import histograms from known persistent storage. Histograms could have been
// added by other processes and they must be fetched and recognized locally.
// If the persistent memory segment is not shared between processes, this call
// does nothing.
if (GlobalHistogramAllocator* allocator = GlobalHistogramAllocator::Get())
allocator->ImportHistogramsToStatisticsRecorder();
}
// This singleton instance should be started during the single threaded portion
// of main(), and hence it is not thread safe. It initializes globals to provide
// support for all future calls.
StatisticsRecorder::StatisticsRecorder() {
lock_.Get().AssertAcquired();
previous_ = top_;
top_ = this;
InitLogOnShutdownWhileLocked();
}
// static
void StatisticsRecorder::InitLogOnShutdownWhileLocked() {
lock_.Get().AssertAcquired();
if (!is_vlog_initialized_ && VLOG_IS_ON(1)) {
is_vlog_initialized_ = true;
const auto dump_to_vlog = [](void*) {
std::string output;
WriteGraph("", &output);
VLOG(1) << output;
};
AtExitManager::RegisterCallback(dump_to_vlog, nullptr);
}
}
} // namespace base

View file

@@ -0,0 +1,332 @@
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// StatisticsRecorder holds all Histograms and BucketRanges that are used by
// Histograms in the system. It provides a general place for
// Histograms/BucketRanges to register, and supports a global API for accessing
// (i.e., dumping, or graphing) the data.
#ifndef BASE_METRICS_STATISTICS_RECORDER_H_
#define BASE_METRICS_STATISTICS_RECORDER_H_
#include <stdint.h>
#include <atomic>
#include <memory>
#include <string>
#include <unordered_map>
#include <unordered_set>
#include <vector>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/gtest_prod_util.h"
#include "base/lazy_instance.h"
#include "base/macros.h"
#include "base/memory/weak_ptr.h"
#include "base/metrics/histogram_base.h"
#include "base/metrics/record_histogram_checker.h"
#include "base/strings/string_piece.h"
#include "base/synchronization/lock.h"
namespace base {
class BucketRanges;
class HistogramSnapshotManager;
// In-memory recorder of usage statistics (aka metrics, aka histograms).
//
// All the public methods are static and act on a global recorder. This global
// recorder is internally synchronized and all the static methods are thread
// safe.
//
// StatisticsRecorder doesn't have any public constructor. For testing purpose,
// you can create a temporary recorder using the factory method
// CreateTemporaryForTesting(). This temporary recorder becomes the global one
// until deleted. When this temporary recorder is deleted, it restores the
// previous global one.
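//
// A test-only usage sketch (illustrative):
//
//   std::unique_ptr<base::StatisticsRecorder> recorder =
//       base::StatisticsRecorder::CreateTemporaryForTesting();
//   // ... create and exercise histograms; they register with |recorder| ...
//   recorder.reset();  // Restores the previous global recorder.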
class BASE_EXPORT StatisticsRecorder {
public:
// An interface class that allows the StatisticsRecorder to forcibly merge
// histograms from providers when necessary.
class HistogramProvider {
public:
// Merges all histogram information into the global versions.
virtual void MergeHistogramDeltas() = 0;
};
typedef std::vector<HistogramBase*> Histograms;
// Restores the previous global recorder.
//
// When several temporary recorders are created using
// CreateTemporaryForTesting(), these recorders must be deleted in reverse
// order of creation.
//
// This method is thread safe.
//
// Precondition: The recorder being deleted is the current global recorder.
~StatisticsRecorder();
// Registers a provider of histograms that can be called to merge those into
// the global recorder. Calls to ImportProvidedHistograms() will fetch from
// registered providers.
//
// This method is thread safe.
static void RegisterHistogramProvider(
const WeakPtr<HistogramProvider>& provider);
// Registers or adds a new histogram to the collection of statistics. If an
// identically named histogram is already registered, then the argument
// |histogram| will be deleted. The returned value is always the registered
// histogram (either the argument, or the pre-existing registered histogram).
//
// This method is thread safe.
static HistogramBase* RegisterOrDeleteDuplicate(HistogramBase* histogram);
// Registers or adds a new BucketRanges. If an equivalent BucketRanges is
// already registered, then the argument |ranges| will be deleted. The
// returned value is always the registered BucketRanges (either the argument,
// or the pre-existing one).
//
// This method is thread safe.
static const BucketRanges* RegisterOrDeleteDuplicateRanges(
const BucketRanges* ranges);
// Methods for appending histogram data to a string. Only histograms which
// have |query| as a substring are written to |output| (an empty string will
// process all registered histograms).
//
// These methods are thread safe.
static void WriteHTMLGraph(const std::string& query, std::string* output);
static void WriteGraph(const std::string& query, std::string* output);
// Returns the histograms with |verbosity_level| as the serialization
// verbosity.
//
// This method is thread safe.
static std::string ToJSON(JSONVerbosityLevel verbosity_level);
// Gets existing histograms.
//
// The order of returned histograms is not guaranteed.
//
// Ownership of the individual histograms remains with the StatisticsRecorder.
//
// This method is thread safe.
static Histograms GetHistograms();
// Gets BucketRanges used by all histograms registered. The order of returned
// BucketRanges is not guaranteed.
//
// This method is thread safe.
static std::vector<const BucketRanges*> GetBucketRanges();
// Finds a histogram by name. Matches the exact name. Returns a null pointer
// if a matching histogram is not found.
//
// This method is thread safe.
static HistogramBase* FindHistogram(base::StringPiece name);
// Imports histograms from providers.
//
// This method must be called on the UI thread.
static void ImportProvidedHistograms();
// Snapshots all histograms via |snapshot_manager|. |flags_to_set| is used to
// set flags for each histogram. |required_flags| is used to select
// histograms to be recorded. Only histograms that have all the flags
// specified by the argument will be chosen. If all histograms should be
// recorded, set it to |Histogram::kNoFlags|.
static void PrepareDeltas(bool include_persistent,
HistogramBase::Flags flags_to_set,
HistogramBase::Flags required_flags,
HistogramSnapshotManager* snapshot_manager);
using OnSampleCallback = base::RepeatingCallback<void(HistogramBase::Sample)>;
// Sets the callback to notify when a new sample is recorded on the histogram
// referred to by |histogram_name|. Can be called before or after the
// histogram is created. Returns whether the callback was successfully set.
//
// This method is thread safe.
static bool SetCallback(const std::string& histogram_name,
OnSampleCallback callback);
// Clears any callback set on the histogram referred to by |histogram_name|.
//
// This method is thread safe.
static void ClearCallback(const std::string& histogram_name);
// Retrieves the callback for the histogram referred to by |histogram_name|,
// or a null callback if no callback exists for this histogram.
//
// This method is thread safe.
static OnSampleCallback FindCallback(const std::string& histogram_name);
// Returns the number of known histograms.
//
// This method is thread safe.
static size_t GetHistogramCount();
// Initializes logging histograms with --v=1. Safe to call multiple times.
// This is called from the constructor, but for the browser process it is more
// useful to start logging after the StatisticsRecorder has been created, so
// log-on-shutdown can also be initialized later via this method.
//
// This method is thread safe.
static void InitLogOnShutdown();
// Removes a histogram from the internal set of known ones. This can be
// necessary during testing persistent histograms where the underlying
// memory is being released.
//
// This method is thread safe.
static void ForgetHistogramForTesting(base::StringPiece name);
// Creates a temporary StatisticsRecorder object for testing purposes. All new
// histograms will be registered in it until it is destructed or pushed aside
// for the lifetime of yet another StatisticsRecorder object. The destruction
// of the returned object will re-activate the previous one.
// StatisticsRecorder objects must be deleted in the opposite order to which
// they're created.
//
// This method is thread safe.
static std::unique_ptr<StatisticsRecorder> CreateTemporaryForTesting()
WARN_UNUSED_RESULT;
// Sets the record checker for determining if a histogram should be recorded.
// Record checker doesn't affect any already recorded histograms, so this
// method must be called very early, before any threads have started.
// Record checker methods can be called on any thread, so they shouldn't
// mutate any state.
static void SetRecordChecker(
std::unique_ptr<RecordHistogramChecker> record_checker);
// Checks if the given histogram should be recorded based on the
// ShouldRecord() method of the record checker. If the record checker is not
// set, returns true.
//
// This method is thread safe.
static bool ShouldRecordHistogram(uint64_t histogram_hash);
// Sorts histograms by name.
static Histograms Sort(Histograms histograms);
// Filters histograms by name. Only histograms which have |query| as a
// substring in their name are kept. An empty query keeps all histograms.
static Histograms WithName(Histograms histograms, const std::string& query);
// Filters histograms by persistency. Only non-persistent histograms are kept.
static Histograms NonPersistent(Histograms histograms);
using GlobalSampleCallback = void (*)(const char* /*=histogram_name*/,
uint64_t /*=name_hash*/,
HistogramBase::Sample);
// Installs a global callback which will be called for every added
// histogram sample. The given callback is a raw function pointer in order
// to be accessed lock-free and can be called on any thread.
static void SetGlobalSampleCallback(
const GlobalSampleCallback& global_sample_callback);
// Returns the global callback, if any, that should be called every time a
// histogram sample is added.
static GlobalSampleCallback global_sample_callback() {
return global_sample_callback_.load(std::memory_order_relaxed);
}
// Returns whether there's either a global histogram callback set,
// or if any individual histograms have callbacks set. Used for early return
// when histogram samples are added.
static bool have_active_callbacks() {
return have_active_callbacks_.load(std::memory_order_relaxed);
}
private:
typedef std::vector<WeakPtr<HistogramProvider>> HistogramProviders;
typedef std::unordered_map<StringPiece, HistogramBase*, StringPieceHash>
HistogramMap;
// We keep a map of callbacks to histograms, so that as histograms are
// created, we can set the callback properly.
typedef std::unordered_map<std::string, OnSampleCallback> CallbackMap;
struct BucketRangesHash {
size_t operator()(const BucketRanges* a) const;
};
struct BucketRangesEqual {
bool operator()(const BucketRanges* a, const BucketRanges* b) const;
};
typedef std::
unordered_set<const BucketRanges*, BucketRangesHash, BucketRangesEqual>
RangesMap;
friend class StatisticsRecorderTest;
FRIEND_TEST_ALL_PREFIXES(StatisticsRecorderTest, IterationTest);
// Initializes the global recorder if it doesn't already exist. Safe to call
// multiple times.
//
// Precondition: The global lock is already acquired.
static void EnsureGlobalRecorderWhileLocked();
// Gets histogram providers.
//
// This method is thread safe.
static HistogramProviders GetHistogramProviders();
// Imports histograms from global persistent memory.
//
// Precondition: The global lock must not be held during this call.
static void ImportGlobalPersistentHistograms();
// Constructs a new StatisticsRecorder and sets it as the current global
// recorder.
//
// Precondition: The global lock is already acquired.
StatisticsRecorder();
// Implementation of InitLogOnShutdown() for callers that already hold the
// global lock. The caller must guard the StatisticsRecorder itself if needed
// (unit tests do not).
//
// Precondition: The global lock is already acquired.
static void InitLogOnShutdownWhileLocked();
HistogramMap histograms_;
CallbackMap callbacks_;
RangesMap ranges_;
HistogramProviders providers_;
std::unique_ptr<RecordHistogramChecker> record_checker_;
// Previous global recorder that existed when this one was created.
StatisticsRecorder* previous_ = nullptr;
// Global lock for internal synchronization.
static LazyInstance<Lock>::Leaky lock_;
// Current global recorder. This recorder is used by static methods. When a
// new global recorder is created by CreateTemporaryForTesting(), then the
// previous global recorder is referenced by top_->previous_.
static StatisticsRecorder* top_;
// Tracks whether InitLogOnShutdownWhileLocked() has registered a logging
// function that will be called when the program finishes.
static bool is_vlog_initialized_;
// Track whether there are active histogram callbacks present.
static std::atomic<bool> have_active_callbacks_;
// Stores a raw callback which should be called on every histogram sample
// that gets added.
static std::atomic<GlobalSampleCallback> global_sample_callback_;
DISALLOW_COPY_AND_ASSIGN(StatisticsRecorder);
};
} // namespace base
#endif // BASE_METRICS_STATISTICS_RECORDER_H_
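
A short test-oriented sketch, assuming a gtest context and a hypothetical histogram name "Test.Metric"; the temporary recorder isolates registrations made by the code under test and restores the previous global recorder on destruction:

  #include <memory>

  #include "base/metrics/histogram_base.h"
  #include "base/metrics/statistics_recorder.h"
  #include "testing/gtest/include/gtest/gtest.h"

  TEST(StatisticsRecorderExampleTest, FindsIsolatedHistogram) {
    std::unique_ptr<base::StatisticsRecorder> recorder =
        base::StatisticsRecorder::CreateTemporaryForTesting();
    // ... exercise code that is expected to record "Test.Metric" ...
    base::HistogramBase* histogram =
        base::StatisticsRecorder::FindHistogram("Test.Metric");
    EXPECT_NE(nullptr, histogram);
    // |recorder| going out of scope re-activates the previous global recorder.
  }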

View file

@ -0,0 +1,50 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/ukm_source_id.h"
#include "base/atomic_sequence_num.h"
#include "base/logging.h"
#include "base/rand_util.h"
namespace base {
namespace {
const int64_t kLowBitsMask = (INT64_C(1) << 32) - 1;
const int64_t kNumTypeBits = static_cast<int64_t>(UkmSourceId::Type::kMaxValue);
const int64_t kTypeMask = (INT64_C(1) << kNumTypeBits) - 1;
} // namespace
// static
UkmSourceId UkmSourceId::New() {
// Generate some bits which are unique to this process, so we can generate
// IDs independently in different processes. IDs generated by this method may
// collide, but that should be rare enough not to impact data quality.
const static int64_t process_id_bits =
static_cast<int64_t>(RandUint64()) & ~kLowBitsMask;
// Generate some bits which are unique within the process, using a counter.
static AtomicSequenceNumber seq;
UkmSourceId local_id = FromOtherId(seq.GetNext() + 1, UkmSourceId::Type::UKM);
// Combine the local and process bits to generate a unique ID.
return UkmSourceId((local_id.value_ & kLowBitsMask) | process_id_bits);
}
// static
UkmSourceId UkmSourceId::FromOtherId(int64_t other_id, UkmSourceId::Type type) {
const int64_t type_bits = static_cast<int64_t>(type);
DCHECK_EQ(type_bits, type_bits & kTypeMask);
// Stores the type ID in the low bits of the source ID and shifts the rest of
// the ID up to make room. This could cause the original ID to overflow, but
// that should be rare enough that it won't matter for UKM's purposes.
return UkmSourceId((other_id << kNumTypeBits) | type_bits);
}
UkmSourceId::Type UkmSourceId::GetType() const {
return static_cast<UkmSourceId::Type>(value_ & kTypeMask);
}
} // namespace base
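
A worked example of the packing above, assuming kNumTypeBits is derived from kMaxValue as in this file (kMaxValue == PAYMENT_APP_ID == 5, so kNumTypeBits == 5 and kTypeMask == 0b11111); the function name is illustrative only:

  #include <stdint.h>

  #include "base/logging.h"
  #include "base/metrics/ukm_source_id.h"

  void CheckPackingForExample() {
    const base::UkmSourceId id = base::UkmSourceId::FromOtherId(
        /*other_id=*/7, base::UkmSourceId::Type::NAVIGATION_ID);
    // FromOtherId() packs (7 << 5) | 1 == 225 into the underlying int64.
    DCHECK(id.ToInt64() == INT64_C(225));
    // GetType() recovers 225 & 0b11111 == 1 == NAVIGATION_ID.
    DCHECK(id.GetType() == base::UkmSourceId::Type::NAVIGATION_ID);
  }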

View file

@ -0,0 +1,98 @@
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_UKM_SOURCE_ID_H_
#define BASE_METRICS_UKM_SOURCE_ID_H_
#include <stdint.h>
#include "base/base_export.h"
namespace base {
// An ID used to identify a Source to UKM, for recording information about it.
// These objects are copyable, assignable, and occupy 64-bits per instance.
// Prefer passing them by value.
class BASE_EXPORT UkmSourceId {
public:
enum class Type : int64_t {
// Source IDs of this type are created via ukm::AssignNewSourceId, to denote
// a 'custom' source other than the types listed below. Sources of this type
// have additional restrictions on logging, as determined by
// IsWhitelistedSourceId.
UKM = 0,
// Sources created by navigation. They will be kept in memory as long as
// the associated tab is still alive and the number of sources are within
// the max threshold.
NAVIGATION_ID = 1,
// Source ID used by AppLaunchEventLogger::Log. A new source of this type
// and associated events are expected to be recorded within the same report
// interval; it will not be kept in memory between different reports.
APP_ID = 2,
// Source ID for background events that don't have an open tab but whose
// associated URL is still present in the browser's history. A new source of
// this type and associated events are expected to be recorded within the
// same report interval; it will not be kept in memory between different
// reports.
HISTORY_ID = 3,
// Source ID used by WebApkUkmRecorder. A new source of this type and
// associated events are expected to be recorded within the same report
// interval; it will not be kept in memory between different reports.
WEBAPK_ID = 4,
// Source ID for service worker based payment handlers. A new source of this
// type and associated events are expected to be recorded within the same
// report interval; it will not be kept in memory between different reports.
PAYMENT_APP_ID = 5,
kMaxValue = PAYMENT_APP_ID,
};
// Default constructor has the invalid value.
constexpr UkmSourceId() : value_(0) {}
constexpr UkmSourceId& operator=(UkmSourceId other) {
value_ = other.value_;
return *this;
}
// Allow identity comparisons.
constexpr bool operator==(UkmSourceId other) const {
return value_ == other.value_;
}
constexpr bool operator!=(UkmSourceId other) const {
return value_ != other.value_;
}
// Allow coercive comparisons to simplify test migration.
// TODO(crbug/873866): Remove these once callers are migrated.
constexpr bool operator==(int64_t other) const { return value_ == other; }
constexpr bool operator!=(int64_t other) const { return value_ != other; }
// Extract the Type of the SourceId.
Type GetType() const;
// Return the ID as an int64.
constexpr int64_t ToInt64() const { return value_; }
// Convert an int64 ID value to an ID.
static constexpr UkmSourceId FromInt64(int64_t internal_value) {
return UkmSourceId(internal_value);
}
// Get a new UKM-Type SourceId, which is unique within the scope of a
// browser session.
static UkmSourceId New();
// Utility for converting other unique ids to source ids.
static UkmSourceId FromOtherId(int64_t value, Type type);
private:
constexpr explicit UkmSourceId(int64_t value) : value_(value) {}
int64_t value_;
};
constexpr UkmSourceId kInvalidUkmSourceId = UkmSourceId();
} // namespace base
#endif // BASE_METRICS_UKM_SOURCE_ID_H_
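
A minimal sketch of the intended round-trip through the int64 representation (e.g. for IPC or serialization); the function name and comments are illustrative:

  #include <stdint.h>

  #include "base/logging.h"
  #include "base/metrics/ukm_source_id.h"

  void RoundTripForExample() {
    base::UkmSourceId id = base::UkmSourceId::New();
    if (id != base::kInvalidUkmSourceId) {
      const int64_t raw = id.ToInt64();  // E.g. sent across a process boundary.
      base::UkmSourceId restored = base::UkmSourceId::FromInt64(raw);
      DCHECK(restored == id);
    }
  }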

View file

@ -0,0 +1,93 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "base/metrics/user_metrics.h"
#include <stddef.h>
#include <vector>
#include "base/bind.h"
#include "base/lazy_instance.h"
#include "base/location.h"
#include "base/macros.h"
#include "base/threading/thread_checker.h"
#include "base/time/time.h"
#include "base/trace_event/trace_event.h"
namespace base {
namespace {
LazyInstance<std::vector<ActionCallback>>::DestructorAtExit g_callbacks =
LAZY_INSTANCE_INITIALIZER;
LazyInstance<scoped_refptr<SingleThreadTaskRunner>>::DestructorAtExit
g_task_runner = LAZY_INSTANCE_INITIALIZER;
} // namespace
void RecordAction(const UserMetricsAction& action) {
RecordComputedAction(action.str_);
}
void RecordComputedAction(const std::string& action) {
RecordComputedActionAt(action, TimeTicks::Now());
}
void RecordComputedActionSince(const std::string& action,
TimeDelta time_since) {
RecordComputedActionAt(action, TimeTicks::Now() - time_since);
}
void RecordComputedActionAt(const std::string& action, TimeTicks action_time) {
TRACE_EVENT_INSTANT1("ui", "UserEvent", TRACE_EVENT_SCOPE_GLOBAL, "action",
action);
if (!g_task_runner.Get()) {
DCHECK(g_callbacks.Get().empty());
return;
}
if (!g_task_runner.Get()->BelongsToCurrentThread()) {
g_task_runner.Get()->PostTask(
FROM_HERE, BindOnce(&RecordComputedActionAt, action, action_time));
return;
}
for (const ActionCallback& callback : g_callbacks.Get()) {
callback.Run(action, action_time);
}
}
void AddActionCallback(const ActionCallback& callback) {
// Only allow adding a callback if the task runner is set.
DCHECK(g_task_runner.Get());
DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
g_callbacks.Get().push_back(callback);
}
void RemoveActionCallback(const ActionCallback& callback) {
DCHECK(g_task_runner.Get());
DCHECK(g_task_runner.Get()->BelongsToCurrentThread());
std::vector<ActionCallback>* callbacks = g_callbacks.Pointer();
for (size_t i = 0; i < callbacks->size(); ++i) {
if ((*callbacks)[i] == callback) {
callbacks->erase(callbacks->begin() + i);
return;
}
}
}
void SetRecordActionTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner) {
DCHECK(task_runner->BelongsToCurrentThread());
DCHECK(!g_task_runner.Get() || g_task_runner.Get()->BelongsToCurrentThread());
g_task_runner.Get() = task_runner;
}
scoped_refptr<SingleThreadTaskRunner> GetRecordActionTaskRunner() {
if (g_task_runner.IsCreated())
return g_task_runner.Get();
return nullptr;
}
} // namespace base
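
A minimal wiring sketch, assuming it runs on the main/UI thread during startup and that base::ThreadTaskRunnerHandle is available; the function name, the callback body, and the "Example_Action" name are hypothetical (a real action must also be added to tools/metrics/actions/actions.xml):

  #include <string>

  #include "base/bind.h"
  #include "base/logging.h"
  #include "base/metrics/user_metrics.h"
  #include "base/threading/thread_task_runner_handle.h"
  #include "base/time/time.h"

  void InitUserActionLoggingForExample() {
    // Actions recorded on other threads will be bounced to this task runner.
    base::SetRecordActionTaskRunner(base::ThreadTaskRunnerHandle::Get());
    base::AddActionCallback(base::BindRepeating(
        [](const std::string& action, base::TimeTicks time) {
          VLOG(1) << "User action: " << action << " at " << time;
        }));
    base::RecordAction(base::UserMetricsAction("Example_Action"));
  }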

View file

@ -0,0 +1,89 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_USER_METRICS_H_
#define BASE_METRICS_USER_METRICS_H_
#include <string>
#include "base/base_export.h"
#include "base/callback.h"
#include "base/metrics/user_metrics_action.h"
#include "base/single_thread_task_runner.h"
namespace base {
class TimeTicks;
// This module provides some helper functions for logging actions tracked by
// the user metrics system.
// For best practices on deciding when to emit a user action, see
// https://chromium.googlesource.com/chromium/src.git/+/HEAD/tools/metrics/actions/README.md
// Record that the user performed an action.
// This function must be called after the task runner has been set with
// SetRecordActionTaskRunner().
//
// "Action" here means a user-generated event:
// good: "Reload", "CloseTab", and "IMEInvoked"
// not good: "SSLDialogShown", "PageLoaded", "DiskFull"
// We use this to gather anonymized information about how users are
// interacting with the browser.
// WARNING: In calls to this function, UserMetricsAction should be given a
// string literal parameter rather than a variable, e.g.
// RecordAction(UserMetricsAction("my action name"));
// This ensures that our processing scripts can associate this action's hash
// with its metric name. Therefore, it will be possible to retrieve the metric
// name from the hash later on.
//
// Once a new recorded action is added, run
// tools/metrics/actions/extract_actions.py
// to add the metric to actions.xml, then update the <owner>s and <description>
// sections. Make sure to include the actions.xml file when you upload your code
// for review!
//
// For more complicated situations (like when there are many different
// possible actions), see RecordComputedAction().
BASE_EXPORT void RecordAction(const UserMetricsAction& action);
// This function has identical input and behavior to RecordAction(), but is
// not automatically found by the action-processing scripts. It can be used
// when it's a pain to enumerate all possible actions, but if you use this
// you need to also update the rules for extracting known actions in
// tools/metrics/actions/extract_actions.py.
// This function must be called after the task runner has been set with
// SetRecordActionTaskRunner().
BASE_EXPORT void RecordComputedAction(const std::string& action);
// Similar to RecordComputedAction, but also takes the time at which the action
// was observed.
BASE_EXPORT void RecordComputedActionAt(const std::string& action,
TimeTicks action_time);
// Similar to RecordComputedActionAt, but takes the amount of time elapsed since
// the action was observed.
BASE_EXPORT void RecordComputedActionSince(const std::string& action,
TimeDelta time_since);
// Called with the action string.
using ActionCallback = RepeatingCallback<void(const std::string&, TimeTicks)>;
// Add/remove action callbacks (see above).
// These functions must be called after the task runner has been set with
// SetRecordActionTaskRunner().
BASE_EXPORT void AddActionCallback(const ActionCallback& callback);
BASE_EXPORT void RemoveActionCallback(const ActionCallback& callback);
// Set the task runner on which to record actions.
BASE_EXPORT void SetRecordActionTaskRunner(
scoped_refptr<SingleThreadTaskRunner> task_runner);
// Returns the task runner used to record actions. Returns null when not set.
// This function is thread safe.
BASE_EXPORT scoped_refptr<SingleThreadTaskRunner> GetRecordActionTaskRunner();
} // namespace base
#endif // BASE_METRICS_USER_METRICS_H_
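
A short sketch of the literal-vs-computed distinction described above; the action names, the |suffix| parameter, and the function name are hypothetical:

  #include <string>

  #include "base/metrics/user_metrics.h"
  #include "base/metrics/user_metrics_action.h"

  void RecordExampleActions(const std::string& suffix) {
    // Statically known name: use RecordAction() with a string literal so
    // tools/metrics/actions/extract_actions.py can find it.
    base::RecordAction(base::UserMetricsAction("MyFeature_ButtonClicked"));

    // Name assembled at runtime: use RecordComputedAction() and update the
    // extraction rules in tools/metrics/actions/extract_actions.py.
    base::RecordComputedAction("MyFeature_" + suffix);
  }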

View file

@ -0,0 +1,27 @@
// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef BASE_METRICS_USER_METRICS_ACTION_H_
#define BASE_METRICS_USER_METRICS_ACTION_H_
namespace base {
// UserMetricsAction exists purely to standardize on the parameters passed to
// UserMetrics. That way, our toolset can scan the source code reliably for
// constructors and extract the associated string constants.
// WARNING: When using UserMetricsAction you should use a string literal
// parameter e.g.
// RecordAction(UserMetricsAction("my action name"));
// This ensures that our processing scripts can associate this action's hash
// with its metric name. Therefore, it will be possible to retrieve the metric
// name from the hash later on.
// Please see tools/metrics/actions/extract_actions.py for details.
struct UserMetricsAction {
const char* str_;
explicit constexpr UserMetricsAction(const char* str) noexcept : str_(str) {}
};
} // namespace base
#endif // BASE_METRICS_USER_METRICS_ACTION_H_