Commit 93e992bb authored by Igor Sheludko, committed by Commit Bot

[zone-stats] Improve zone memory usage stats tracing

1) make it possible to combine the tracing logic with an accounting
  allocator that supports zone compression,
2) make it possible to record zone memory usage via the Chrome tracing
  machinery (in particular, for an already running process),
3) trace both allocated and actually used memory per zone.

Bug: v8:10572
Change-Id: I768e474ada1a384218af09efd0dfce2d9a43ac3c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2228888
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#68197}
parent 1b699b4e
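For illustration, the shape of the JSON emitted by the new TracingAccountingAllocator can be read off the Dump() method in the first hunk below. A sample line printed under --trace-zone-stats might look roughly like this (the zone name and all numbers are invented):

  {"type": "v8-zone-trace", "stats": {"isolate": "0x55d2c0080000", "time": 41.7, "zones": [{"name": "compiler-zone", "allocated": 65536, "used": 40960}], "allocated": 65536, "used": 40960}}

When the disabled-by-default-v8.zone_stats tracing category is enabled, the same "stats" payload is attached to a V8.Zone_Stats instant trace event, which is what makes it possible to collect the data from an already running process (point 2 above).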
@@ -2657,77 +2657,110 @@ void Isolate::ThreadDataTable::RemoveAllThreads() {
table_.clear();
}
class VerboseAccountingAllocator : public AccountingAllocator {
class TracingAccountingAllocator : public AccountingAllocator {
public:
VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes)
: heap_(heap), allocation_sample_bytes_(allocation_sample_bytes) {}
explicit TracingAccountingAllocator(Isolate* isolate) : isolate_(isolate) {}
v8::internal::Segment* AllocateSegment(size_t size) override {
v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size);
if (!memory) return nullptr;
size_t malloced_current = GetCurrentMemoryUsage();
protected:
void TraceAllocateSegmentImpl(v8::internal::Segment* segment) override {
base::MutexGuard lock(&mutex_);
UpdateMemoryTrafficAndReportMemoryUsage(segment->total_size());
}
if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current) {
PrintMemoryJSON(malloced_current);
last_memory_usage_ = malloced_current;
}
return memory;
void TraceZoneCreationImpl(const Zone* zone) override {
base::MutexGuard lock(&mutex_);
active_zones_.insert(zone);
nesting_depth_++;
}
void ReturnSegment(v8::internal::Segment* memory) override {
AccountingAllocator::ReturnSegment(memory);
size_t malloced_current = GetCurrentMemoryUsage();
void TraceZoneDestructionImpl(const Zone* zone) override {
base::MutexGuard lock(&mutex_);
UpdateMemoryTrafficAndReportMemoryUsage(zone->segment_bytes_allocated());
active_zones_.erase(zone);
nesting_depth_--;
}
private:
void UpdateMemoryTrafficAndReportMemoryUsage(size_t memory_traffic_delta) {
memory_traffic_since_last_report_ += memory_traffic_delta;
if (memory_traffic_since_last_report_ < FLAG_zone_stats_tolerance) return;
memory_traffic_since_last_report_ = 0;
Dump(buffer_, true);
if (malloced_current + allocation_sample_bytes_ < last_memory_usage_) {
PrintMemoryJSON(malloced_current);
last_memory_usage_ = malloced_current;
{
std::string trace_str = buffer_.str();
if (FLAG_trace_zone_stats) {
PrintF(
"{"
"\"type\": \"v8-zone-trace\", "
"\"stats\": %s"
"}\n",
trace_str.c_str());
}
if (V8_UNLIKELY(
TracingFlags::zone_stats.load(std::memory_order_relaxed) &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
TRACE_EVENT_INSTANT1(TRACE_DISABLED_BY_DEFAULT("v8.zone_stats"),
"V8.Zone_Stats", TRACE_EVENT_SCOPE_THREAD, "stats",
TRACE_STR_COPY(trace_str.c_str()));
}
}
}
void ZoneCreation(const Zone* zone) override {
PrintZoneModificationSample(zone, "zonecreation");
nesting_deepth_++;
// Clear the buffer.
buffer_.str(std::string());
}
void ZoneDestruction(const Zone* zone) override {
nesting_deepth_--;
PrintZoneModificationSample(zone, "zonedestruction");
void Dump(std::ostringstream& out, bool dump_details) {
// Note: Neither isolate nor zones are locked, so be careful with accesses
// as the allocator is potentially used on a concurrent thread.
double time = isolate_->time_millis_since_init();
out << "{"
<< "\"isolate\": \"" << reinterpret_cast<void*>(isolate_) << "\", "
<< "\"time\": " << time << ", ";
size_t total_segment_bytes_allocated = 0;
size_t total_zone_allocation_size = 0;
if (dump_details) {
// Print detailed zone stats if memory usage changes direction.
out << "\"zones\": [";
bool first = true;
for (const Zone* zone : active_zones_) {
size_t zone_segment_bytes_allocated = zone->segment_bytes_allocated();
size_t zone_allocation_size = zone->allocation_size_for_tracing();
if (first) {
first = false;
} else {
out << ", ";
}
out << "{"
<< "\"name\": \"" << zone->name() << "\", "
<< "\"allocated\": " << zone_segment_bytes_allocated << ", "
<< "\"used\": " << zone_allocation_size << "}";
total_segment_bytes_allocated += zone_segment_bytes_allocated;
total_zone_allocation_size += zone_allocation_size;
}
out << "], ";
} else {
// Just calculate total allocated/used memory values.
for (const Zone* zone : active_zones_) {
total_segment_bytes_allocated += zone->segment_bytes_allocated();
total_zone_allocation_size += zone->allocation_size_for_tracing();
}
}
out << "\"allocated\": " << total_segment_bytes_allocated << ", "
<< "\"used\": " << total_zone_allocation_size << "}";
}
private:
void PrintZoneModificationSample(const Zone* zone, const char* type) {
PrintF(
"{"
"\"type\": \"%s\", "
"\"isolate\": \"%p\", "
"\"time\": %f, "
"\"ptr\": \"%p\", "
"\"name\": \"%s\", "
"\"size\": %zu,"
"\"nesting\": %zu}\n",
type, reinterpret_cast<void*>(heap_->isolate()),
heap_->isolate()->time_millis_since_init(),
reinterpret_cast<const void*>(zone), zone->name(),
zone->allocation_size(), nesting_deepth_.load());
}
void PrintMemoryJSON(size_t malloced) {
// Note: Neither isolate, nor heap is locked, so be careful with accesses
// as the allocator is potentially used on a concurrent thread.
double time = heap_->isolate()->time_millis_since_init();
PrintF(
"{"
"\"type\": \"zone\", "
"\"isolate\": \"%p\", "
"\"time\": %f, "
"\"allocated\": %zu}\n",
reinterpret_cast<void*>(heap_->isolate()), time, malloced);
}
Heap* heap_;
std::atomic<size_t> last_memory_usage_{0};
std::atomic<size_t> nesting_deepth_{0};
size_t allocation_sample_bytes_;
Isolate* const isolate_;
std::atomic<size_t> nesting_depth_{0};
base::Mutex mutex_;
std::unordered_set<const Zone*> active_zones_;
std::ostringstream buffer_;
// This value is increased on both allocations and deallocations.
size_t memory_traffic_since_last_report_ = 0;
};
#ifdef DEBUG
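The reporting throttle in UpdateMemoryTrafficAndReportMemoryUsage counts bytes moved in either direction (segment allocations and zone teardowns both feed the counter) and dumps stats only once the accumulated traffic crosses FLAG_zone_stats_tolerance. A minimal standalone sketch of that pattern, with invented names and no V8 dependencies:

  #include <cstddef>
  #include <cstdio>
  #include <functional>
  #include <utility>

  // Traffic-based throttle: every allocation or deallocation adds its size to
  // a counter, and a report is emitted only when the counter exceeds the
  // tolerance, after which the counter resets.
  class ZoneStatsThrottle {
   public:
    ZoneStatsThrottle(size_t tolerance_bytes, std::function<void()> report)
        : tolerance_bytes_(tolerance_bytes), report_(std::move(report)) {}

    void OnTraffic(size_t delta_bytes) {
      traffic_since_last_report_ += delta_bytes;
      if (traffic_since_last_report_ < tolerance_bytes_) return;
      traffic_since_last_report_ = 0;
      report_();
    }

   private:
    size_t tolerance_bytes_;
    std::function<void()> report_;
    size_t traffic_since_last_report_ = 0;
  };

  int main() {
    ZoneStatsThrottle throttle(1024 * 1024,  // 1 MB, the flag's default
                               [] { std::puts("dump zone stats"); });
    throttle.OnTraffic(512 * 1024);  // below tolerance: no report
    throttle.OnTraffic(768 * 1024);  // accumulated traffic crosses 1 MB: reports
    return 0;
  }

Unlike the old VerboseAccountingAllocator, which sampled only when the malloc'd total rose or fell past a threshold, this counts absolute traffic, so steady churn at a flat total still produces periodic reports.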
@@ -2806,9 +2839,7 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
: isolate_data_(this),
isolate_allocator_(std::move(isolate_allocator)),
id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
allocator_(FLAG_trace_zone_stats
? new VerboseAccountingAllocator(&heap_, 256 * KB)
: new AccountingAllocator()),
allocator_(new TracingAccountingAllocator(this)),
builtins_(this),
rail_mode_(PERFORMANCE_ANIMATION),
code_event_dispatcher_(new CodeEventDispatcher()),
@@ -973,6 +973,9 @@ DEFINE_GENERIC_IMPLICATION(
trace_zone_stats,
TracingFlags::zone_stats.store(
v8::tracing::TracingCategoryObserver::ENABLED_BY_NATIVE))
DEFINE_SIZE_T(
zone_stats_tolerance, 1 * MB,
"report a tick only when allocated zone memory changes by this amount")
DEFINE_BOOL(track_retaining_path, false,
"enable support for tracking retaining path")
DEFINE_DEBUG_BOOL(trace_backing_store, false, "trace backing store events")
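With these definitions the stats can be requested from the command line. A hypothetical invocation (assuming V8's usual mapping of FLAG_foo_bar to the --foo-bar form; the d8 shell and script name are only examples):

  d8 --trace-zone-stats --zone-stats-tolerance=2097152 script.js

This prints a v8-zone-trace JSON line roughly once per 2 MB of combined zone allocation and deallocation traffic, rather than on every zone creation and destruction as the old zonecreation/zonedestruction samples did.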
@@ -8,6 +8,7 @@
#include <atomic>
#include "src/base/macros.h"
#include "src/logging/tracing-flags.h"
namespace v8 {
namespace internal {
@@ -21,11 +22,11 @@ class V8_EXPORT_PRIVATE AccountingAllocator {
virtual ~AccountingAllocator();
// Allocates a new segment. Returns nullptr on failed allocation.
virtual Segment* AllocateSegment(size_t bytes);
Segment* AllocateSegment(size_t bytes);
// Return unneeded segments to either insert them into the pool or release
// them if the pool is already full or memory pressure is high.
virtual void ReturnSegment(Segment* memory);
void ReturnSegment(Segment* memory);
size_t GetCurrentMemoryUsage() const {
return current_memory_usage_.load(std::memory_order_relaxed);
@@ -35,8 +36,25 @@ class V8_EXPORT_PRIVATE AccountingAllocator {
return max_memory_usage_.load(std::memory_order_relaxed);
}
virtual void ZoneCreation(const Zone* zone) {}
virtual void ZoneDestruction(const Zone* zone) {}
void TraceZoneCreation(const Zone* zone) {
if (V8_LIKELY(!TracingFlags::is_zone_stats_enabled())) return;
TraceZoneCreationImpl(zone);
}
void TraceZoneDestruction(const Zone* zone) {
if (V8_LIKELY(!TracingFlags::is_zone_stats_enabled())) return;
TraceZoneDestructionImpl(zone);
}
void TraceAllocateSegment(Segment* segment) {
if (V8_LIKELY(!TracingFlags::is_zone_stats_enabled())) return;
TraceAllocateSegmentImpl(segment);
}
protected:
virtual void TraceZoneCreationImpl(const Zone* zone) {}
virtual void TraceZoneDestructionImpl(const Zone* zone) {}
virtual void TraceAllocateSegmentImpl(Segment* segment) {}
private:
std::atomic<size_t> current_memory_usage_{0};
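The header now follows a non-virtual-public / virtual-protected split: the public TraceZoneCreation/TraceZoneDestruction/TraceAllocateSegment wrappers check TracingFlags::is_zone_stats_enabled() and forward to the overridable *Impl hooks only when tracing is on, so the common case stays a cheap flag test. A compilable sketch of the same shape, with placeholder types standing in for the V8 classes:

  #include <atomic>
  #include <cstdio>

  struct Zone {};  // stand-in for v8::internal::Zone

  // Cheap global switch, analogous to TracingFlags::is_zone_stats_enabled().
  std::atomic<bool> g_zone_stats_enabled{false};

  class Allocator {
   public:
    virtual ~Allocator() = default;

    // Non-virtual fast path: a relaxed flag load, no virtual dispatch unless
    // tracing is enabled.
    void TraceZoneCreation(const Zone* zone) {
      if (!g_zone_stats_enabled.load(std::memory_order_relaxed)) return;
      TraceZoneCreationImpl(zone);
    }

   protected:
    // Default hook does nothing; tracing subclasses override it.
    virtual void TraceZoneCreationImpl(const Zone* /*zone*/) {}
  };

  class TracingAllocator : public Allocator {
   protected:
    void TraceZoneCreationImpl(const Zone* zone) override {
      std::printf("zone created: %p\n", static_cast<const void*>(zone));
    }
  };

  int main() {
    TracingAllocator allocator;
    Zone zone;
    allocator.TraceZoneCreation(&zone);  // silent: tracing disabled
    g_zone_stats_enabled = true;
    allocator.TraceZoneCreation(&zone);  // forwards to the Impl hook
    return 0;
  }

Because tracing is now expressed through these hooks rather than by overriding AllocateSegment/ReturnSegment, it can coexist with an allocator that also supports zone compression (point 1 of the commit message).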
@@ -36,11 +36,10 @@ Zone::Zone(AccountingAllocator* allocator, const char* name)
segment_head_(nullptr),
name_(name),
sealed_(false) {
allocator_->ZoneCreation(this);
allocator_->TraceZoneCreation(this);
}
Zone::~Zone() {
allocator_->ZoneDestruction(this);
DeleteAll();
DCHECK_EQ(segment_bytes_allocated_, 0);
@@ -74,14 +73,23 @@ void* Zone::AsanNew(size_t size) {
}
void Zone::ReleaseMemory() {
allocator_->ZoneDestruction(this);
DeleteAll();
allocator_->ZoneCreation(this);
allocator_->TraceZoneCreation(this);
}
void Zone::DeleteAll() {
Segment* current = segment_head_;
if (current) {
// Commit the allocation_size_ of segment_head_ and disconnect the segments
// list from the zone in order to ensure that tracing accounting allocator
// will observe value including memory from the head segment.
allocation_size_ = allocation_size();
segment_head_ = nullptr;
}
allocator_->TraceZoneDestruction(this);
// Traverse the chained list of segments and return them all to the allocator.
for (Segment* current = segment_head_; current;) {
while (current) {
Segment* next = current->next();
size_t size = current->total_size();
@@ -96,30 +104,14 @@ void Zone::DeleteAll() {
position_ = limit_ = 0;
allocation_size_ = 0;
segment_head_ = nullptr;
}
// Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment.
Segment* Zone::NewSegment(size_t requested_size) {
Segment* result = allocator_->AllocateSegment(requested_size);
if (!result) return nullptr;
DCHECK_GE(result->total_size(), requested_size);
segment_bytes_allocated_ += result->total_size();
result->set_zone(this);
result->set_next(segment_head_);
segment_head_ = result;
return result;
}
Address Zone::NewExpand(size_t size) {
// Make sure the requested size is already properly aligned and that
// there isn't enough room in the Zone to satisfy the request.
DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes));
DCHECK(limit_ - position_ < size);
DCHECK_LT(limit_ - position_, size);
// Commit the allocation_size_ of segment_head_ if any.
allocation_size_ = allocation_size();
// Compute the new segment size. We use a 'high water mark'
// strategy, where we increase the segment size every time we expand
// except that we employ a maximum segment size when we delete. This
@@ -148,12 +140,24 @@ Address Zone::NewExpand(size_t size) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
Segment* segment = NewSegment(new_size);
Segment* segment = allocator_->AllocateSegment(new_size);
if (segment == nullptr) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
DCHECK_GE(segment->total_size(), new_size);
segment_bytes_allocated_ += segment->total_size();
segment->set_zone(this);
segment->set_next(segment_head_);
// Commit the allocation_size_ of segment_head_ if any, in order to ensure
// that tracing accounting allocator will observe value including memory
// from the previous head segment.
allocation_size_ = allocation_size();
segment_head_ = segment;
allocator_->TraceAllocateSegment(segment);
// Recompute 'top' and 'limit' based on the new segment.
Address result = RoundUp(segment->start(), kAlignmentInBytes);
position_ = result + size;
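In both DeleteAll and NewExpand the pattern is the same: fold the head segment's usage into allocation_size_ before the segment list changes, so a concurrent TracingAccountingAllocator reading allocation_size_for_tracing() never misses bytes that were still only implied by position_. A simplified, non-V8 sketch of that ordering (MiniZone and Segment are invented stand-ins):

  #include <cstddef>

  struct Segment {
    Segment* next;
    char* start;
  };

  struct MiniZone {
    size_t allocation_size_ = 0;   // committed usage, excludes the head segment
    Segment* segment_head_ = nullptr;
    char* position_ = nullptr;

    // Head-segment usage that has not been committed yet.
    size_t UncommittedHeadBytes() const {
      return segment_head_ ? static_cast<size_t>(position_ - segment_head_->start)
                           : 0;
    }

    void InstallNewHead(Segment* segment) {
      // Commit the old head's usage first, then publish the new head,
      // mirroring the order used in Zone::NewExpand.
      allocation_size_ += UncommittedHeadBytes();
      segment->next = segment_head_;
      segment_head_ = segment;
      position_ = segment->start;
    }
  };

  int main() {
    static char backing_a[64], backing_b[64];
    Segment a{nullptr, backing_a};
    Segment b{nullptr, backing_b};

    MiniZone zone;
    zone.InstallNewHead(&a);
    zone.position_ += 48;     // pretend 48 bytes were bump-allocated from `a`
    zone.InstallNewHead(&b);  // commits those 48 bytes before `b` becomes the head
    return 0;
  }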
@@ -79,13 +79,21 @@ class V8_EXPORT_PRIVATE Zone final {
return segment_bytes_allocated_ > kExcessLimit;
}
size_t segment_bytes_allocated() const { return segment_bytes_allocated_; }
const char* name() const { return name_; }
// Returns precise value of used zone memory, allowed to be called only
// from thread owning the zone.
size_t allocation_size() const {
size_t extra = segment_head_ ? position_ - segment_head_->start() : 0;
return allocation_size_ + extra;
}
// Returns used zone memory not including the head segment, can be called
// from threads not owning the zone.
size_t allocation_size_for_tracing() const { return allocation_size_; }
AccountingAllocator* allocator() const { return allocator_; }
private:
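The two accessors make the threading contract explicit: allocation_size() adds the bytes already handed out from the current head segment (position_ - segment_head_->start()) on top of the committed allocation_size_, so it is precise but only valid on the zone's own thread, while allocation_size_for_tracing() returns just the committed counter that the zone updates before publishing a new head segment. A tiny worked example with invented numbers:

  #include <cstddef>
  #include <cstdio>

  int main() {
    // One full 8 KB segment already committed, plus 1 KB bump-allocated into
    // the current head segment.
    size_t committed_allocation_size = 8192;   // Zone::allocation_size_
    size_t live_bytes_in_head_segment = 1024;  // position_ - segment_head_->start()

    // Precise value, valid only on the thread owning the zone.
    size_t allocation_size =
        committed_allocation_size + live_bytes_in_head_segment;
    // Value the tracing allocator reads from another thread; head segment excluded.
    size_t allocation_size_for_tracing = committed_allocation_size;

    std::printf("allocation_size=%zu, for_tracing=%zu\n", allocation_size,
                allocation_size_for_tracing);  // prints 9216 and 8192
    return 0;
  }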
@@ -118,10 +126,6 @@
// room in the Zone already.
Address NewExpand(size_t size);
// Creates a new segment, sets it size, and pushes it to the front
// of the segment chain. Returns the new segment.
inline Segment* NewSegment(size_t requested_size);
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
// is guaranteed to be aligned as dictated by kAlignment.