Commit 3f8b0316 authored by Clemens Hammacher, committed by Commit Bot

[zone] Remove segment pooling from accounting allocator

This pooling introduces severe lock contention for Liftoff compilation,
since each compilation uses its own Zone, which performs at least one
segment allocation.
It's also unclear whether pooling improves performance, since {malloc}
should implement a similar pooling mechanism, but one that is better
optimized for multi-threaded use.

Feel free to revert if this introduces significant regressions.
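
For illustration only (not part of this commit): a minimal, self-contained C++ sketch of the contention pattern described above. All names in it (PooledSegmentAllocator, the thread count, the 8 KB segment size) are made up for the example; it only models how many compilation threads serialize on a single mutex-guarded segment pool, which is what direct malloc/free avoids.

```cpp
// Simplified model, not V8 code: each worker simulates a Liftoff compilation
// that creates a short-lived zone needing one segment. With a shared
// free-list pool behind one mutex, all workers serialize on that lock;
// plain malloc/free can use per-thread caches instead.
#include <cstddef>
#include <cstdlib>
#include <mutex>
#include <thread>
#include <vector>

class PooledSegmentAllocator {
 public:
  void* GetSegment(std::size_t bytes) {
    {
      std::lock_guard<std::mutex> guard(mutex_);  // contended by all workers
      if (!pool_.empty()) {
        void* segment = pool_.back();
        pool_.pop_back();
        return segment;
      }
    }
    return std::malloc(bytes);
  }

  void ReturnSegment(void* segment) {
    std::lock_guard<std::mutex> guard(mutex_);
    pool_.push_back(segment);
  }

  ~PooledSegmentAllocator() {
    for (void* segment : pool_) std::free(segment);
  }

 private:
  std::mutex mutex_;
  std::vector<void*> pool_;  // shared across threads, hence the contention
};

int main() {
  PooledSegmentAllocator allocator;
  std::vector<std::thread> workers;
  for (int i = 0; i < 8; ++i) {
    workers.emplace_back([&allocator] {
      for (int j = 0; j < 100000; ++j) {
        void* segment = allocator.GetSegment(8 * 1024);  // one zone segment
        allocator.ReturnSegment(segment);
      }
    });
  }
  for (std::thread& worker : workers) worker.join();
  return 0;
}
```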

R=verwaest@chromium.org

Bug: v8:8916
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel
Change-Id: Iaf988bed898e35700f5f7f3310df8e01918de4c9
Reviewed-on: https://chromium-review.googlesource.com/c/1491632
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59959}
parent 803ad324
@@ -6562,8 +6562,14 @@ class V8_EXPORT ResourceConstraints {
void set_code_range_size(size_t limit_in_mb) {
code_range_size_ = limit_in_mb;
}
-  size_t max_zone_pool_size() const { return max_zone_pool_size_; }
-  void set_max_zone_pool_size(size_t bytes) { max_zone_pool_size_ = bytes; }
+  V8_DEPRECATE_SOON("Zone does not pool memory any more.",
+                    size_t max_zone_pool_size() const) {
+    return max_zone_pool_size_;
+  }
+  V8_DEPRECATE_SOON("Zone does not pool memory any more.",
+                    void set_max_zone_pool_size(size_t bytes)) {
+    max_zone_pool_size_ = bytes;
+  }
private:
// max_semi_space_size_ is in KB
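For embedders, the practical effect of the hunk above is that both accessors still exist but are tagged V8_DEPRECATE_SOON, and (per the ConfigureDefaults and SetResourceConstraints hunks below) the configured value is no longer forwarded to the allocator. A hypothetical usage sketch, illustrative only and not taken from this commit:

```cpp
// Hypothetical embedder snippet: after this change the pool-size setter still
// compiles and stores the value, but V8 never reads it, and V8_DEPRECATE_SOON
// makes compilers warn once deprecation warnings are enabled.
#include <v8.h>

v8::ResourceConstraints MakeConstraints(uint64_t physical_memory,
                                        uint64_t virtual_memory_limit) {
  v8::ResourceConstraints constraints;
  constraints.ConfigureDefaults(physical_memory, virtual_memory_limit);
  // Deprecated no-op: zone segments are now allocated directly with malloc.
  constraints.set_max_zone_pool_size(8 * 1024);
  return constraints;
}
```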
@@ -980,7 +980,6 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
set_max_semi_space_size_in_kb(
i::Heap::ComputeMaxSemiSpaceSize(physical_memory));
set_max_old_space_size(i::Heap::ComputeMaxOldGenerationSize(physical_memory));
-  set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSize);
if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
// Reserve no more than 1/8 of the memory for the code range, but at most
@@ -996,12 +995,10 @@ void SetResourceConstraints(i::Isolate* isolate,
size_t semi_space_size = constraints.max_semi_space_size_in_kb();
size_t old_space_size = constraints.max_old_space_size();
size_t code_range_size = constraints.code_range_size();
-  size_t max_pool_size = constraints.max_zone_pool_size();
if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) {
isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
code_range_size);
}
-  isolate->allocator()->ConfigureSegmentPool(max_pool_size);
if (constraints.stack_limit() != nullptr) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
@@ -8692,7 +8689,6 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
? isolate->thread_manager()->IsLockedByCurrentThread()
: i::ThreadId::Current().Equals(isolate->thread_id());
isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
-  isolate->allocator()->MemoryPressureNotification(level);
}
void Isolate::EnableMemorySavingsMode() {
@@ -2701,27 +2701,17 @@ void Isolate::ThreadDataTable::RemoveAllThreads() {
class VerboseAccountingAllocator : public AccountingAllocator {
public:
-  VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes,
-                             size_t pool_sample_bytes)
-      : heap_(heap),
-        last_memory_usage_(0),
-        last_pool_size_(0),
-        nesting_deepth_(0),
-        allocation_sample_bytes_(allocation_sample_bytes),
-        pool_sample_bytes_(pool_sample_bytes) {}
-
-  v8::internal::Segment* GetSegment(size_t size) override {
-    v8::internal::Segment* memory = AccountingAllocator::GetSegment(size);
-    if (memory) {
-      size_t malloced_current = GetCurrentMemoryUsage();
-      size_t pooled_current = GetCurrentPoolSize();
-      if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current ||
-          last_pool_size_ + pool_sample_bytes_ < pooled_current) {
-        PrintMemoryJSON(malloced_current, pooled_current);
-        last_memory_usage_ = malloced_current;
-        last_pool_size_ = pooled_current;
-      }
-    }
+  VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes)
+      : heap_(heap), allocation_sample_bytes_(allocation_sample_bytes) {}
+
+  v8::internal::Segment* AllocateSegment(size_t size) override {
+    v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size);
+    if (!memory) return nullptr;
+    size_t malloced_current = GetCurrentMemoryUsage();
+    if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current) {
+      PrintMemoryJSON(malloced_current);
+      last_memory_usage_ = malloced_current;
+    }
return memory;
}
@@ -2729,13 +2719,10 @@ class VerboseAccountingAllocator : public AccountingAllocator {
void ReturnSegment(v8::internal::Segment* memory) override {
AccountingAllocator::ReturnSegment(memory);
size_t malloced_current = GetCurrentMemoryUsage();
-    size_t pooled_current = GetCurrentPoolSize();
-    if (malloced_current + allocation_sample_bytes_ < last_memory_usage_ ||
-        pooled_current + pool_sample_bytes_ < last_pool_size_) {
-      PrintMemoryJSON(malloced_current, pooled_current);
+    if (malloced_current + allocation_sample_bytes_ < last_memory_usage_) {
+      PrintMemoryJSON(malloced_current);
       last_memory_usage_ = malloced_current;
-      last_pool_size_ = pooled_current;
}
}
@@ -2767,7 +2754,7 @@ class VerboseAccountingAllocator : public AccountingAllocator {
zone->allocation_size(), nesting_deepth_.load());
}
-  void PrintMemoryJSON(size_t malloced, size_t pooled) {
+  void PrintMemoryJSON(size_t malloced) {
// Note: Neither isolate, nor heap is locked, so be careful with accesses
// as the allocator is potentially used on a concurrent thread.
double time = heap_->isolate()->time_millis_since_init();
@@ -2776,17 +2763,14 @@ class VerboseAccountingAllocator : public AccountingAllocator {
               "\"type\": \"zone\", "
               "\"isolate\": \"%p\", "
               "\"time\": %f, "
-              "\"allocated\": %" PRIuS
-              ","
-              "\"pooled\": %" PRIuS "}\n",
-              reinterpret_cast<void*>(heap_->isolate()), time, malloced, pooled);
+              "\"allocated\": %" PRIuS "}\n",
+              reinterpret_cast<void*>(heap_->isolate()), time, malloced);
}
Heap* heap_;
-  std::atomic<size_t> last_memory_usage_;
-  std::atomic<size_t> last_pool_size_;
-  std::atomic<size_t> nesting_deepth_;
-  size_t allocation_sample_bytes_, pool_sample_bytes_;
+  std::atomic<size_t> last_memory_usage_{0};
+  std::atomic<size_t> nesting_deepth_{0};
+  size_t allocation_sample_bytes_;
};
#ifdef DEBUG
@@ -2854,9 +2838,9 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
: isolate_allocator_(std::move(isolate_allocator)),
id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
stack_guard_(this),
-      allocator_(FLAG_trace_zone_stats ? new VerboseAccountingAllocator(
-                                             &heap_, 256 * KB, 128 * KB)
-                                       : new AccountingAllocator()),
+      allocator_(FLAG_trace_zone_stats
+                     ? new VerboseAccountingAllocator(&heap_, 256 * KB)
+                     : new AccountingAllocator()),
builtins_(this),
rail_mode_(PERFORMANCE_ANIMATION),
code_event_dispatcher_(new CodeEventDispatcher()),
@@ -17,67 +17,7 @@
namespace v8 {
namespace internal {
-AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
-  static const size_t kDefaultBucketMaxSize = 5;
-  std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
-            nullptr);
-  std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
-  std::fill(unused_segments_max_sizes_,
-            unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
-}
-
-AccountingAllocator::~AccountingAllocator() { ClearPool(); }
-
-void AccountingAllocator::MemoryPressureNotification(
-    MemoryPressureLevel level) {
-  memory_pressure_level_.store(level);
-  if (level != MemoryPressureLevel::kNone) {
-    ClearPool();
-  }
-}
-
-void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
-  // The sum of the bytes of one segment of each size.
-  static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
-                                  (size_t(1) << kMinSegmentSizePower);
-  size_t fits_fully = max_pool_size / full_size;
-  base::MutexGuard lock_guard(&unused_segments_mutex_);
-  // We assume few zones (less than 'fits_fully' many) to be active at the same
-  // time. When zones grow regularly, they will keep requesting segments of
-  // increasing size each time. Therefore we try to get as many segments with an
-  // equal number of segments of each size as possible.
-  // The remaining space is used to make more room for an 'incomplete set' of
-  // segments beginning with the smaller ones.
-  // This code will work best if the max_pool_size is a multiple of the
-  // full_size. If max_pool_size is no sum of segment sizes the actual pool
-  // size might be smaller then max_pool_size. Note that no actual memory gets
-  // wasted though.
-  // TODO(heimbuef): Determine better strategy generating a segment sizes
-  // distribution that is closer to real/benchmark usecases and uses the given
-  // max_pool_size more efficiently.
-  size_t total_size = fits_fully * full_size;
-  for (size_t power = 0; power < kNumberBuckets; ++power) {
-    if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
-        max_pool_size) {
-      unused_segments_max_sizes_[power] = fits_fully + 1;
-      total_size += size_t(1) << power;
-    } else {
-      unused_segments_max_sizes_[power] = fits_fully;
-    }
-  }
-}
-
-Segment* AccountingAllocator::GetSegment(size_t bytes) {
-  Segment* result = GetSegmentFromPool(bytes);
-  if (result == nullptr) result = AllocateSegment(bytes);
-  return result;
-}
+AccountingAllocator::~AccountingAllocator() = default;
Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void* memory = AllocWithRetry(bytes);
@@ -96,103 +36,10 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void AccountingAllocator::ReturnSegment(Segment* segment) {
segment->ZapContents();
-  if (memory_pressure_level_.load() != MemoryPressureLevel::kNone) {
-    FreeSegment(segment);
-  } else if (!AddSegmentToPool(segment)) {
-    FreeSegment(segment);
-  }
-}
-
-void AccountingAllocator::FreeSegment(Segment* memory) {
-  current_memory_usage_.fetch_sub(memory->total_size(),
+  current_memory_usage_.fetch_sub(segment->total_size(),
                                   std::memory_order_relaxed);
-  memory->ZapHeader();
-  free(memory);
-}
-
-Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
-  if (requested_size > (1 << kMaxSegmentSizePower)) {
-    return nullptr;
-  }
-  size_t power = kMinSegmentSizePower;
-  while (requested_size > (static_cast<size_t>(1) << power)) power++;
-  DCHECK_GE(power, kMinSegmentSizePower + 0);
-  power -= kMinSegmentSizePower;
-  Segment* segment;
-  {
-    base::MutexGuard lock_guard(&unused_segments_mutex_);
-    segment = unused_segments_heads_[power];
-    if (segment == nullptr) return nullptr;
-    unused_segments_heads_[power] = segment->next();
-    segment->set_next(nullptr);
-    unused_segments_sizes_[power]--;
-  }
-  current_pool_size_.fetch_sub(segment->total_size(),
-                               std::memory_order_relaxed);
-  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(segment->start()),
-                              segment->capacity());
-  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(segment->start(), segment->capacity());
-  DCHECK_GE(segment->total_size(), requested_size);
-  return segment;
-}
-
-bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
-  size_t size = segment->total_size();
-  if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;
-  if (size < (1 << kMinSegmentSizePower)) return false;
-  size_t power = kMaxSegmentSizePower;
-  while (size < (static_cast<size_t>(1) << power)) power--;
-  DCHECK_GE(power, kMinSegmentSizePower + 0);
-  power -= kMinSegmentSizePower;
-  {
-    base::MutexGuard lock_guard(&unused_segments_mutex_);
-    if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
-      return false;
-    }
-    segment->set_next(unused_segments_heads_[power]);
-    unused_segments_heads_[power] = segment;
-    unused_segments_sizes_[power]++;
-    // Poisoning needs to happen while still holding the mutex to guarantee that
-    // it happens before the segment is taken from the pool again.
-    ASAN_POISON_MEMORY_REGION(reinterpret_cast<void*>(segment->start()),
-                              segment->capacity());
-  }
-  current_pool_size_.fetch_add(size, std::memory_order_relaxed);
-  return true;
-}
-
-void AccountingAllocator::ClearPool() {
-  base::MutexGuard lock_guard(&unused_segments_mutex_);
-  for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
-       power++) {
-    Segment* current = unused_segments_heads_[power];
-    while (current) {
-      Segment* next = current->next();
-      FreeSegment(current);
-      current = next;
-    }
-    unused_segments_heads_[power] = nullptr;
-  }
+  segment->ZapHeader();
+  free(segment);
}
} // namespace internal
@@ -10,8 +10,6 @@
#include "include/v8-platform.h"
#include "include/v8.h"
#include "src/base/macros.h"
-#include "src/base/platform/mutex.h"
-#include "src/base/platform/semaphore.h"
#include "src/base/platform/time.h"
#include "src/zone/zone-segment.h"
#include "testing/gtest/include/gtest/gtest_prod.h" // nogncheck
@@ -21,13 +19,12 @@ namespace internal {
class V8_EXPORT_PRIVATE AccountingAllocator {
public:
-  static const size_t kMaxPoolSize = 8ul * KB;
-  AccountingAllocator();
+  AccountingAllocator() = default;
   virtual ~AccountingAllocator();
-  // Gets an empty segment from the pool or creates a new one.
-  virtual Segment* GetSegment(size_t bytes);
+  // Allocates a new segment. Returns nullptr on failed allocation.
+  virtual Segment* AllocateSegment(size_t bytes);
// Return unneeded segments to either insert them into the pool or release
// them if the pool is already full or memory pressure is high.
virtual void ReturnSegment(Segment* memory);
@@ -40,56 +37,12 @@ class V8_EXPORT_PRIVATE AccountingAllocator {
return max_memory_usage_.load(std::memory_order_relaxed);
}
-  size_t GetCurrentPoolSize() const {
-    return current_pool_size_.load(std::memory_order_relaxed);
-  }
-  void MemoryPressureNotification(MemoryPressureLevel level);
-  // Configures the zone segment pool size limits so the pool does not
-  // grow bigger than max_pool_size.
-  // TODO(heimbuef): Do not accept segments to pool that are larger than
-  // their size class requires. Sometimes the zones generate weird segments.
-  void ConfigureSegmentPool(const size_t max_pool_size);
virtual void ZoneCreation(const Zone* zone) {}
virtual void ZoneDestruction(const Zone* zone) {}
private:
-  FRIEND_TEST(Zone, SegmentPoolConstraints);
-  static const size_t kMinSegmentSizePower = 13;
-  static const size_t kMaxSegmentSizePower = 18;
-  STATIC_ASSERT(kMinSegmentSizePower <= kMaxSegmentSizePower);
-  static const size_t kNumberBuckets =
-      1 + kMaxSegmentSizePower - kMinSegmentSizePower;
-  // Allocates a new segment. Returns nullptr on failed allocation.
-  Segment* AllocateSegment(size_t bytes);
-  void FreeSegment(Segment* memory);
-  // Returns a segment from the pool of at least the requested size.
-  Segment* GetSegmentFromPool(size_t requested_size);
-  // Trys to add a segment to the pool. Returns false if the pool is full.
-  bool AddSegmentToPool(Segment* segment);
-  // Empties the pool and puts all its contents onto the garbage stack.
-  void ClearPool();
-  Segment* unused_segments_heads_[kNumberBuckets];
-  size_t unused_segments_sizes_[kNumberBuckets];
-  size_t unused_segments_max_sizes_[kNumberBuckets];
-  base::Mutex unused_segments_mutex_;
std::atomic<size_t> current_memory_usage_{0};
std::atomic<size_t> max_memory_usage_{0};
-  std::atomic<size_t> current_pool_size_{0};
-  std::atomic<MemoryPressureLevel> memory_pressure_level_{
-      MemoryPressureLevel::kNone};
DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
};
@@ -101,17 +101,16 @@ void Zone::DeleteAll() {
segment_head_ = nullptr;
}
-// Creates a new segment, sets it size, and pushes it to the front
+// Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment.
Segment* Zone::NewSegment(size_t requested_size) {
-  Segment* result = allocator_->GetSegment(requested_size);
-  if (result != nullptr) {
-    DCHECK_GE(result->total_size(), requested_size);
-    segment_bytes_allocated_ += result->total_size();
-    result->set_zone(this);
-    result->set_next(segment_head_);
-    segment_head_ = result;
-  }
+  Segment* result = allocator_->AllocateSegment(requested_size);
+  if (!result) return nullptr;
+  DCHECK_GE(result->total_size(), requested_size);
+  segment_bytes_allocated_ += result->total_size();
+  result->set_zone(this);
+  result->set_next(segment_head_);
+  segment_head_ = result;
return result;
}
@@ -99,7 +99,8 @@ TEST(AccountingAllocatorOOM) {
AllocationPlatform platform;
v8::internal::AccountingAllocator allocator;
CHECK(!platform.oom_callback_called);
-  v8::internal::Segment* result = allocator.GetSegment(GetHugeMemoryAmount());
+  v8::internal::Segment* result =
+      allocator.AllocateSegment(GetHugeMemoryAmount());
// On a few systems, allocation somehow succeeds.
CHECK_EQ(result == nullptr, platform.oom_callback_called);
}
@@ -214,7 +214,6 @@ v8_source_set("unittests_sources") {
     "wasm/wasm-macro-gen-unittest.cc",
    "wasm/wasm-module-builder-unittest.cc",
    "wasm/wasm-opcodes-unittest.cc",
-    "zone/segmentpool-unittest.cc",
"zone/zone-allocator-unittest.cc",
"zone/zone-chunk-list-unittest.cc",
"zone/zone-unittest.cc",

deleted file: zone/segmentpool-unittest.cc
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/zone/accounting-allocator.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
TEST(Zone, SegmentPoolConstraints) {
size_t sizes[]{
0, // Corner case
AccountingAllocator::kMaxPoolSize,
GB // Something really large
};
AccountingAllocator allocator;
for (size_t size : sizes) {
allocator.ConfigureSegmentPool(size);
size_t total_size = 0;
for (size_t power = 0; power < AccountingAllocator::kNumberBuckets;
++power) {
total_size +=
allocator.unused_segments_max_sizes_[power] * (size_t(1) << power);
}
EXPECT_LE(total_size, size);
}
}
} // namespace internal
} // namespace v8