Commit 3f8b0316 authored by Clemens Hammacher, committed by Commit Bot

[zone] Remove segment pooling from accounting allocator

This pooling introduces severe lock contention for Liftoff compilation,
since each compilation uses its own Zone, which performs at least one
segment allocation.
It is also unclear whether pooling improves performance in the first
place, since {malloc} should implement a similar pooling mechanism,
better optimized for multithreaded use.

Feel free to revert if this introduces significant regressions.
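To make the contention claim concrete, here is a minimal standalone sketch (not V8 code; all names are hypothetical) of the pattern the message describes: one process-wide, mutex-guarded segment pool that every compilation thread must pass through at least twice per compilation.

#include <mutex>
#include <thread>
#include <vector>

struct Segment {
  Segment* next = nullptr;
};

class PooledAllocator {
 public:
  ~PooledAllocator() {
    // Drain whatever is still pooled at shutdown.
    while (head_ != nullptr) {
      Segment* next = head_->next;
      delete head_;
      head_ = next;
    }
  }

  Segment* GetSegment() {
    {
      std::lock_guard<std::mutex> guard(mutex_);  // one lock shared by all threads
      if (head_ != nullptr) {
        Segment* segment = head_;
        head_ = segment->next;
        return segment;
      }
    }
    return new Segment();
  }

  void ReturnSegment(Segment* segment) {
    std::lock_guard<std::mutex> guard(mutex_);  // contended again on return
    segment->next = head_;
    head_ = segment;
  }

 private:
  std::mutex mutex_;
  Segment* head_ = nullptr;
};

int main() {
  PooledAllocator allocator;
  std::vector<std::thread> workers;
  for (int i = 0; i < 8; ++i) {
    workers.emplace_back([&allocator] {
      // Each iteration stands in for one compilation job that allocates at
      // least one segment from its own Zone, as Liftoff compilation does.
      for (int j = 0; j < 100000; ++j) {
        Segment* segment = allocator.GetSegment();
        allocator.ReturnSegment(segment);
      }
    });
  }
  for (std::thread& worker : workers) worker.join();
  return 0;
}

Removing the pool defers the allocate/free round trips to malloc, whose thread-local caches avoid the single shared lock.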

R=verwaest@chromium.org

Bug: v8:8916
Cq-Include-Trybots: luci.v8.try:v8_linux64_tsan_rel
Change-Id: Iaf988bed898e35700f5f7f3310df8e01918de4c9
Reviewed-on: https://chromium-review.googlesource.com/c/1491632
Commit-Queue: Clemens Hammacher <clemensh@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59959}
parent 803ad324
include/v8.h
@@ -6562,8 +6562,14 @@ class V8_EXPORT ResourceConstraints {
   void set_code_range_size(size_t limit_in_mb) {
     code_range_size_ = limit_in_mb;
   }
-  size_t max_zone_pool_size() const { return max_zone_pool_size_; }
-  void set_max_zone_pool_size(size_t bytes) { max_zone_pool_size_ = bytes; }
+  V8_DEPRECATE_SOON("Zone does not pool memory any more.",
+                    size_t max_zone_pool_size() const) {
+    return max_zone_pool_size_;
+  }
+  V8_DEPRECATE_SOON("Zone does not pool memory any more.",
+                    void set_max_zone_pool_size(size_t bytes)) {
+    max_zone_pool_size_ = bytes;
+  }

  private:
   // max_semi_space_size_ is in KB
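Since V8_DEPRECATE_SOON wraps the entire declarator, existing embedder code keeps compiling. A hypothetical call site (not part of the commit; assumes the V8 embedder headers, and the warning only fires in builds where deprecation warnings are enabled):

#include "include/v8.h"

// Hypothetical embedder code. With deprecation warnings enabled this now
// warns "Zone does not pool memory any more.", and the value it stores no
// longer influences zone allocation.
void ConfigureConstraints(v8::ResourceConstraints* constraints) {
  constraints->set_max_zone_pool_size(8 * 1024);
}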
src/api.cc
@@ -980,7 +980,6 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
   set_max_semi_space_size_in_kb(
       i::Heap::ComputeMaxSemiSpaceSize(physical_memory));
   set_max_old_space_size(i::Heap::ComputeMaxOldGenerationSize(physical_memory));
-  set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSize);

   if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
     // Reserve no more than 1/8 of the memory for the code range, but at most
@@ -996,12 +995,10 @@ void SetResourceConstraints(i::Isolate* isolate,
   size_t semi_space_size = constraints.max_semi_space_size_in_kb();
   size_t old_space_size = constraints.max_old_space_size();
   size_t code_range_size = constraints.code_range_size();
-  size_t max_pool_size = constraints.max_zone_pool_size();
   if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) {
     isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
                                    code_range_size);
   }
-  isolate->allocator()->ConfigureSegmentPool(max_pool_size);

   if (constraints.stack_limit() != nullptr) {
     uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
@@ -8692,7 +8689,6 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
           ? isolate->thread_manager()->IsLockedByCurrentThread()
           : i::ThreadId::Current().Equals(isolate->thread_id());
   isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
-  isolate->allocator()->MemoryPressureNotification(level);
 }

 void Isolate::EnableMemorySavingsMode() {
src/isolate.cc
@@ -2701,27 +2701,17 @@ void Isolate::ThreadDataTable::RemoveAllThreads() {
 class VerboseAccountingAllocator : public AccountingAllocator {
  public:
-  VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes,
-                             size_t pool_sample_bytes)
-      : heap_(heap),
-        last_memory_usage_(0),
-        last_pool_size_(0),
-        nesting_deepth_(0),
-        allocation_sample_bytes_(allocation_sample_bytes),
-        pool_sample_bytes_(pool_sample_bytes) {}
-
-  v8::internal::Segment* GetSegment(size_t size) override {
-    v8::internal::Segment* memory = AccountingAllocator::GetSegment(size);
-    if (memory) {
-      size_t malloced_current = GetCurrentMemoryUsage();
-      size_t pooled_current = GetCurrentPoolSize();
-
-      if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current ||
-          last_pool_size_ + pool_sample_bytes_ < pooled_current) {
-        PrintMemoryJSON(malloced_current, pooled_current);
-        last_memory_usage_ = malloced_current;
-        last_pool_size_ = pooled_current;
-      }
+  VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes)
+      : heap_(heap), allocation_sample_bytes_(allocation_sample_bytes) {}
+
+  v8::internal::Segment* AllocateSegment(size_t size) override {
+    v8::internal::Segment* memory = AccountingAllocator::AllocateSegment(size);
+    if (!memory) return nullptr;
+    size_t malloced_current = GetCurrentMemoryUsage();
+
+    if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current) {
+      PrintMemoryJSON(malloced_current);
+      last_memory_usage_ = malloced_current;
     }
     return memory;
   }
@@ -2729,13 +2719,10 @@ class VerboseAccountingAllocator : public AccountingAllocator {
   void ReturnSegment(v8::internal::Segment* memory) override {
     AccountingAllocator::ReturnSegment(memory);
     size_t malloced_current = GetCurrentMemoryUsage();
-    size_t pooled_current = GetCurrentPoolSize();

-    if (malloced_current + allocation_sample_bytes_ < last_memory_usage_ ||
-        pooled_current + pool_sample_bytes_ < last_pool_size_) {
-      PrintMemoryJSON(malloced_current, pooled_current);
+    if (malloced_current + allocation_sample_bytes_ < last_memory_usage_) {
+      PrintMemoryJSON(malloced_current);
       last_memory_usage_ = malloced_current;
-      last_pool_size_ = pooled_current;
     }
   }
@@ -2767,7 +2754,7 @@ class VerboseAccountingAllocator : public AccountingAllocator {
             zone->allocation_size(), nesting_deepth_.load());
   }

-  void PrintMemoryJSON(size_t malloced, size_t pooled) {
+  void PrintMemoryJSON(size_t malloced) {
     // Note: Neither isolate, nor heap is locked, so be careful with accesses
     // as the allocator is potentially used on a concurrent thread.
     double time = heap_->isolate()->time_millis_since_init();
@@ -2776,17 +2763,14 @@ class VerboseAccountingAllocator : public AccountingAllocator {
         "\"type\": \"zone\", "
         "\"isolate\": \"%p\", "
         "\"time\": %f, "
-        "\"allocated\": %" PRIuS
-        ","
-        "\"pooled\": %" PRIuS "}\n",
-        reinterpret_cast<void*>(heap_->isolate()), time, malloced, pooled);
+        "\"allocated\": %" PRIuS "}\n",
+        reinterpret_cast<void*>(heap_->isolate()), time, malloced);
   }

   Heap* heap_;
-  std::atomic<size_t> last_memory_usage_;
-  std::atomic<size_t> last_pool_size_;
-  std::atomic<size_t> nesting_deepth_;
-  size_t allocation_sample_bytes_, pool_sample_bytes_;
+  std::atomic<size_t> last_memory_usage_{0};
+  std::atomic<size_t> nesting_deepth_{0};
+  size_t allocation_sample_bytes_;
 };

 #ifdef DEBUG
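With --trace_zone_stats (FLAG_trace_zone_stats above), each sample now prints a single JSON object without the "pooled" field. An illustrative line, with made-up values matching the format string:

{"type": "zone", "isolate": "0x55e8a8d2e000", "time": 1234.567000, "allocated": 524288}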
@@ -2854,9 +2838,9 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
     : isolate_allocator_(std::move(isolate_allocator)),
       id_(isolate_counter.fetch_add(1, std::memory_order_relaxed)),
       stack_guard_(this),
-      allocator_(FLAG_trace_zone_stats ? new VerboseAccountingAllocator(
-                                             &heap_, 256 * KB, 128 * KB)
-                                       : new AccountingAllocator()),
+      allocator_(FLAG_trace_zone_stats
+                     ? new VerboseAccountingAllocator(&heap_, 256 * KB)
+                     : new AccountingAllocator()),
       builtins_(this),
       rail_mode_(PERFORMANCE_ANIMATION),
       code_event_dispatcher_(new CodeEventDispatcher()),
src/zone/accounting-allocator.cc
@@ -17,67 +17,7 @@
 namespace v8 {
 namespace internal {

-AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
-  static const size_t kDefaultBucketMaxSize = 5;
-
-  std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
-            nullptr);
-  std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
-  std::fill(unused_segments_max_sizes_,
-            unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
-}
-
-AccountingAllocator::~AccountingAllocator() { ClearPool(); }
-
-void AccountingAllocator::MemoryPressureNotification(
-    MemoryPressureLevel level) {
-  memory_pressure_level_.store(level);
-
-  if (level != MemoryPressureLevel::kNone) {
-    ClearPool();
-  }
-}
-
-void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
-  // The sum of the bytes of one segment of each size.
-  static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
-                                  (size_t(1) << kMinSegmentSizePower);
-  size_t fits_fully = max_pool_size / full_size;
-
-  base::MutexGuard lock_guard(&unused_segments_mutex_);
-
-  // We assume few zones (less than 'fits_fully' many) to be active at the same
-  // time. When zones grow regularly, they will keep requesting segments of
-  // increasing size each time. Therefore we try to get as many segments with an
-  // equal number of segments of each size as possible.
-  // The remaining space is used to make more room for an 'incomplete set' of
-  // segments beginning with the smaller ones.
-  // This code will work best if the max_pool_size is a multiple of the
-  // full_size. If max_pool_size is no sum of segment sizes the actual pool
-  // size might be smaller then max_pool_size. Note that no actual memory gets
-  // wasted though.
-  // TODO(heimbuef): Determine better strategy generating a segment sizes
-  // distribution that is closer to real/benchmark usecases and uses the given
-  // max_pool_size more efficiently.
-  size_t total_size = fits_fully * full_size;
-
-  for (size_t power = 0; power < kNumberBuckets; ++power) {
-    if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
-        max_pool_size) {
-      unused_segments_max_sizes_[power] = fits_fully + 1;
-      total_size += size_t(1) << power;
-    } else {
-      unused_segments_max_sizes_[power] = fits_fully;
-    }
-  }
-}
-
-Segment* AccountingAllocator::GetSegment(size_t bytes) {
-  Segment* result = GetSegmentFromPool(bytes);
-  if (result == nullptr) result = AllocateSegment(bytes);
-  return result;
-}
+AccountingAllocator::~AccountingAllocator() = default;

 Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
   void* memory = AllocWithRetry(bytes);
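For intuition about what the deleted ConfigureSegmentPool distributed (a worked example with illustrative numbers, not taken from the commit): the pooled size classes run from 2^13 = 8 KB up to 2^18 = 256 KB, so a complete set containing one segment of each class occupies full_size = 2^19 - 2^13 = 516096 bytes. A max_pool_size of 1 MB then gives fits_fully = 1048576 / 516096 = 2 complete sets, and the remaining 16384 bytes leave room for one extra segment only in the smallest class, so the per-bucket limits come out as 3 for the 8 KB bucket and 2 for each larger one.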
@@ -96,103 +36,10 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {

 void AccountingAllocator::ReturnSegment(Segment* segment) {
   segment->ZapContents();
-
-  if (memory_pressure_level_.load() != MemoryPressureLevel::kNone) {
-    FreeSegment(segment);
-  } else if (!AddSegmentToPool(segment)) {
-    FreeSegment(segment);
-  }
-}
-
-void AccountingAllocator::FreeSegment(Segment* memory) {
-  current_memory_usage_.fetch_sub(memory->total_size(),
-                                  std::memory_order_relaxed);
-  memory->ZapHeader();
-  free(memory);
-}
-
-Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
-  if (requested_size > (1 << kMaxSegmentSizePower)) {
-    return nullptr;
-  }
-
-  size_t power = kMinSegmentSizePower;
-  while (requested_size > (static_cast<size_t>(1) << power)) power++;
-
-  DCHECK_GE(power, kMinSegmentSizePower + 0);
-  power -= kMinSegmentSizePower;
-
-  Segment* segment;
-  {
-    base::MutexGuard lock_guard(&unused_segments_mutex_);
-
-    segment = unused_segments_heads_[power];
-    if (segment == nullptr) return nullptr;
-
-    unused_segments_heads_[power] = segment->next();
-    segment->set_next(nullptr);
-
-    unused_segments_sizes_[power]--;
-  }
-
-  current_pool_size_.fetch_sub(segment->total_size(),
-                               std::memory_order_relaxed);
-
-  ASAN_UNPOISON_MEMORY_REGION(reinterpret_cast<void*>(segment->start()),
-                              segment->capacity());
-  MSAN_ALLOCATED_UNINITIALIZED_MEMORY(segment->start(), segment->capacity());
-  DCHECK_GE(segment->total_size(), requested_size);
-  return segment;
-}
-
-bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
-  size_t size = segment->total_size();
-
-  if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;
-
-  if (size < (1 << kMinSegmentSizePower)) return false;
-
-  size_t power = kMaxSegmentSizePower;
-  while (size < (static_cast<size_t>(1) << power)) power--;
-
-  DCHECK_GE(power, kMinSegmentSizePower + 0);
-  power -= kMinSegmentSizePower;
-
-  {
-    base::MutexGuard lock_guard(&unused_segments_mutex_);
-
-    if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
-      return false;
-    }
-
-    segment->set_next(unused_segments_heads_[power]);
-    unused_segments_heads_[power] = segment;
-    unused_segments_sizes_[power]++;
-
-    // Poisoning needs to happen while still holding the mutex to guarantee that
-    // it happens before the segment is taken from the pool again.
-    ASAN_POISON_MEMORY_REGION(reinterpret_cast<void*>(segment->start()),
-                              segment->capacity());
-  }
-
-  current_pool_size_.fetch_add(size, std::memory_order_relaxed);
-  return true;
-}
-
-void AccountingAllocator::ClearPool() {
-  base::MutexGuard lock_guard(&unused_segments_mutex_);
-
-  for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
-       power++) {
-    Segment* current = unused_segments_heads_[power];
-    while (current) {
-      Segment* next = current->next();
-      FreeSegment(current);
-      current = next;
-    }
-    unused_segments_heads_[power] = nullptr;
-  }
-}
+  current_memory_usage_.fetch_sub(segment->total_size(),
+                                  std::memory_order_relaxed);
+  segment->ZapHeader();
+  free(segment);
 }

 }  // namespace internal
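The size-class rounding that the removed GetSegmentFromPool relied on is easy to check in isolation. A minimal standalone sketch mirroring the deleted rounding logic (not V8 code):

#include <cstddef>
#include <cstdio>

constexpr size_t kMinSegmentSizePower = 13;  // 8 KB, smallest pooled class
constexpr size_t kMaxSegmentSizePower = 18;  // 256 KB, largest pooled class

// Returns the pool bucket index for a request, or -1 if too large to pool,
// rounding the request up to the next power-of-two size class.
int BucketFor(size_t requested_size) {
  if (requested_size > (size_t{1} << kMaxSegmentSizePower)) return -1;
  size_t power = kMinSegmentSizePower;
  while (requested_size > (size_t{1} << power)) power++;
  return static_cast<int>(power - kMinSegmentSizePower);
}

int main() {
  std::printf("%d\n", BucketFor(10000));    // 1: rounds up to the 16 KB class
  std::printf("%d\n", BucketFor(8192));     // 0: exactly the 8 KB class
  std::printf("%d\n", BucketFor(1 << 20));  // -1: 1 MB was never pooled
  return 0;
}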
src/zone/accounting-allocator.h
@@ -10,8 +10,6 @@
 #include "include/v8-platform.h"
 #include "include/v8.h"
 #include "src/base/macros.h"
-#include "src/base/platform/mutex.h"
-#include "src/base/platform/semaphore.h"
 #include "src/base/platform/time.h"
 #include "src/zone/zone-segment.h"
 #include "testing/gtest/include/gtest/gtest_prod.h"  // nogncheck
@@ -21,13 +19,12 @@ namespace internal {
 class V8_EXPORT_PRIVATE AccountingAllocator {
  public:
-  static const size_t kMaxPoolSize = 8ul * KB;
-
-  AccountingAllocator();
+  AccountingAllocator() = default;
   virtual ~AccountingAllocator();

-  // Gets an empty segment from the pool or creates a new one.
-  virtual Segment* GetSegment(size_t bytes);
+  // Allocates a new segment. Returns nullptr on failed allocation.
+  virtual Segment* AllocateSegment(size_t bytes);

   // Return unneeded segments to either insert them into the pool or release
   // them if the pool is already full or memory pressure is high.
   virtual void ReturnSegment(Segment* memory);
@@ -40,56 +37,12 @@ class V8_EXPORT_PRIVATE AccountingAllocator {
     return max_memory_usage_.load(std::memory_order_relaxed);
   }

-  size_t GetCurrentPoolSize() const {
-    return current_pool_size_.load(std::memory_order_relaxed);
-  }
-
-  void MemoryPressureNotification(MemoryPressureLevel level);
-
-  // Configures the zone segment pool size limits so the pool does not
-  // grow bigger than max_pool_size.
-  // TODO(heimbuef): Do not accept segments to pool that are larger than
-  // their size class requires. Sometimes the zones generate weird segments.
-  void ConfigureSegmentPool(const size_t max_pool_size);
-
   virtual void ZoneCreation(const Zone* zone) {}
   virtual void ZoneDestruction(const Zone* zone) {}

  private:
-  FRIEND_TEST(Zone, SegmentPoolConstraints);
-
-  static const size_t kMinSegmentSizePower = 13;
-  static const size_t kMaxSegmentSizePower = 18;
-
-  STATIC_ASSERT(kMinSegmentSizePower <= kMaxSegmentSizePower);
-
-  static const size_t kNumberBuckets =
-      1 + kMaxSegmentSizePower - kMinSegmentSizePower;
-
-  // Allocates a new segment. Returns nullptr on failed allocation.
-  Segment* AllocateSegment(size_t bytes);
-  void FreeSegment(Segment* memory);
-
-  // Returns a segment from the pool of at least the requested size.
-  Segment* GetSegmentFromPool(size_t requested_size);
-  // Trys to add a segment to the pool. Returns false if the pool is full.
-  bool AddSegmentToPool(Segment* segment);
-
-  // Empties the pool and puts all its contents onto the garbage stack.
-  void ClearPool();
-
-  Segment* unused_segments_heads_[kNumberBuckets];
-  size_t unused_segments_sizes_[kNumberBuckets];
-  size_t unused_segments_max_sizes_[kNumberBuckets];
-
-  base::Mutex unused_segments_mutex_;
-
   std::atomic<size_t> current_memory_usage_{0};
   std::atomic<size_t> max_memory_usage_{0};
-  std::atomic<size_t> current_pool_size_{0};
-
-  std::atomic<MemoryPressureLevel> memory_pressure_level_{
-      MemoryPressureLevel::kNone};

   DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
 };
src/zone/zone.cc
@@ -101,17 +101,16 @@ void Zone::DeleteAll() {
   segment_head_ = nullptr;
 }

-// Creates a new segment, sets it size, and pushes it to the front
+// Creates a new segment, sets its size, and pushes it to the front
 // of the segment chain. Returns the new segment.
 Segment* Zone::NewSegment(size_t requested_size) {
-  Segment* result = allocator_->GetSegment(requested_size);
-  if (result != nullptr) {
-    DCHECK_GE(result->total_size(), requested_size);
-    segment_bytes_allocated_ += result->total_size();
-    result->set_zone(this);
-    result->set_next(segment_head_);
-    segment_head_ = result;
-  }
+  Segment* result = allocator_->AllocateSegment(requested_size);
+  if (!result) return nullptr;
+  DCHECK_GE(result->total_size(), requested_size);
+  segment_bytes_allocated_ += result->total_size();
+  result->set_zone(this);
+  result->set_next(segment_head_);
+  segment_head_ = result;
   return result;
 }
test/cctest/test-allocation.cc
@@ -99,7 +99,8 @@ TEST(AccountingAllocatorOOM) {
   AllocationPlatform platform;
   v8::internal::AccountingAllocator allocator;
   CHECK(!platform.oom_callback_called);
-  v8::internal::Segment* result = allocator.GetSegment(GetHugeMemoryAmount());
+  v8::internal::Segment* result =
+      allocator.AllocateSegment(GetHugeMemoryAmount());
   // On a few systems, allocation somehow succeeds.
   CHECK_EQ(result == nullptr, platform.oom_callback_called);
 }
test/unittests/BUILD.gn
@@ -214,7 +214,6 @@ v8_source_set("unittests_sources") {
     "wasm/wasm-macro-gen-unittest.cc",
     "wasm/wasm-module-builder-unittest.cc",
     "wasm/wasm-opcodes-unittest.cc",
-    "zone/segmentpool-unittest.cc",
     "zone/zone-allocator-unittest.cc",
     "zone/zone-chunk-list-unittest.cc",
     "zone/zone-unittest.cc",
test/unittests/zone/segmentpool-unittest.cc (file deleted)
-// Copyright 2016 the V8 project authors. All rights reserved.
-// Use of this source code is governed by a BSD-style license that can be
-// found in the LICENSE file.
-
-#include "src/zone/accounting-allocator.h"
-#include "testing/gtest/include/gtest/gtest.h"
-
-namespace v8 {
-namespace internal {
-
-TEST(Zone, SegmentPoolConstraints) {
-  size_t sizes[]{
-      0,  // Corner case
-      AccountingAllocator::kMaxPoolSize,
-      GB  // Something really large
-  };
-
-  AccountingAllocator allocator;
-  for (size_t size : sizes) {
-    allocator.ConfigureSegmentPool(size);
-    size_t total_size = 0;
-    for (size_t power = 0; power < AccountingAllocator::kNumberBuckets;
-         ++power) {
-      total_size +=
-          allocator.unused_segments_max_sizes_[power] * (size_t(1) << power);
-    }
-    EXPECT_LE(total_size, size);
-  }
-}
-
-}  // namespace internal
-}  // namespace v8