Commit de20e6d3 authored by Toon Verwaest, committed by Commit Bot

[zone] Revert to previous zone allocation strategies due to severe memory regressions.

Unfortunately the previous strategy was slower, but it was more memory-efficient. For now, simply revert.

Revert "[zone] Use 32kb instead of 1MB as high zone page size"
Revert "[zone] Get rid of the Zone's segment pool"
Revert "[zone] Further simplify zone expansion, use single default page size"

Bug: chromium:908359
Change-Id: I649542e7e61eef0c14a26ffd21039e8340ab4d04
Reviewed-on: https://chromium-review.googlesource.com/c/1351027
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Toon Verwaest <verwaest@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57872}
parent a31ffef8
@@ -945,6 +945,7 @@ void ResourceConstraints::ConfigureDefaults(uint64_t physical_memory,
set_max_semi_space_size_in_kb(
i::Heap::ComputeMaxSemiSpaceSize(physical_memory));
set_max_old_space_size(i::Heap::ComputeMaxOldGenerationSize(physical_memory));
set_max_zone_pool_size(i::AccountingAllocator::kMaxPoolSize);
if (virtual_memory_limit > 0 && i::kRequiresCodeRange) {
// Reserve no more than 1/8 of the memory for the code range, but at most
@@ -960,10 +961,12 @@ void SetResourceConstraints(i::Isolate* isolate,
size_t semi_space_size = constraints.max_semi_space_size_in_kb();
size_t old_space_size = constraints.max_old_space_size();
size_t code_range_size = constraints.code_range_size();
size_t max_pool_size = constraints.max_zone_pool_size();
if (semi_space_size != 0 || old_space_size != 0 || code_range_size != 0) {
isolate->heap()->ConfigureHeap(semi_space_size, old_space_size,
code_range_size);
}
isolate->allocator()->ConfigureSegmentPool(max_pool_size);
if (constraints.stack_limit() != nullptr) {
uintptr_t limit = reinterpret_cast<uintptr_t>(constraints.stack_limit());
@@ -8465,8 +8468,8 @@ void Isolate::GetHeapStatistics(HeapStatistics* heap_statistics) {
isolate->wasm_engine()->allocator()->GetCurrentMemoryUsage();
heap_statistics->external_memory_ = isolate->heap()->external_memory();
heap_statistics->peak_malloced_memory_ =
isolate->allocator()->GetPeakMemoryUsage() +
isolate->wasm_engine()->allocator()->GetPeakMemoryUsage();
isolate->allocator()->GetMaxMemoryUsage() +
isolate->wasm_engine()->allocator()->GetMaxMemoryUsage();
heap_statistics->number_of_native_contexts_ = heap->NumberOfNativeContexts();
heap_statistics->number_of_detached_contexts_ =
heap->NumberOfDetachedContexts();
@@ -8755,6 +8758,7 @@ void Isolate::MemoryPressureNotification(MemoryPressureLevel level) {
? isolate->thread_manager()->IsLockedByCurrentThread()
: i::ThreadId::Current().Equals(isolate->thread_id());
isolate->heap()->MemoryPressureNotification(level, on_isolate_thread);
isolate->allocator()->MemoryPressureNotification(level);
}
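// Illustrative embedder-side sketch, not part of this change: with the pool
// restored, the zone segment pool limit again travels through
// ResourceConstraints (applied in SetResourceConstraints above), and
// MemoryPressureNotification now also reaches the AccountingAllocator so it
// can drop pooled segments. The 4 MB cap below is an arbitrary example value.
#include "v8.h"

void CapZonePoolBeforeIsolateCreation(v8::Isolate::CreateParams* params) {
  // Overrides the default taken from i::AccountingAllocator::kMaxPoolSize.
  params->constraints.set_max_zone_pool_size(4u * 1024 * 1024);
}

void SignalMemoryPressure(v8::Isolate* isolate) {
  // In addition to the existing heap handling, this now also clears the
  // isolate's zone segment pool.
  isolate->MemoryPressureNotification(v8::MemoryPressureLevel::kCritical);
}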
void Isolate::EnableMemorySavingsMode() {
......
@@ -135,7 +135,10 @@ Code BuildWithCodeStubAssemblerJS(Isolate* isolate, int32_t builtin_index,
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
Zone zone(isolate->allocator(), ZONE_NAME);
SegmentSize segment_size = isolate->serializer_enabled()
? SegmentSize::kLarge
: SegmentSize::kDefault;
Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
const int argc_with_recv =
(argc == SharedFunctionInfo::kDontAdaptArgumentsSentinel) ? 0 : argc + 1;
compiler::CodeAssemblerState state(
@@ -157,7 +160,10 @@ Code BuildWithCodeStubAssemblerCS(Isolate* isolate, int32_t builtin_index,
// Canonicalize handles, so that we can share constant pool entries pointing
// to code targets without dereferencing their handles.
CanonicalHandleScope canonical(isolate);
Zone zone(isolate->allocator(), ZONE_NAME);
SegmentSize segment_size = isolate->serializer_enabled()
? SegmentSize::kLarge
: SegmentSize::kDefault;
Zone zone(isolate->allocator(), ZONE_NAME, segment_size);
// The interface descriptor with given key must be initialized at this point
// and this construction just queries the details from the descriptors table.
CallInterfaceDescriptor descriptor(interface_descriptor);
......
@@ -4060,7 +4060,7 @@ void Heap::RecordStats(HeapStats* stats, bool take_snapshot) {
memory_allocator()->Size() + memory_allocator()->Available();
*stats->os_error = base::OS::GetLastError();
*stats->malloced_memory = isolate_->allocator()->GetCurrentMemoryUsage();
*stats->malloced_peak_memory = isolate_->allocator()->GetPeakMemoryUsage();
*stats->malloced_peak_memory = isolate_->allocator()->GetMaxMemoryUsage();
if (take_snapshot) {
HeapIterator iterator(this);
for (HeapObject* obj = iterator.next(); obj != nullptr;
......
@@ -2640,20 +2640,26 @@ void Isolate::ThreadDataTable::RemoveAllThreads() {
class VerboseAccountingAllocator : public AccountingAllocator {
public:
VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes)
VerboseAccountingAllocator(Heap* heap, size_t allocation_sample_bytes,
size_t pool_sample_bytes)
: heap_(heap),
last_memory_usage_(0),
last_pool_size_(0),
nesting_deepth_(0),
allocation_sample_bytes_(allocation_sample_bytes) {}
allocation_sample_bytes_(allocation_sample_bytes),
pool_sample_bytes_(pool_sample_bytes) {}
v8::internal::Segment* GetSegment(size_t size) override {
v8::internal::Segment* memory = AccountingAllocator::GetSegment(size);
if (memory) {
size_t malloced_current = GetCurrentMemoryUsage();
size_t pooled_current = GetCurrentPoolSize();
if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current) {
PrintMemoryJSON(malloced_current);
if (last_memory_usage_ + allocation_sample_bytes_ < malloced_current ||
last_pool_size_ + pool_sample_bytes_ < pooled_current) {
PrintMemoryJSON(malloced_current, pooled_current);
last_memory_usage_ = malloced_current;
last_pool_size_ = pooled_current;
}
}
return memory;
@@ -2662,10 +2668,13 @@ class VerboseAccountingAllocator : public AccountingAllocator {
void ReturnSegment(v8::internal::Segment* memory) override {
AccountingAllocator::ReturnSegment(memory);
size_t malloced_current = GetCurrentMemoryUsage();
size_t pooled_current = GetCurrentPoolSize();
if (malloced_current + allocation_sample_bytes_ < last_memory_usage_) {
PrintMemoryJSON(malloced_current);
if (malloced_current + allocation_sample_bytes_ < last_memory_usage_ ||
pooled_current + pool_sample_bytes_ < last_pool_size_) {
PrintMemoryJSON(malloced_current, pooled_current);
last_memory_usage_ = malloced_current;
last_pool_size_ = pooled_current;
}
}
@@ -2697,7 +2706,7 @@ class VerboseAccountingAllocator : public AccountingAllocator {
zone->allocation_size(), nesting_deepth_.load());
}
void PrintMemoryJSON(size_t malloced) {
void PrintMemoryJSON(size_t malloced, size_t pooled) {
// Note: Neither isolate, nor heap is locked, so be careful with accesses
// as the allocator is potentially used on a concurrent thread.
double time = heap_->isolate()->time_millis_since_init();
@@ -2706,14 +2715,17 @@ class VerboseAccountingAllocator : public AccountingAllocator {
"\"type\": \"zone\", "
"\"isolate\": \"%p\", "
"\"time\": %f, "
"\"allocated\": %" PRIuS "}\n",
reinterpret_cast<void*>(heap_->isolate()), time, malloced);
"\"allocated\": %" PRIuS
","
"\"pooled\": %" PRIuS "}\n",
reinterpret_cast<void*>(heap_->isolate()), time, malloced, pooled);
}
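// For illustration only (hypothetical values): with --trace_zone_stats the
// restored format above emits one JSON object per sample, e.g.
//   {"type": "zone", "isolate": "0x563a1c2f9000", "time": 1234.500000, "allocated": 524288,"pooled": 131072}
// (the missing space before "pooled" mirrors the format string as written).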
Heap* heap_;
std::atomic<size_t> last_memory_usage_;
std::atomic<size_t> last_pool_size_;
std::atomic<size_t> nesting_deepth_;
size_t allocation_sample_bytes_;
size_t allocation_sample_bytes_, pool_sample_bytes_;
};
#ifdef DEBUG
@@ -2781,9 +2793,9 @@ Isolate::Isolate(std::unique_ptr<i::IsolateAllocator> isolate_allocator)
: isolate_allocator_(std::move(isolate_allocator)),
id_(base::Relaxed_AtomicIncrement(&isolate_counter_, 1)),
stack_guard_(this),
allocator_(FLAG_trace_zone_stats
? new VerboseAccountingAllocator(&heap_, 256 * KB)
: new AccountingAllocator()),
allocator_(FLAG_trace_zone_stats ? new VerboseAccountingAllocator(
&heap_, 256 * KB, 128 * KB)
: new AccountingAllocator()),
builtins_(this),
rail_mode_(PERFORMANCE_ANIMATION),
code_event_dispatcher_(new CodeEventDispatcher()),
......
@@ -15,9 +15,71 @@
namespace v8 {
namespace internal {
AccountingAllocator::AccountingAllocator() : unused_segments_mutex_() {
static const size_t kDefaultBucketMaxSize = 5;
memory_pressure_level_.SetValue(MemoryPressureLevel::kNone);
std::fill(unused_segments_heads_, unused_segments_heads_ + kNumberBuckets,
nullptr);
std::fill(unused_segments_sizes_, unused_segments_sizes_ + kNumberBuckets, 0);
std::fill(unused_segments_max_sizes_,
unused_segments_max_sizes_ + kNumberBuckets, kDefaultBucketMaxSize);
}
AccountingAllocator::~AccountingAllocator() { ClearPool(); }
void AccountingAllocator::MemoryPressureNotification(
MemoryPressureLevel level) {
memory_pressure_level_.SetValue(level);
if (level != MemoryPressureLevel::kNone) {
ClearPool();
}
}
void AccountingAllocator::ConfigureSegmentPool(const size_t max_pool_size) {
// The sum of the bytes of one segment of each size.
static const size_t full_size = (size_t(1) << (kMaxSegmentSizePower + 1)) -
(size_t(1) << kMinSegmentSizePower);
size_t fits_fully = max_pool_size / full_size;
base::MutexGuard lock_guard(&unused_segments_mutex_);
// We assume few zones (less than 'fits_fully' many) to be active at the same
// time. When zones grow regularly, they will keep requesting segments of
// increasing size each time. Therefore we try to keep an equal number of
// pooled segments of each size, fitting as many such full sets as possible.
// The remaining space is used to make more room for an 'incomplete set' of
// segments beginning with the smaller ones.
// This code will work best if the max_pool_size is a multiple of the
// full_size. If max_pool_size is not a sum of segment sizes, the actual pool
// size might be smaller than max_pool_size. Note that no actual memory gets
// wasted though.
// TODO(heimbuef): Determine better strategy generating a segment sizes
// distribution that is closer to real/benchmark usecases and uses the given
// max_pool_size more efficiently.
size_t total_size = fits_fully * full_size;
for (size_t power = 0; power < kNumberBuckets; ++power) {
if (total_size + (size_t(1) << (power + kMinSegmentSizePower)) <=
max_pool_size) {
unused_segments_max_sizes_[power] = fits_fully + 1;
total_size += size_t(1) << power;
} else {
unused_segments_max_sizes_[power] = fits_fully;
}
}
}
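// Worked example (illustrative, not part of the source): with
// kMinSegmentSizePower = 13 and kMaxSegmentSizePower = 18 the pool has six
// buckets of 8, 16, 32, 64, 128 and 256 KB segments, so
// full_size = 2^19 - 2^13 = 516096 bytes (~504 KB). For an assumed
// max_pool_size of 1 MB, fits_fully = 1048576 / 516096 = 2, so every bucket
// may pool at least two segments; the remaining ~16 KB of budget lets the
// 8 KB bucket keep one extra segment (3 instead of 2), while all larger
// buckets stay at 2.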
Segment* AccountingAllocator::GetSegment(size_t bytes) {
Segment* result = AllocateSegment(bytes);
if (result != nullptr) result->Initialize(bytes);
Segment* result = GetSegmentFromPool(bytes);
if (result == nullptr) {
result = AllocateSegment(bytes);
if (result != nullptr) {
result->Initialize(bytes);
}
}
return result;
}
@@ -26,9 +88,9 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
if (memory != nullptr) {
base::AtomicWord current =
base::Relaxed_AtomicIncrement(&current_memory_usage_, bytes);
base::AtomicWord peak = base::Relaxed_Load(&peak_memory_usage_);
while (current > peak) {
peak = base::Relaxed_CompareAndSwap(&peak_memory_usage_, peak, current);
base::AtomicWord max = base::Relaxed_Load(&max_memory_usage_);
while (current > max) {
max = base::Relaxed_CompareAndSwap(&max_memory_usage_, max, current);
}
}
return reinterpret_cast<Segment*>(memory);
@@ -36,7 +98,12 @@ Segment* AccountingAllocator::AllocateSegment(size_t bytes) {
void AccountingAllocator::ReturnSegment(Segment* segment) {
segment->ZapContents();
FreeSegment(segment);
if (memory_pressure_level_.Value() != MemoryPressureLevel::kNone) {
FreeSegment(segment);
} else if (!AddSegmentToPool(segment)) {
FreeSegment(segment);
}
}
void AccountingAllocator::FreeSegment(Segment* memory) {
@@ -46,13 +113,95 @@ void AccountingAllocator::FreeSegment(Segment* memory) {
free(memory);
}
size_t AccountingAllocator::GetPeakMemoryUsage() const {
return base::Relaxed_Load(&peak_memory_usage_);
}
size_t AccountingAllocator::GetCurrentMemoryUsage() const {
return base::Relaxed_Load(&current_memory_usage_);
}
size_t AccountingAllocator::GetMaxMemoryUsage() const {
return base::Relaxed_Load(&max_memory_usage_);
}
size_t AccountingAllocator::GetCurrentPoolSize() const {
return base::Relaxed_Load(&current_pool_size_);
}
Segment* AccountingAllocator::GetSegmentFromPool(size_t requested_size) {
if (requested_size > (1 << kMaxSegmentSizePower)) {
return nullptr;
}
size_t power = kMinSegmentSizePower;
while (requested_size > (static_cast<size_t>(1) << power)) power++;
DCHECK_GE(power, kMinSegmentSizePower + 0);
power -= kMinSegmentSizePower;
Segment* segment;
{
base::MutexGuard lock_guard(&unused_segments_mutex_);
segment = unused_segments_heads_[power];
if (segment != nullptr) {
unused_segments_heads_[power] = segment->next();
segment->set_next(nullptr);
unused_segments_sizes_[power]--;
base::Relaxed_AtomicIncrement(
&current_pool_size_, -static_cast<base::AtomicWord>(segment->size()));
}
}
if (segment) {
DCHECK_GE(segment->size(), requested_size);
}
return segment;
}
bool AccountingAllocator::AddSegmentToPool(Segment* segment) {
size_t size = segment->size();
if (size >= (1 << (kMaxSegmentSizePower + 1))) return false;
if (size < (1 << kMinSegmentSizePower)) return false;
size_t power = kMaxSegmentSizePower;
while (size < (static_cast<size_t>(1) << power)) power--;
DCHECK_GE(power, kMinSegmentSizePower + 0);
power -= kMinSegmentSizePower;
{
base::MutexGuard lock_guard(&unused_segments_mutex_);
if (unused_segments_sizes_[power] >= unused_segments_max_sizes_[power]) {
return false;
}
segment->set_next(unused_segments_heads_[power]);
unused_segments_heads_[power] = segment;
base::Relaxed_AtomicIncrement(&current_pool_size_, size);
unused_segments_sizes_[power]++;
}
return true;
}
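// Standalone sketch (not V8 code, range guards omitted) of the bucket
// selection used by GetSegmentFromPool() and AddSegmentToPool() above:
// requests round *up* to the next bucket so a caller never receives a segment
// smaller than asked for, while returned segments round *down* so a bucket
// never holds a segment smaller than its nominal size.
#include <cstddef>
#include <cstdio>

namespace {
constexpr size_t kMinSegmentSizePower = 13;  // 8 KB is the smallest bucket
constexpr size_t kMaxSegmentSizePower = 18;  // 256 KB is the largest bucket

size_t RequestBucket(size_t requested_size) {  // mirrors GetSegmentFromPool()
  size_t power = kMinSegmentSizePower;
  while (requested_size > (size_t{1} << power)) power++;
  return power - kMinSegmentSizePower;
}

size_t ReturnBucket(size_t segment_size) {  // mirrors AddSegmentToPool()
  size_t power = kMaxSegmentSizePower;
  while (segment_size < (size_t{1} << power)) power--;
  return power - kMinSegmentSizePower;
}
}  // namespace

int main() {
  // A 10000-byte request is served from bucket 1 (16 KB segments)...
  std::printf("request 10000 -> bucket %zu\n", RequestBucket(10000));
  // ...but a returned 10000-byte segment lands in bucket 0 (8 KB).
  std::printf("return 10000 -> bucket %zu\n", ReturnBucket(10000));
  return 0;
}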
void AccountingAllocator::ClearPool() {
base::MutexGuard lock_guard(&unused_segments_mutex_);
for (size_t power = 0; power <= kMaxSegmentSizePower - kMinSegmentSizePower;
power++) {
Segment* current = unused_segments_heads_[power];
while (current) {
Segment* next = current->next();
FreeSegment(current);
current = next;
}
unused_segments_heads_[power] = nullptr;
}
}
} // namespace internal
} // namespace v8
@@ -21,25 +21,67 @@ namespace internal {
class V8_EXPORT_PRIVATE AccountingAllocator {
public:
AccountingAllocator() {}
virtual ~AccountingAllocator() {}
static const size_t kMaxPoolSize = 8ul * KB;
AccountingAllocator();
virtual ~AccountingAllocator();
// Gets an empty segment from the pool or creates a new one.
virtual Segment* GetSegment(size_t bytes);
// Return unneeded segments to either insert them into the pool or release
// them if the pool is already full or memory pressure is high.
virtual void ReturnSegment(Segment* memory);
size_t GetPeakMemoryUsage() const;
size_t GetCurrentMemoryUsage() const;
size_t GetMaxMemoryUsage() const;
size_t GetCurrentPoolSize() const;
void MemoryPressureNotification(MemoryPressureLevel level);
// Configures the zone segment pool size limits so the pool does not
// grow bigger than max_pool_size.
// TODO(heimbuef): Do not accept segments into the pool that are larger than
// their size class requires. Sometimes the zones generate weird segments.
void ConfigureSegmentPool(const size_t max_pool_size);
virtual void ZoneCreation(const Zone* zone) {}
virtual void ZoneDestruction(const Zone* zone) {}
private:
FRIEND_TEST(Zone, SegmentPoolConstraints);
static const size_t kMinSegmentSizePower = 13;
static const size_t kMaxSegmentSizePower = 18;
STATIC_ASSERT(kMinSegmentSizePower <= kMaxSegmentSizePower);
static const size_t kNumberBuckets =
1 + kMaxSegmentSizePower - kMinSegmentSizePower;
// Allocates a new segment. Returns nullptr on failed allocation.
Segment* AllocateSegment(size_t bytes);
void FreeSegment(Segment* memory);
base::AtomicWord peak_memory_usage_ = 0;
// Returns a segment from the pool of at least the requested size.
Segment* GetSegmentFromPool(size_t requested_size);
// Tries to add a segment to the pool. Returns false if the pool is full.
bool AddSegmentToPool(Segment* segment);
// Empties the pool and frees all of its segments.
void ClearPool();
Segment* unused_segments_heads_[kNumberBuckets];
size_t unused_segments_sizes_[kNumberBuckets];
size_t unused_segments_max_sizes_[kNumberBuckets];
base::Mutex unused_segments_mutex_;
base::AtomicWord current_memory_usage_ = 0;
base::AtomicWord max_memory_usage_ = 0;
base::AtomicWord current_pool_size_ = 0;
base::AtomicValue<MemoryPressureLevel> memory_pressure_level_;
DISALLOW_COPY_AND_ASSIGN(AccountingAllocator);
};
......
@@ -22,6 +22,9 @@ class Segment {
public:
void Initialize(size_t size) { size_ = size; }
Zone* zone() const { return zone_; }
void set_zone(Zone* const zone) { zone_ = zone; }
Segment* next() const { return next_; }
void set_next(Segment* const next) { next_ = next; }
@@ -47,6 +50,7 @@ class Segment {
return reinterpret_cast<Address>(this) + n;
}
Zone* zone_;
Segment* next_;
size_t size_;
};
......
@@ -27,7 +27,8 @@ constexpr size_t kASanRedzoneBytes = 0;
} // namespace
Zone::Zone(AccountingAllocator* allocator, const char* name)
Zone::Zone(AccountingAllocator* allocator, const char* name,
SegmentSize segment_size)
: allocation_size_(0),
segment_bytes_allocated_(0),
position_(0),
@@ -35,7 +36,8 @@ Zone::Zone(AccountingAllocator* allocator, const char* name)
allocator_(allocator),
segment_head_(nullptr),
name_(name),
sealed_(false) {
sealed_(false),
segment_size_(segment_size) {
allocator_->ZoneCreation(this);
}
@@ -99,32 +101,64 @@ void Zone::DeleteAll() {
segment_head_ = nullptr;
}
// Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment.
Segment* Zone::NewSegment(size_t requested_size) {
Segment* result = allocator_->GetSegment(requested_size);
if (result != nullptr) {
DCHECK_GE(result->size(), requested_size);
segment_bytes_allocated_ += result->size();
result->set_zone(this);
result->set_next(segment_head_);
segment_head_ = result;
}
return result;
}
Address Zone::NewExpand(size_t size) {
// Make sure the requested size is already properly aligned and that
// there isn't enough room in the Zone to satisfy the request.
DCHECK_EQ(size, RoundDown(size, kAlignmentInBytes));
DCHECK_LT(limit_ - position_, size);
DCHECK(limit_ - position_ < size);
// Commit the allocation_size_ of segment_head_ if any.
allocation_size_ = allocation_size();
// Compute the new segment size. We use a 'high water mark'
// strategy, where we increase the segment size every time we expand
// except that we employ a maximum segment size when we delete. This
// is to avoid excessive malloc() and free() overhead.
Segment* head = segment_head_;
const size_t old_size = (head == nullptr) ? 0 : head->size();
static const size_t kSegmentOverhead = sizeof(Segment) + kAlignmentInBytes;
const size_t min_size = kSegmentOverhead + size;
const size_t new_size_no_overhead = size + (old_size << 1);
size_t new_size = kSegmentOverhead + new_size_no_overhead;
const size_t min_new_size = kSegmentOverhead + size;
// Guard against integer overflow.
if (V8_UNLIKELY(!IsInRange(min_size, size, static_cast<size_t>(INT_MAX)))) {
if (new_size_no_overhead < size || new_size < kSegmentOverhead) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
const size_t requested_size = Max(min_size, kDefaultSegmentSize);
Segment* segment = allocator_->GetSegment(requested_size);
if (V8_UNLIKELY(segment == nullptr)) {
if (segment_size_ == SegmentSize::kLarge) {
new_size = kMaximumSegmentSize;
}
if (new_size < kMinimumSegmentSize) {
new_size = kMinimumSegmentSize;
} else if (new_size > kMaximumSegmentSize) {
// Limit the size of new segments to avoid growing the segment size
// exponentially, thus putting pressure on contiguous virtual address space.
// All the while making sure to allocate a segment large enough to hold the
// requested size.
new_size = Max(min_new_size, kMaximumSegmentSize);
}
if (new_size > INT_MAX) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
Segment* segment = NewSegment(new_size);
if (segment == nullptr) {
V8::FatalProcessOutOfMemory(nullptr, "Zone");
return kNullAddress;
}
DCHECK_GE(segment->size(), requested_size);
segment_bytes_allocated_ += segment->size();
segment->set_next(segment_head_);
segment_head_ = segment;
// Recompute 'top' and 'limit' based on the new segment.
Address result = RoundUp(segment->start(), kAlignmentInBytes);
@@ -132,9 +166,9 @@ Address Zone::NewExpand(size_t size) {
// Check for address overflow.
// (Should not happen since the segment is guaranteed to accommodate
// size bytes + header and alignment padding)
DCHECK_LE(result, position_);
DCHECK(position_ >= result);
limit_ = segment->end();
DCHECK_LE(position_, limit_);
DCHECK(position_ <= limit_);
return result;
}
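// Worked example (illustrative, with assumed numbers): kSegmentOverhead is
// sizeof(Segment) + kAlignmentInBytes, roughly 32 bytes on a 64-bit build. If
// the current head segment is 64 KB and a 1 KB request no longer fits,
// new_size = 32 + 1024 + 2 * 65536 = 132128 bytes, i.e. the zone roughly
// doubles its head segment on each expansion. The result is clamped to the
// restored [kMinimumSegmentSize, kMaximumSegmentSize] = [8 KB, 1 MB] range,
// except that a single over-large request still gets min_new_size, and a zone
// created with SegmentSize::kLarge jumps straight to 1 MB segments.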
......
@@ -37,9 +37,12 @@ namespace internal {
// Note: The implementation is inherently not thread safe. Do not use
// from multi-threaded code.
enum class SegmentSize { kLarge, kDefault };
class V8_EXPORT_PRIVATE Zone final {
public:
Zone(AccountingAllocator* allocator, const char* name);
Zone(AccountingAllocator* allocator, const char* name,
SegmentSize segment_size = SegmentSize::kDefault);
~Zone();
// Allocate 'size' bytes of memory in the Zone; expands the Zone by
@@ -96,7 +99,10 @@ class V8_EXPORT_PRIVATE Zone final {
static const size_t kAlignmentInBytes = 8;
// Never allocate segments smaller than this size in bytes.
static const size_t kDefaultSegmentSize = 32 * KB;
static const size_t kMinimumSegmentSize = 8 * KB;
// Never allocate segments larger than this size in bytes.
static const size_t kMaximumSegmentSize = 1 * MB;
// Report zone excess when allocation exceeds this limit.
static const size_t kExcessLimit = 256 * MB;
@@ -115,6 +121,10 @@ class V8_EXPORT_PRIVATE Zone final {
// room in the Zone already.
Address NewExpand(size_t size);
// Creates a new segment, sets its size, and pushes it to the front
// of the segment chain. Returns the new segment.
inline Segment* NewSegment(size_t requested_size);
// The free region in the current (front) segment is represented as
// the half-open interval [position, limit). The 'position' variable
// is guaranteed to be aligned as dictated by kAlignment.
@@ -126,6 +136,7 @@ class V8_EXPORT_PRIVATE Zone final {
Segment* segment_head_;
const char* name_;
bool sealed_;
SegmentSize segment_size_;
};
// ZoneObject is an abstraction that helps define classes of objects
......
@@ -211,6 +211,7 @@ v8_source_set("unittests_sources") {
"wasm/wasm-macro-gen-unittest.cc",
"wasm/wasm-module-builder-unittest.cc",
"wasm/wasm-opcodes-unittest.cc",
"zone/segmentpool-unittest.cc",
"zone/zone-allocator-unittest.cc",
"zone/zone-chunk-list-unittest.cc",
"zone/zone-unittest.cc",
......
// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/zone/accounting-allocator.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace v8 {
namespace internal {
TEST(Zone, SegmentPoolConstraints) {
size_t sizes[]{
0, // Corner case
AccountingAllocator::kMaxPoolSize,
GB // Something really large
};
AccountingAllocator allocator;
for (size_t size : sizes) {
allocator.ConfigureSegmentPool(size);
size_t total_size = 0;
for (size_t power = 0; power < AccountingAllocator::kNumberBuckets;
++power) {
total_size +=
allocator.unused_segments_max_sizes_[power] * (size_t(1) << power);
}
EXPECT_LE(total_size, size);
}
}
} // namespace internal
} // namespace v8
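To run only the new test after building the standard unittests target (the output directory below is just an example; the filter syntax is standard gtest): out/x64.release/unittests --gtest_filter=Zone.SegmentPoolConstraints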