Commit 5d1d0795 authored by Peter Marshall, committed by Commit Bot

[cleanup] Use unique_ptr for MemoryAllocator in Heap

Also drive-by clean up the TestMemoryAllocatorScope class so that it
takes ownership of the old allocator for as long as it is active, and
so that the MemoryAllocator for testing is constructed inside the
scope rather than passed into it. This means users no longer need to
explicitly call TearDown() and delete the allocator, as the scope does
that for them.
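
The new scope is an instance of a common RAII ownership-swap pattern:
move the owner's unique_ptr aside on construction, install a freshly
constructed replacement, and swap the original back on destruction. A
minimal self-contained sketch of that pattern follows; Widget,
WidgetHolder, and TestWidgetScope are hypothetical stand-ins used for
illustration, not V8 types.

#include <cassert>
#include <memory>
#include <utility>

// Hypothetical stand-ins for the owning object (Heap) and the owned
// component (MemoryAllocator).
struct Widget {
  explicit Widget(int id) : id(id) {}
  int id;
};

struct WidgetHolder {
  std::unique_ptr<Widget> widget;
};

// Mirrors the TestMemoryAllocatorScope idea: construct the replacement
// inside the scope, keep the old object alive for the scope's
// lifetime, and restore it automatically on destruction.
class TestWidgetScope {
 public:
  TestWidgetScope(WidgetHolder* holder, int test_id)
      : holder_(holder), old_widget_(std::move(holder->widget)) {
    holder_->widget = std::make_unique<Widget>(test_id);
  }

  Widget* widget() { return holder_->widget.get(); }

  ~TestWidgetScope() {
    // Any per-object teardown would run here; the swap then puts the
    // original back, and the test widget is deleted with old_widget_.
    holder_->widget.swap(old_widget_);
  }

 private:
  WidgetHolder* holder_;
  std::unique_ptr<Widget> old_widget_;
};

int main() {
  WidgetHolder holder{std::make_unique<Widget>(1)};
  {
    TestWidgetScope scope(&holder, 42);
    assert(scope.widget()->id == 42);  // replacement is installed
  }  // scope destructor restores the original and frees the test widget
  assert(holder.widget->id == 1);
  return 0;
}

Because the destructor does the swap and teardown, callers cannot
forget the cleanup, which is exactly the property the test changes
below rely on.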

Change-Id: Id7da3c074618a376d2edfe3385bb185ba8287cea
Reviewed-on: https://chromium-review.googlesource.com/c/1392194
Commit-Queue: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59627}
parent 54e51522
@@ -200,6 +200,8 @@ Heap::Heap()
   RememberUnmappedPage(kNullAddress, false);
 }
 
+Heap::~Heap() = default;
+
 size_t Heap::MaxReserved() {
   const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
   return static_cast<size_t>(2 * max_semi_space_size_ +
@@ -1738,7 +1740,7 @@ bool Heap::PerformGarbageCollection(
       Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
 
   {
-    Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
+    Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_.get());
 
     switch (collector) {
       case MARK_COMPACTOR:
@@ -4496,30 +4498,30 @@ void Heap::SetUp() {
       ~kMmapRegionMask;
 
   // Set up memory allocator.
-  memory_allocator_ =
-      new MemoryAllocator(isolate_, MaxReserved(), code_range_size_);
+  memory_allocator_.reset(
+      new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
 
-  store_buffer_ = new StoreBuffer(this);
+  store_buffer_.reset(new StoreBuffer(this));
 
-  heap_controller_ = new HeapController(this);
+  heap_controller_.reset(new HeapController(this));
 
-  mark_compact_collector_ = new MarkCompactCollector(this);
+  mark_compact_collector_.reset(new MarkCompactCollector(this));
 
-  scavenger_collector_ = new ScavengerCollector(this);
+  scavenger_collector_.reset(new ScavengerCollector(this));
 
-  incremental_marking_ =
-      new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
-                             mark_compact_collector_->weak_objects());
+  incremental_marking_.reset(
+      new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
+                             mark_compact_collector_->weak_objects()));
 
   if (FLAG_concurrent_marking || FLAG_parallel_marking) {
     MarkCompactCollector::MarkingWorklist* marking_worklist =
         mark_compact_collector_->marking_worklist();
-    concurrent_marking_ = new ConcurrentMarking(
-        this, marking_worklist->shared(), marking_worklist->on_hold(),
-        mark_compact_collector_->weak_objects(), marking_worklist->embedder());
+    concurrent_marking_.reset(new ConcurrentMarking(
+        this, marking_worklist->shared(), marking_worklist->on_hold(),
+        mark_compact_collector_->weak_objects(), marking_worklist->embedder()));
   } else {
-    concurrent_marking_ =
-        new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
+    concurrent_marking_.reset(
+        new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr));
   }
 
   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
@@ -4543,20 +4545,20 @@ void Heap::SetUp() {
     deferred_counters_[i] = 0;
   }
 
-  tracer_ = new GCTracer(this);
+  tracer_.reset(new GCTracer(this));
 #ifdef ENABLE_MINOR_MC
   minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
 #else
   minor_mark_compact_collector_ = nullptr;
 #endif  // ENABLE_MINOR_MC
-  array_buffer_collector_ = new ArrayBufferCollector(this);
-  gc_idle_time_handler_ = new GCIdleTimeHandler();
-  memory_reducer_ = new MemoryReducer(this);
+  array_buffer_collector_.reset(new ArrayBufferCollector(this));
+  gc_idle_time_handler_.reset(new GCIdleTimeHandler());
+  memory_reducer_.reset(new MemoryReducer(this));
   if (V8_UNLIKELY(FLAG_gc_stats)) {
-    live_object_stats_ = new ObjectStats(this);
-    dead_object_stats_ = new ObjectStats(this);
+    live_object_stats_.reset(new ObjectStats(this));
+    dead_object_stats_.reset(new ObjectStats(this));
   }
-  local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer(isolate());
+  local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
 
   LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
   LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -4571,10 +4573,10 @@ void Heap::SetUp() {
 #endif  // ENABLE_MINOR_MC
 
   if (FLAG_idle_time_scavenge) {
-    scavenge_job_ = new ScavengeJob();
-    idle_scavenge_observer_ = new IdleScavengeObserver(
-        *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
-    new_space()->AddAllocationObserver(idle_scavenge_observer_);
+    scavenge_job_.reset(new ScavengeJob());
+    idle_scavenge_observer_.reset(new IdleScavengeObserver(
+        *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
+    new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
   }
 
   SetGetExternallyAllocatedMemoryInBytesCallback(
@@ -4744,11 +4746,9 @@ void Heap::TearDown() {
   }
 
   if (FLAG_idle_time_scavenge) {
-    new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
-    delete idle_scavenge_observer_;
-    idle_scavenge_observer_ = nullptr;
-    delete scavenge_job_;
-    scavenge_job_ = nullptr;
+    new_space()->RemoveAllocationObserver(idle_scavenge_observer_.get());
+    idle_scavenge_observer_.reset();
+    scavenge_job_.reset();
   }
 
   if (FLAG_stress_marking > 0) {
@@ -4763,15 +4763,11 @@ void Heap::TearDown() {
     stress_scavenge_observer_ = nullptr;
   }
 
-  if (heap_controller_ != nullptr) {
-    delete heap_controller_;
-    heap_controller_ = nullptr;
-  }
+  heap_controller_.reset();
 
-  if (mark_compact_collector_ != nullptr) {
+  if (mark_compact_collector_) {
     mark_compact_collector_->TearDown();
-    delete mark_compact_collector_;
-    mark_compact_collector_ = nullptr;
+    mark_compact_collector_.reset();
   }
 
 #ifdef ENABLE_MINOR_MC
@@ -4782,43 +4778,22 @@ void Heap::TearDown() {
   }
 #endif  // ENABLE_MINOR_MC
 
-  if (scavenger_collector_ != nullptr) {
-    delete scavenger_collector_;
-    scavenger_collector_ = nullptr;
-  }
-
-  if (array_buffer_collector_ != nullptr) {
-    delete array_buffer_collector_;
-    array_buffer_collector_ = nullptr;
-  }
-
-  delete incremental_marking_;
-  incremental_marking_ = nullptr;
-
-  delete concurrent_marking_;
-  concurrent_marking_ = nullptr;
-
-  delete gc_idle_time_handler_;
-  gc_idle_time_handler_ = nullptr;
+  scavenger_collector_.reset();
+  array_buffer_collector_.reset();
+  incremental_marking_.reset();
+  concurrent_marking_.reset();
+
+  gc_idle_time_handler_.reset();
 
   if (memory_reducer_ != nullptr) {
     memory_reducer_->TearDown();
-    delete memory_reducer_;
-    memory_reducer_ = nullptr;
+    memory_reducer_.reset();
   }
 
-  if (live_object_stats_ != nullptr) {
-    delete live_object_stats_;
-    live_object_stats_ = nullptr;
-  }
-
-  if (dead_object_stats_ != nullptr) {
-    delete dead_object_stats_;
-    dead_object_stats_ = nullptr;
-  }
+  live_object_stats_.reset();
+  dead_object_stats_.reset();
 
-  delete local_embedder_heap_tracer_;
-  local_embedder_heap_tracer_ = nullptr;
+  local_embedder_heap_tracer_.reset();
 
   external_string_table_.TearDown();
@@ -4827,8 +4802,7 @@ void Heap::TearDown() {
   // store.
   ArrayBufferTracker::TearDown(this);
 
-  delete tracer_;
-  tracer_ = nullptr;
+  tracer_.reset();
 
   for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
     delete space_[i];
@@ -4846,11 +4820,8 @@ void Heap::TearDown() {
   }
   strong_roots_list_ = nullptr;
 
-  delete store_buffer_;
-  store_buffer_ = nullptr;
-
-  delete memory_allocator_;
-  memory_allocator_ = nullptr;
+  store_buffer_.reset();
+  memory_allocator_.reset();
 }
 
 void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
@@ -5664,10 +5635,10 @@ bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
 void Heap::CreateObjectStats() {
   if (V8_LIKELY(FLAG_gc_stats == 0)) return;
   if (!live_object_stats_) {
-    live_object_stats_ = new ObjectStats(this);
+    live_object_stats_.reset(new ObjectStats(this));
   }
   if (!dead_object_stats_) {
-    dead_object_stats_ = new ObjectStats(this);
+    dead_object_stats_.reset(new ObjectStats(this));
   }
 }
...
@@ -624,14 +624,14 @@ class Heap {
   // Getters to other components. ==============================================
   // ===========================================================================
 
-  GCTracer* tracer() { return tracer_; }
+  GCTracer* tracer() { return tracer_.get(); }
 
-  MemoryAllocator* memory_allocator() { return memory_allocator_; }
+  MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
 
   inline Isolate* isolate();
 
   MarkCompactCollector* mark_compact_collector() {
-    return mark_compact_collector_;
+    return mark_compact_collector_.get();
   }
 
   MinorMarkCompactCollector* minor_mark_compact_collector() {
@@ -639,7 +639,7 @@ class Heap {
   }
 
   ArrayBufferCollector* array_buffer_collector() {
-    return array_buffer_collector_;
+    return array_buffer_collector_.get();
   }
 
   // ===========================================================================
@@ -829,13 +829,15 @@ class Heap {
       Reservation* reservations, const std::vector<HeapObject>& large_objects,
       const std::vector<Address>& maps);
 
-  IncrementalMarking* incremental_marking() { return incremental_marking_; }
+  IncrementalMarking* incremental_marking() {
+    return incremental_marking_.get();
+  }
 
   // ===========================================================================
   // Concurrent marking API. ===================================================
   // ===========================================================================
 
-  ConcurrentMarking* concurrent_marking() { return concurrent_marking_; }
+  ConcurrentMarking* concurrent_marking() { return concurrent_marking_.get(); }
 
   // The runtime uses this function to notify potentially unsafe object layout
   // changes that require special synchronization with the concurrent marker.
@@ -873,7 +875,7 @@ class Heap {
   // ===========================================================================
 
   LocalEmbedderHeapTracer* local_embedder_heap_tracer() const {
-    return local_embedder_heap_tracer_;
+    return local_embedder_heap_tracer_.get();
   }
 
   void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
@@ -1377,6 +1379,7 @@ class Heap {
   static const int kInitialFeedbackCapacity = 256;
 
   Heap();
+  ~Heap();
 
   // Selects the proper allocation space based on the pretenuring decision.
   static AllocationSpace SelectSpace(PretenureFlag pretenure) {
@@ -1400,7 +1403,7 @@ class Heap {
   ROOT_LIST(ROOT_ACCESSOR)
 #undef ROOT_ACCESSOR
 
-  StoreBuffer* store_buffer() { return store_buffer_; }
+  StoreBuffer* store_buffer() { return store_buffer_.get(); }
 
   void set_current_gc_flags(int flags) {
     current_gc_flags_ = flags;
@@ -1621,8 +1624,8 @@ class Heap {
   // Growing strategy. =========================================================
   // ===========================================================================
 
-  HeapController* heap_controller() { return heap_controller_; }
-  MemoryReducer* memory_reducer() { return memory_reducer_; }
+  HeapController* heap_controller() { return heap_controller_.get(); }
+  MemoryReducer* memory_reducer() { return memory_reducer_.get(); }
 
   // For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
   // This constant limits the effect of load RAIL mode on GC.
@@ -1900,23 +1903,23 @@ class Heap {
   // Last time a garbage collection happened.
   double last_gc_time_ = 0.0;
 
-  GCTracer* tracer_ = nullptr;
-  MarkCompactCollector* mark_compact_collector_ = nullptr;
+  std::unique_ptr<GCTracer> tracer_;
+  std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
   MinorMarkCompactCollector* minor_mark_compact_collector_ = nullptr;
-  ScavengerCollector* scavenger_collector_ = nullptr;
-  ArrayBufferCollector* array_buffer_collector_ = nullptr;
-  MemoryAllocator* memory_allocator_ = nullptr;
-  StoreBuffer* store_buffer_ = nullptr;
-  HeapController* heap_controller_ = nullptr;
-  IncrementalMarking* incremental_marking_ = nullptr;
-  ConcurrentMarking* concurrent_marking_ = nullptr;
-  GCIdleTimeHandler* gc_idle_time_handler_ = nullptr;
-  MemoryReducer* memory_reducer_ = nullptr;
-  ObjectStats* live_object_stats_ = nullptr;
-  ObjectStats* dead_object_stats_ = nullptr;
-  ScavengeJob* scavenge_job_ = nullptr;
-  AllocationObserver* idle_scavenge_observer_ = nullptr;
-  LocalEmbedderHeapTracer* local_embedder_heap_tracer_ = nullptr;
+  std::unique_ptr<ScavengerCollector> scavenger_collector_;
+  std::unique_ptr<ArrayBufferCollector> array_buffer_collector_;
+  std::unique_ptr<MemoryAllocator> memory_allocator_;
+  std::unique_ptr<StoreBuffer> store_buffer_;
+  std::unique_ptr<HeapController> heap_controller_;
+  std::unique_ptr<IncrementalMarking> incremental_marking_;
+  std::unique_ptr<ConcurrentMarking> concurrent_marking_;
+  std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
+  std::unique_ptr<MemoryReducer> memory_reducer_;
+  std::unique_ptr<ObjectStats> live_object_stats_;
+  std::unique_ptr<ObjectStats> dead_object_stats_;
+  std::unique_ptr<ScavengeJob> scavenge_job_;
+  std::unique_ptr<AllocationObserver> idle_scavenge_observer_;
+  std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
   StrongRootsList* strong_roots_list_ = nullptr;
 
   // This counter is increased before each GC and never reset.
...
@@ -1741,8 +1741,8 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
 void MarkCompactCollector::RecordObjectStats() {
   if (V8_UNLIKELY(FLAG_gc_stats)) {
     heap()->CreateObjectStats();
-    ObjectStatsCollector collector(heap(), heap()->live_object_stats_,
-                                   heap()->dead_object_stats_);
+    ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
+                                   heap()->dead_object_stats_.get());
     collector.Collect();
     if (V8_UNLIKELY(FLAG_gc_stats &
                     v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
...
@@ -701,10 +701,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   unsigned epoch() const { return epoch_; }
 
- private:
   explicit MarkCompactCollector(Heap* heap);
   ~MarkCompactCollector() override;
 
+  // Used by wrapper tracing.
+  V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
+
+ private:
   void ComputeEvacuationHeuristics(size_t area_size,
                                    int* target_fragmentation_percent,
                                    size_t* max_evacuated_bytes);
@@ -724,9 +727,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   // This is for non-incremental marking only.
   V8_INLINE void MarkRootObject(Root root, HeapObject obj);
 
-  // Used by wrapper tracing.
-  V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
-
   // Mark the heap roots and all objects reachable from them.
   void MarkRoots(RootVisitor* root_visitor,
                  ObjectVisitor* custom_root_body_visitor);
@@ -905,7 +905,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
   unsigned epoch_ = 0;
 
   friend class FullEvacuator;
-  friend class Heap;
   friend class RecordMigratedSlotVisitor;
 };
...
@@ -45,18 +45,24 @@ namespace heap {
 // Temporarily sets a given allocator in an isolate.
 class TestMemoryAllocatorScope {
  public:
-  TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
-      : isolate_(isolate), old_allocator_(isolate->heap()->memory_allocator()) {
-    isolate->heap()->memory_allocator_ = allocator;
+  TestMemoryAllocatorScope(Isolate* isolate, size_t max_capacity,
+                           size_t code_range_size)
+      : isolate_(isolate),
+        old_allocator_(std::move(isolate->heap()->memory_allocator_)) {
+    isolate->heap()->memory_allocator_.reset(
+        new MemoryAllocator(isolate, max_capacity, code_range_size));
   }
 
+  MemoryAllocator* allocator() { return isolate_->heap()->memory_allocator(); }
+
   ~TestMemoryAllocatorScope() {
-    isolate_->heap()->memory_allocator_ = old_allocator_;
+    isolate_->heap()->memory_allocator()->TearDown();
+    isolate_->heap()->memory_allocator_.swap(old_allocator_);
   }
 
  private:
   Isolate* isolate_;
-  MemoryAllocator* old_allocator_;
+  std::unique_ptr<MemoryAllocator> old_allocator_;
 
   DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
 };
@@ -89,41 +95,37 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
                               v8::PageAllocator* code_page_allocator,
                               size_t reserve_area_size, size_t commit_area_size,
                               Executability executable, Space* space) {
-  MemoryAllocator* memory_allocator =
-      new MemoryAllocator(isolate, heap->MaxReserved(), 0);
-  {
-    TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
-    TestCodePageAllocatorScope test_code_page_allocator_scope(
-        isolate, code_page_allocator);
-
-    v8::PageAllocator* page_allocator =
-        memory_allocator->page_allocator(executable);
-
-    size_t allocatable_memory_area_offset =
-        MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
-    size_t guard_size =
-        (executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
-
-    MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
-        reserve_area_size, commit_area_size, executable, space);
-    size_t reserved_size =
-        ((executable == EXECUTABLE))
-            ? allocatable_memory_area_offset +
-                  RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
-                  guard_size
-            : RoundUp(allocatable_memory_area_offset + reserve_area_size,
-                      page_allocator->CommitPageSize());
-    CHECK(memory_chunk->size() == reserved_size);
-    CHECK(memory_chunk->area_start() <
-          memory_chunk->address() + memory_chunk->size());
-    CHECK(memory_chunk->area_end() <=
-          memory_chunk->address() + memory_chunk->size());
-    CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
-
-    memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
-  }
-  memory_allocator->TearDown();
-  delete memory_allocator;
+  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+                                                0);
+  MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
+  TestCodePageAllocatorScope test_code_page_allocator_scope(
+      isolate, code_page_allocator);
+
+  v8::PageAllocator* page_allocator =
+      memory_allocator->page_allocator(executable);
+
+  size_t allocatable_memory_area_offset =
+      MemoryChunkLayout::ObjectStartOffsetInMemoryChunk(space->identity());
+  size_t guard_size =
+      (executable == EXECUTABLE) ? MemoryChunkLayout::CodePageGuardSize() : 0;
+
+  MemoryChunk* memory_chunk = memory_allocator->AllocateChunk(
+      reserve_area_size, commit_area_size, executable, space);
+  size_t reserved_size =
+      ((executable == EXECUTABLE))
+          ? allocatable_memory_area_offset +
+                RoundUp(reserve_area_size, page_allocator->CommitPageSize()) +
+                guard_size
+          : RoundUp(allocatable_memory_area_offset + reserve_area_size,
+                    page_allocator->CommitPageSize());
+  CHECK(memory_chunk->size() == reserved_size);
+  CHECK(memory_chunk->area_start() <
+        memory_chunk->address() + memory_chunk->size());
+  CHECK(memory_chunk->area_end() <=
+        memory_chunk->address() + memory_chunk->size());
+  CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
+
+  memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
 }
 
 static unsigned int PseudorandomAreaSize() {
@@ -170,48 +172,43 @@ TEST(MemoryAllocator) {
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
-  MemoryAllocator* memory_allocator =
-      new MemoryAllocator(isolate, heap->MaxReserved(), 0);
-  CHECK_NOT_NULL(memory_allocator);
-  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
-
-  {
-    int total_pages = 0;
-    OldSpace faked_space(heap);
-    CHECK(!faked_space.first_page());
-    CHECK(!faked_space.last_page());
-    Page* first_page = memory_allocator->AllocatePage(
-        faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
-        NOT_EXECUTABLE);
-
-    faked_space.memory_chunk_list().PushBack(first_page);
-    CHECK(first_page->next_page() == nullptr);
-    total_pages++;
-
-    for (Page* p = first_page; p != nullptr; p = p->next_page()) {
-      CHECK(p->owner() == &faked_space);
-    }
-
-    // Again, we should get n or n - 1 pages.
-    Page* other = memory_allocator->AllocatePage(
-        faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
-        NOT_EXECUTABLE);
-    total_pages++;
-    faked_space.memory_chunk_list().PushBack(other);
-    int page_count = 0;
-    for (Page* p = first_page; p != nullptr; p = p->next_page()) {
-      CHECK(p->owner() == &faked_space);
-      page_count++;
-    }
-    CHECK(total_pages == page_count);
-
-    Page* second_page = first_page->next_page();
-    CHECK_NOT_NULL(second_page);
-
-    // OldSpace's destructor will tear down the space and free up all pages.
+  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+                                                0);
+  MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
+
+  int total_pages = 0;
+  OldSpace faked_space(heap);
+  CHECK(!faked_space.first_page());
+  CHECK(!faked_space.last_page());
+  Page* first_page = memory_allocator->AllocatePage(
+      faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
+      NOT_EXECUTABLE);
+
+  faked_space.memory_chunk_list().PushBack(first_page);
+  CHECK(first_page->next_page() == nullptr);
+  total_pages++;
+
+  for (Page* p = first_page; p != nullptr; p = p->next_page()) {
+    CHECK(p->owner() == &faked_space);
   }
-  memory_allocator->TearDown();
-  delete memory_allocator;
+
+  // Again, we should get n or n - 1 pages.
+  Page* other = memory_allocator->AllocatePage(
+      faked_space.AreaSize(), static_cast<PagedSpace*>(&faked_space),
+      NOT_EXECUTABLE);
+  total_pages++;
+  faked_space.memory_chunk_list().PushBack(other);
+  int page_count = 0;
+  for (Page* p = first_page; p != nullptr; p = p->next_page()) {
+    CHECK(p->owner() == &faked_space);
+    page_count++;
+  }
+  CHECK(total_pages == page_count);
+
+  Page* second_page = first_page->next_page();
+  CHECK_NOT_NULL(second_page);
+
+  // OldSpace's destructor will tear down the space and free up all pages.
 }
 
 TEST(ComputeDiscardMemoryAreas) {
@@ -256,9 +253,9 @@ TEST(ComputeDiscardMemoryAreas) {
 TEST(NewSpace) {
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
-  MemoryAllocator* memory_allocator =
-      new MemoryAllocator(isolate, heap->MaxReserved(), 0);
-  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
+  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+                                                0);
+  MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
 
   NewSpace new_space(heap, memory_allocator->data_page_allocator(),
                      CcTest::heap()->InitialSemiSpaceSize(),
@@ -273,17 +270,14 @@ TEST(NewSpace) {
   new_space.TearDown();
   memory_allocator->unmapper()->EnsureUnmappingCompleted();
-  memory_allocator->TearDown();
-  delete memory_allocator;
 }
 
 TEST(OldSpace) {
   Isolate* isolate = CcTest::i_isolate();
   Heap* heap = isolate->heap();
-  MemoryAllocator* memory_allocator =
-      new MemoryAllocator(isolate, heap->MaxReserved(), 0);
-  TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
+  TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+                                                0);
 
   OldSpace* s = new OldSpace(heap);
   CHECK_NOT_NULL(s);
@@ -293,8 +287,6 @@ TEST(OldSpace) {
   }
 
   delete s;
-  memory_allocator->TearDown();
-  delete memory_allocator;
 }
 
 TEST(LargeObjectSpace) {
...