Commit 5d1d0795 authored by Peter Marshall, committed by Commit Bot

[cleanup] Use unique_ptr for MemoryAllocator in Heap

Also, as a drive-by cleanup, rework the TestMemoryAllocatorScope class
so that it takes ownership of the old allocator for as long as it is
active, and so that the MemoryAllocator used for testing is constructed
inside the scope rather than passed into it. Users therefore no longer
need to call TearDown() and delete the allocator explicitly; the scope
does that for them.

Change-Id: Id7da3c074618a376d2edfe3385bb185ba8287cea
Reviewed-on: https://chromium-review.googlesource.com/c/1392194
Commit-Queue: Peter Marshall <petermarshall@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#59627}
parent 54e51522
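The heap.cc/heap.h changes below are a mechanical switch from raw owning pointers to std::unique_ptr (assignment and delete become reset(), raw-pointer accessors return .get()), while the test change turns TestMemoryAllocatorScope into an RAII guard that owns whatever it temporarily displaces. As a rough illustration of that ownership-swap pattern only (Host, Allocator, and ScopedAllocatorOverride are invented names for this sketch, not V8 classes), the scope's behaviour boils down to:

```cpp
#include <iostream>
#include <memory>
#include <utility>

// Hypothetical stand-ins for Heap and MemoryAllocator; not V8 types.
class Allocator {
 public:
  explicit Allocator(const char* name) : name_(name) {}
  void TearDown() { std::cout << "TearDown " << name_ << "\n"; }
  const char* name() const { return name_; }

 private:
  const char* name_;
};

class Host {
 public:
  Allocator* allocator() { return allocator_.get(); }
  // Public only to keep the sketch short; the real code uses access grants.
  std::unique_ptr<Allocator> allocator_{new Allocator("original")};
};

// Temporarily replaces the allocator owned by a Host, then tears the
// temporary one down and restores the original when the scope ends.
class ScopedAllocatorOverride {
 public:
  explicit ScopedAllocatorOverride(Host* host)
      : host_(host), old_allocator_(std::move(host->allocator_)) {
    host_->allocator_.reset(new Allocator("temporary"));
  }

  Allocator* allocator() { return host_->allocator(); }

  ~ScopedAllocatorOverride() {
    // Clean up the temporary allocator before handing ownership back.
    host_->allocator()->TearDown();
    host_->allocator_.swap(old_allocator_);
  }  // old_allocator_ now holds the temporary and destroys it here.

 private:
  Host* host_;
  std::unique_ptr<Allocator> old_allocator_;
};

int main() {
  Host host;
  {
    ScopedAllocatorOverride scope(&host);
    std::cout << "inside scope: " << scope.allocator()->name() << "\n";
  }  // temporary torn down and destroyed, original restored
  std::cout << "after scope: " << host.allocator()->name() << "\n";
}
```

Because the displaced object lives in a std::unique_ptr member, the temporary allocator is destroyed automatically when the scope ends, which is why the tests below no longer need their explicit TearDown()/delete pairs.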
@@ -200,6 +200,8 @@ Heap::Heap()
RememberUnmappedPage(kNullAddress, false);
}
+ Heap::~Heap() = default;
size_t Heap::MaxReserved() {
const size_t kMaxNewLargeObjectSpaceSize = max_semi_space_size_;
return static_cast<size_t>(2 * max_semi_space_size_ +
@@ -1738,7 +1740,7 @@ bool Heap::PerformGarbageCollection(
Heap::new_space()->Size() + new_lo_space()->SizeOfObjects();
{
- Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_);
+ Heap::SkipStoreBufferScope skip_store_buffer_scope(store_buffer_.get());
switch (collector) {
case MARK_COMPACTOR:
@@ -4496,30 +4498,30 @@ void Heap::SetUp() {
~kMmapRegionMask;
// Set up memory allocator.
- memory_allocator_ =
- new MemoryAllocator(isolate_, MaxReserved(), code_range_size_);
+ memory_allocator_.reset(
+ new MemoryAllocator(isolate_, MaxReserved(), code_range_size_));
- store_buffer_ = new StoreBuffer(this);
+ store_buffer_.reset(new StoreBuffer(this));
- heap_controller_ = new HeapController(this);
+ heap_controller_.reset(new HeapController(this));
- mark_compact_collector_ = new MarkCompactCollector(this);
+ mark_compact_collector_.reset(new MarkCompactCollector(this));
- scavenger_collector_ = new ScavengerCollector(this);
+ scavenger_collector_.reset(new ScavengerCollector(this));
- incremental_marking_ =
+ incremental_marking_.reset(
new IncrementalMarking(this, mark_compact_collector_->marking_worklist(),
- mark_compact_collector_->weak_objects());
+ mark_compact_collector_->weak_objects()));
if (FLAG_concurrent_marking || FLAG_parallel_marking) {
MarkCompactCollector::MarkingWorklist* marking_worklist =
mark_compact_collector_->marking_worklist();
- concurrent_marking_ = new ConcurrentMarking(
+ concurrent_marking_.reset(new ConcurrentMarking(
this, marking_worklist->shared(), marking_worklist->on_hold(),
- mark_compact_collector_->weak_objects(), marking_worklist->embedder());
+ mark_compact_collector_->weak_objects(), marking_worklist->embedder()));
} else {
- concurrent_marking_ =
- new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr);
+ concurrent_marking_.reset(
+ new ConcurrentMarking(this, nullptr, nullptr, nullptr, nullptr));
}
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
@@ -4543,20 +4545,20 @@ void Heap::SetUp() {
deferred_counters_[i] = 0;
}
- tracer_ = new GCTracer(this);
+ tracer_.reset(new GCTracer(this));
#ifdef ENABLE_MINOR_MC
minor_mark_compact_collector_ = new MinorMarkCompactCollector(this);
#else
minor_mark_compact_collector_ = nullptr;
#endif // ENABLE_MINOR_MC
- array_buffer_collector_ = new ArrayBufferCollector(this);
- gc_idle_time_handler_ = new GCIdleTimeHandler();
- memory_reducer_ = new MemoryReducer(this);
+ array_buffer_collector_.reset(new ArrayBufferCollector(this));
+ gc_idle_time_handler_.reset(new GCIdleTimeHandler());
+ memory_reducer_.reset(new MemoryReducer(this));
if (V8_UNLIKELY(FLAG_gc_stats)) {
- live_object_stats_ = new ObjectStats(this);
- dead_object_stats_ = new ObjectStats(this);
+ live_object_stats_.reset(new ObjectStats(this));
+ dead_object_stats_.reset(new ObjectStats(this));
}
- local_embedder_heap_tracer_ = new LocalEmbedderHeapTracer(isolate());
+ local_embedder_heap_tracer_.reset(new LocalEmbedderHeapTracer(isolate()));
LOG(isolate_, IntPtrTEvent("heap-capacity", Capacity()));
LOG(isolate_, IntPtrTEvent("heap-available", Available()));
@@ -4571,10 +4573,10 @@ void Heap::SetUp() {
#endif // ENABLE_MINOR_MC
if (FLAG_idle_time_scavenge) {
- scavenge_job_ = new ScavengeJob();
- idle_scavenge_observer_ = new IdleScavengeObserver(
- *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask);
- new_space()->AddAllocationObserver(idle_scavenge_observer_);
+ scavenge_job_.reset(new ScavengeJob());
+ idle_scavenge_observer_.reset(new IdleScavengeObserver(
+ *this, ScavengeJob::kBytesAllocatedBeforeNextIdleTask));
+ new_space()->AddAllocationObserver(idle_scavenge_observer_.get());
}
SetGetExternallyAllocatedMemoryInBytesCallback(
@@ -4744,11 +4746,9 @@ void Heap::TearDown() {
}
if (FLAG_idle_time_scavenge) {
- new_space()->RemoveAllocationObserver(idle_scavenge_observer_);
- delete idle_scavenge_observer_;
- idle_scavenge_observer_ = nullptr;
- delete scavenge_job_;
- scavenge_job_ = nullptr;
+ new_space()->RemoveAllocationObserver(idle_scavenge_observer_.get());
+ idle_scavenge_observer_.reset();
+ scavenge_job_.reset();
}
if (FLAG_stress_marking > 0) {
@@ -4763,15 +4763,11 @@ void Heap::TearDown() {
stress_scavenge_observer_ = nullptr;
}
- if (heap_controller_ != nullptr) {
- delete heap_controller_;
- heap_controller_ = nullptr;
- }
+ heap_controller_.reset();
- if (mark_compact_collector_ != nullptr) {
+ if (mark_compact_collector_) {
mark_compact_collector_->TearDown();
- delete mark_compact_collector_;
- mark_compact_collector_ = nullptr;
+ mark_compact_collector_.reset();
}
#ifdef ENABLE_MINOR_MC
@@ -4782,43 +4778,22 @@ void Heap::TearDown() {
}
#endif // ENABLE_MINOR_MC
- if (scavenger_collector_ != nullptr) {
- delete scavenger_collector_;
- scavenger_collector_ = nullptr;
- }
- if (array_buffer_collector_ != nullptr) {
- delete array_buffer_collector_;
- array_buffer_collector_ = nullptr;
- }
- delete incremental_marking_;
- incremental_marking_ = nullptr;
+ scavenger_collector_.reset();
+ array_buffer_collector_.reset();
+ incremental_marking_.reset();
+ concurrent_marking_.reset();
- delete concurrent_marking_;
- concurrent_marking_ = nullptr;
- delete gc_idle_time_handler_;
- gc_idle_time_handler_ = nullptr;
+ gc_idle_time_handler_.reset();
if (memory_reducer_ != nullptr) {
memory_reducer_->TearDown();
- delete memory_reducer_;
- memory_reducer_ = nullptr;
+ memory_reducer_.reset();
}
- if (live_object_stats_ != nullptr) {
- delete live_object_stats_;
- live_object_stats_ = nullptr;
- }
+ live_object_stats_.reset();
+ dead_object_stats_.reset();
- if (dead_object_stats_ != nullptr) {
- delete dead_object_stats_;
- dead_object_stats_ = nullptr;
- }
- delete local_embedder_heap_tracer_;
- local_embedder_heap_tracer_ = nullptr;
+ local_embedder_heap_tracer_.reset();
external_string_table_.TearDown();
@@ -4827,8 +4802,7 @@ void Heap::TearDown() {
// store.
ArrayBufferTracker::TearDown(this);
- delete tracer_;
- tracer_ = nullptr;
+ tracer_.reset();
for (int i = FIRST_SPACE; i <= LAST_SPACE; i++) {
delete space_[i];
@@ -4846,11 +4820,8 @@ void Heap::TearDown() {
}
strong_roots_list_ = nullptr;
- delete store_buffer_;
- store_buffer_ = nullptr;
- delete memory_allocator_;
- memory_allocator_ = nullptr;
+ store_buffer_.reset();
+ memory_allocator_.reset();
}
void Heap::AddGCPrologueCallback(v8::Isolate::GCCallbackWithData callback,
@@ -5664,10 +5635,10 @@ bool Heap::AllowedToBeMigrated(HeapObject obj, AllocationSpace dst) {
void Heap::CreateObjectStats() {
if (V8_LIKELY(FLAG_gc_stats == 0)) return;
if (!live_object_stats_) {
- live_object_stats_ = new ObjectStats(this);
+ live_object_stats_.reset(new ObjectStats(this));
}
if (!dead_object_stats_) {
- dead_object_stats_ = new ObjectStats(this);
+ dead_object_stats_.reset(new ObjectStats(this));
}
}
......
@@ -624,14 +624,14 @@ class Heap {
// Getters to other components. ==============================================
// ===========================================================================
- GCTracer* tracer() { return tracer_; }
+ GCTracer* tracer() { return tracer_.get(); }
- MemoryAllocator* memory_allocator() { return memory_allocator_; }
+ MemoryAllocator* memory_allocator() { return memory_allocator_.get(); }
inline Isolate* isolate();
MarkCompactCollector* mark_compact_collector() {
- return mark_compact_collector_;
+ return mark_compact_collector_.get();
}
MinorMarkCompactCollector* minor_mark_compact_collector() {
@@ -639,7 +639,7 @@
}
ArrayBufferCollector* array_buffer_collector() {
- return array_buffer_collector_;
+ return array_buffer_collector_.get();
}
// ===========================================================================
@@ -829,13 +829,15 @@ class Heap {
Reservation* reservations, const std::vector<HeapObject>& large_objects,
const std::vector<Address>& maps);
- IncrementalMarking* incremental_marking() { return incremental_marking_; }
+ IncrementalMarking* incremental_marking() {
+ return incremental_marking_.get();
+ }
// ===========================================================================
// Concurrent marking API. ===================================================
// ===========================================================================
- ConcurrentMarking* concurrent_marking() { return concurrent_marking_; }
+ ConcurrentMarking* concurrent_marking() { return concurrent_marking_.get(); }
// The runtime uses this function to notify potentially unsafe object layout
// changes that require special synchronization with the concurrent marker.
@@ -873,7 +875,7 @@
// ===========================================================================
LocalEmbedderHeapTracer* local_embedder_heap_tracer() const {
- return local_embedder_heap_tracer_;
+ return local_embedder_heap_tracer_.get();
}
void SetEmbedderHeapTracer(EmbedderHeapTracer* tracer);
@@ -1377,6 +1379,7 @@ class Heap {
static const int kInitialFeedbackCapacity = 256;
Heap();
+ ~Heap();
// Selects the proper allocation space based on the pretenuring decision.
static AllocationSpace SelectSpace(PretenureFlag pretenure) {
@@ -1400,7 +1403,7 @@
ROOT_LIST(ROOT_ACCESSOR)
#undef ROOT_ACCESSOR
- StoreBuffer* store_buffer() { return store_buffer_; }
+ StoreBuffer* store_buffer() { return store_buffer_.get(); }
void set_current_gc_flags(int flags) {
current_gc_flags_ = flags;
@@ -1621,8 +1624,8 @@
// Growing strategy. =========================================================
// ===========================================================================
- HeapController* heap_controller() { return heap_controller_; }
- MemoryReducer* memory_reducer() { return memory_reducer_; }
+ HeapController* heap_controller() { return heap_controller_.get(); }
+ MemoryReducer* memory_reducer() { return memory_reducer_.get(); }
// For some webpages RAIL mode does not switch from PERFORMANCE_LOAD.
// This constant limits the effect of load RAIL mode on GC.
@@ -1900,23 +1903,23 @@
// Last time a garbage collection happened.
double last_gc_time_ = 0.0;
- GCTracer* tracer_ = nullptr;
- MarkCompactCollector* mark_compact_collector_ = nullptr;
+ std::unique_ptr<GCTracer> tracer_;
+ std::unique_ptr<MarkCompactCollector> mark_compact_collector_;
MinorMarkCompactCollector* minor_mark_compact_collector_ = nullptr;
- ScavengerCollector* scavenger_collector_ = nullptr;
- ArrayBufferCollector* array_buffer_collector_ = nullptr;
- MemoryAllocator* memory_allocator_ = nullptr;
- StoreBuffer* store_buffer_ = nullptr;
- HeapController* heap_controller_ = nullptr;
- IncrementalMarking* incremental_marking_ = nullptr;
- ConcurrentMarking* concurrent_marking_ = nullptr;
- GCIdleTimeHandler* gc_idle_time_handler_ = nullptr;
- MemoryReducer* memory_reducer_ = nullptr;
- ObjectStats* live_object_stats_ = nullptr;
- ObjectStats* dead_object_stats_ = nullptr;
- ScavengeJob* scavenge_job_ = nullptr;
- AllocationObserver* idle_scavenge_observer_ = nullptr;
- LocalEmbedderHeapTracer* local_embedder_heap_tracer_ = nullptr;
+ std::unique_ptr<ScavengerCollector> scavenger_collector_;
+ std::unique_ptr<ArrayBufferCollector> array_buffer_collector_;
+ std::unique_ptr<MemoryAllocator> memory_allocator_;
+ std::unique_ptr<StoreBuffer> store_buffer_;
+ std::unique_ptr<HeapController> heap_controller_;
+ std::unique_ptr<IncrementalMarking> incremental_marking_;
+ std::unique_ptr<ConcurrentMarking> concurrent_marking_;
+ std::unique_ptr<GCIdleTimeHandler> gc_idle_time_handler_;
+ std::unique_ptr<MemoryReducer> memory_reducer_;
+ std::unique_ptr<ObjectStats> live_object_stats_;
+ std::unique_ptr<ObjectStats> dead_object_stats_;
+ std::unique_ptr<ScavengeJob> scavenge_job_;
+ std::unique_ptr<AllocationObserver> idle_scavenge_observer_;
+ std::unique_ptr<LocalEmbedderHeapTracer> local_embedder_heap_tracer_;
StrongRootsList* strong_roots_list_ = nullptr;
// This counter is increased before each GC and never reset.
......
@@ -1741,8 +1741,8 @@ void MarkCompactCollector::ProcessTopOptimizedFrame(ObjectVisitor* visitor) {
void MarkCompactCollector::RecordObjectStats() {
if (V8_UNLIKELY(FLAG_gc_stats)) {
heap()->CreateObjectStats();
- ObjectStatsCollector collector(heap(), heap()->live_object_stats_,
- heap()->dead_object_stats_);
+ ObjectStatsCollector collector(heap(), heap()->live_object_stats_.get(),
+ heap()->dead_object_stats_.get());
collector.Collect();
if (V8_UNLIKELY(FLAG_gc_stats &
v8::tracing::TracingCategoryObserver::ENABLED_BY_TRACING)) {
......
@@ -701,10 +701,13 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
unsigned epoch() const { return epoch_; }
- private:
explicit MarkCompactCollector(Heap* heap);
~MarkCompactCollector() override;
+ // Used by wrapper tracing.
+ V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
+ private:
void ComputeEvacuationHeuristics(size_t area_size,
int* target_fragmentation_percent,
size_t* max_evacuated_bytes);
@@ -724,9 +727,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
// This is for non-incremental marking only.
V8_INLINE void MarkRootObject(Root root, HeapObject obj);
- // Used by wrapper tracing.
- V8_INLINE void MarkExternallyReferencedObject(HeapObject obj);
// Mark the heap roots and all objects reachable from them.
void MarkRoots(RootVisitor* root_visitor,
ObjectVisitor* custom_root_body_visitor);
@@ -905,7 +905,6 @@ class MarkCompactCollector final : public MarkCompactCollectorBase {
unsigned epoch_ = 0;
friend class FullEvacuator;
- friend class Heap;
friend class RecordMigratedSlotVisitor;
};
......
@@ -45,18 +45,24 @@ namespace heap {
// Temporarily sets a given allocator in an isolate.
class TestMemoryAllocatorScope {
public:
- TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
- : isolate_(isolate), old_allocator_(isolate->heap()->memory_allocator()) {
- isolate->heap()->memory_allocator_ = allocator;
+ TestMemoryAllocatorScope(Isolate* isolate, size_t max_capacity,
+ size_t code_range_size)
+ : isolate_(isolate),
+ old_allocator_(std::move(isolate->heap()->memory_allocator_)) {
+ isolate->heap()->memory_allocator_.reset(
+ new MemoryAllocator(isolate, max_capacity, code_range_size));
}
+ MemoryAllocator* allocator() { return isolate_->heap()->memory_allocator(); }
~TestMemoryAllocatorScope() {
- isolate_->heap()->memory_allocator_ = old_allocator_;
+ isolate_->heap()->memory_allocator()->TearDown();
+ isolate_->heap()->memory_allocator_.swap(old_allocator_);
}
private:
Isolate* isolate_;
- MemoryAllocator* old_allocator_;
+ std::unique_ptr<MemoryAllocator> old_allocator_;
DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
};
@@ -89,10 +95,9 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
v8::PageAllocator* code_page_allocator,
size_t reserve_area_size, size_t commit_area_size,
Executability executable, Space* space) {
- MemoryAllocator* memory_allocator =
- new MemoryAllocator(isolate, heap->MaxReserved(), 0);
{
- TestMemoryAllocatorScope test_allocator_scope(isolate, memory_allocator);
+ TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+ 0);
+ MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
TestCodePageAllocatorScope test_code_page_allocator_scope(
isolate, code_page_allocator);
@@ -121,9 +126,6 @@ static void VerifyMemoryChunk(Isolate* isolate, Heap* heap,
CHECK(static_cast<size_t>(memory_chunk->area_size()) == commit_area_size);
memory_allocator->Free<MemoryAllocator::kFull>(memory_chunk);
}
- memory_allocator->TearDown();
- delete memory_allocator;
}
static unsigned int PseudorandomAreaSize() {
@@ -170,12 +172,10 @@ TEST(MemoryAllocator) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
- MemoryAllocator* memory_allocator =
- new MemoryAllocator(isolate, heap->MaxReserved(), 0);
- CHECK_NOT_NULL(memory_allocator);
- TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
+ TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+ 0);
+ MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
{
int total_pages = 0;
OldSpace faked_space(heap);
CHECK(!faked_space.first_page());
@@ -209,9 +209,6 @@
CHECK_NOT_NULL(second_page);
// OldSpace's destructor will tear down the space and free up all pages.
}
- memory_allocator->TearDown();
- delete memory_allocator;
}
TEST(ComputeDiscardMemoryAreas) {
@@ -256,9 +253,9 @@ TEST(ComputeDiscardMemoryAreas) {
TEST(NewSpace) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
- MemoryAllocator* memory_allocator =
- new MemoryAllocator(isolate, heap->MaxReserved(), 0);
- TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
+ TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+ 0);
+ MemoryAllocator* memory_allocator = test_allocator_scope.allocator();
NewSpace new_space(heap, memory_allocator->data_page_allocator(),
CcTest::heap()->InitialSemiSpaceSize(),
@@ -273,17 +270,14 @@
new_space.TearDown();
memory_allocator->unmapper()->EnsureUnmappingCompleted();
- memory_allocator->TearDown();
- delete memory_allocator;
}
TEST(OldSpace) {
Isolate* isolate = CcTest::i_isolate();
Heap* heap = isolate->heap();
- MemoryAllocator* memory_allocator =
- new MemoryAllocator(isolate, heap->MaxReserved(), 0);
- TestMemoryAllocatorScope test_scope(isolate, memory_allocator);
+ TestMemoryAllocatorScope test_allocator_scope(isolate, heap->MaxReserved(),
+ 0);
OldSpace* s = new OldSpace(heap);
CHECK_NOT_NULL(s);
@@ -293,8 +287,6 @@
}
delete s;
- memory_allocator->TearDown();
- delete memory_allocator;
}
TEST(LargeObjectSpace) {
......