Commit 40b448ea authored by Hannes Payer, committed by Commit Bot

[heap] Release dead young generation large objects in the Scavenger.

Bug: chromium:852420
Change-Id: Ieefbee7bfd625d62e9104950bdfa8e46d5f4270a
Reviewed-on: https://chromium-review.googlesource.com/c/1348081
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Hannes Payer <hpayer@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57761}
parent 83fb2f8d
......@@ -269,6 +269,11 @@ void ScavengerCollector::CollectGarbage() {
}
heap_->array_buffer_collector()->FreeAllocations();
// Since we promote all surviving large objects immediately, all remaining
// large objects must be dead.
// TODO(hpayer): Don't free all as soon as we have an intermediate generation.
heap_->new_lo_space()->FreeAllObjects();
RememberedSet<OLD_TO_NEW>::IterateMemoryChunks(heap_, [](MemoryChunk* chunk) {
if (chunk->SweepingDone()) {
RememberedSet<OLD_TO_NEW>::FreeEmptyBuckets(chunk);
......@@ -296,7 +301,6 @@ void ScavengerCollector::HandleSurvivingNewLargeObjects() {
LargePage* page = LargePage::FromHeapObject(object);
heap_->lo_space()->PromoteNewLargeObject(page);
}
DCHECK(heap_->new_lo_space()->IsEmpty());
}
void ScavengerCollector::MergeSurvivingNewLargeObjects(
......
......@@ -3749,6 +3749,19 @@ void NewLargeObjectSpace::Flip() {
}
}
void NewLargeObjectSpace::FreeAllObjects() {
LargePage* current = first_page();
objects_size_ = 0;
while (current) {
LargePage* next_current = current->next_page();
Unregister(current, static_cast<size_t>(current->GetObject()->Size()));
heap()->memory_allocator()->Free<MemoryAllocator::kPreFreeAndQueue>(
current);
current = next_current;
}
DCHECK_EQ(objects_size_, 0);
}
// Large object space for executable code objects; all behavior is inherited
// from LargeObjectSpace, tagged with the CODE_LO_SPACE identity.
CodeLargeObjectSpace::CodeLargeObjectSpace(Heap* heap)
: LargeObjectSpace(heap, CODE_LO_SPACE) {}
......
......@@ -3038,11 +3038,11 @@ class LargeObjectSpace : public Space {
V8_WARN_UNUSED_RESULT AllocationResult AllocateRaw(int object_size,
Executability executable);
private:
size_t size_; // allocated bytes
int page_count_; // number of chunks
size_t objects_size_; // size of objects
private:
// The chunk_map_mutex_ has to be used when the chunk map is accessed
// concurrently.
base::Mutex chunk_map_mutex_;
......@@ -3063,6 +3063,8 @@ class NewLargeObjectSpace : public LargeObjectSpace {
size_t Available() override;
void Flip();
void FreeAllObjects();
};
class CodeLargeObjectSpace : public LargeObjectSpace {
......
......@@ -5791,6 +5791,34 @@ TEST(YoungGenerationLargeObjectAllocationMarkCompact) {
CcTest::CollectAllAvailableGarbage();
}
// Verifies that a scavenge releases dead young-generation large objects
// instead of promoting them: after the handles die and a minor GC runs, both
// the young and the old large object spaces must be empty.
TEST(YoungGenerationLargeObjectAllocationReleaseScavenger) {
  // This behavior is specific to the Scavenger path.
  if (FLAG_minor_mc) return;
  FLAG_young_generation_large_objects = true;
  CcTest::InitializeVM();
  v8::HandleScope scope(CcTest::isolate());
  Heap* heap = CcTest::heap();
  Isolate* isolate = heap->isolate();
  // NOTE(review): presumably the allocation pattern below only lands in
  // NEW_LO_SPACE under serializer builds — confirm against sibling tests.
  if (!isolate->serializer_enabled()) return;
  {
    // Allocate several young large objects whose handles die with this scope.
    HandleScope inner_scope(isolate);
    for (int i = 0; i < 10; ++i) {
      Handle<FixedArray> array_small = isolate->factory()->NewFixedArray(20000);
      MemoryChunk* chunk = MemoryChunk::FromAddress(array_small->address());
      CHECK_EQ(NEW_LO_SPACE, chunk->owner()->identity());
      CHECK(chunk->IsFlagSet(MemoryChunk::IN_TO_SPACE));
    }
  }
  // The scavenge must free all (now unreachable) young large objects and must
  // not promote any of them to the old large object space.
  CcTest::CollectGarbage(NEW_SPACE);
  auto* young_lo = isolate->heap()->new_lo_space();
  CHECK(young_lo->IsEmpty());
  CHECK_EQ(0, young_lo->Size());
  CHECK_EQ(0, young_lo->SizeOfObjects());
  auto* old_lo = isolate->heap()->lo_space();
  CHECK(old_lo->IsEmpty());
  CHECK_EQ(0, old_lo->Size());
  CHECK_EQ(0, old_lo->SizeOfObjects());
}
TEST(UncommitUnusedLargeObjectMemory) {
CcTest::InitializeVM();
v8::HandleScope scope(CcTest::isolate());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment