Commit d0151bfb authored by hpayer, committed by Commit bot

[heap] Don't use black pages for map, code, and lo space. Instead color objects black.

This reduces fragmentation in spaces where black pages are not a requirement. The only space where we need black pages is old space, because of allocation folding and fast inline allocation in generated code.
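
For illustration only (not part of this commit): a minimal standalone C++ sketch of the two coloring strategies. The Page struct and helper names below are made up; in V8 the corresponding operations are Bitmap::SetAllBits plus Page::BLACK_PAGE for black pages, and Marking::MarkBlack plus MemoryChunk::IncrementLiveBytesFromGC for per-object coloring.

  // Standalone illustration (not V8 code): one mark bit per word on a page.
  #include <bitset>
  #include <cstddef>
  #include <cstdio>

  constexpr std::size_t kPageWords = 1024;   // illustrative page size in words
  constexpr std::size_t kWordSize = 8;       // bytes per word

  struct Page {
    std::bitset<kPageWords> mark_bits;       // stand-in for the page's mark bitmap
    bool black_page = false;                 // stand-in for Page::BLACK_PAGE
    std::size_t live_bytes = 0;
  };

  // Old-space strategy: turn the whole page black up front, so every object
  // bump-allocated on it later is implicitly black. This is what allocation
  // folding and fast inline allocation in generated code rely on.
  void AllocatePageBlack(Page* p) {
    p->mark_bits.set();                      // roughly Bitmap::SetAllBits(p)
    p->black_page = true;                    // roughly p->SetFlag(Page::BLACK_PAGE)
  }

  // Map/code/lo-space strategy after this change: color only the words the
  // newly allocated object occupies and account for its size; the rest of the
  // page stays white and reusable, which reduces fragmentation.
  void ColorObjectBlack(Page* p, std::size_t offset_words, std::size_t size_words) {
    for (std::size_t i = offset_words; i < offset_words + size_words; ++i) {
      p->mark_bits.set(i);                   // roughly Marking::MarkBlack(mark_bit)
    }
    p->live_bytes += size_words * kWordSize; // roughly IncrementLiveBytesFromGC
  }

  int main() {
    Page old_page, code_page;
    AllocatePageBlack(&old_page);
    ColorObjectBlack(&code_page, 0, 32);
    std::printf("old page black words: %zu\n", old_page.mark_bits.count());
    std::printf("code page black words: %zu, live bytes: %zu\n",
                code_page.mark_bits.count(), code_page.live_bytes);
    return 0;
  }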

BUG=chromium:599174
LOG=n

Review URL: https://codereview.chromium.org/1862063002

Cr-Commit-Position: refs/heads/master@{#35315}
parent 1e001e71
@@ -251,6 +251,12 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
} else {
old_gen_exhausted_ = true;
}
if (!old_gen_exhausted_ && incremental_marking()->black_allocation() &&
space != OLD_SPACE) {
Marking::MarkBlack(Marking::MarkBitFrom(object));
MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
}
return allocation;
}
......
@@ -4239,7 +4239,23 @@ void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
// TODO(hpayer): We do not have to iterate reservations on black objects
// for marking. We just have to execute the special visiting side effect
// code that adds objects to global data structures, e.g. for array buffers.
// Code space, map space, and large object space do not use black pages.
// Hence we have to color all objects of the reservation first black to avoid
// unnecessary marking deque load.
if (incremental_marking()->black_allocation()) {
for (int i = CODE_SPACE; i < Serializer::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
while (addr < chunk.end) {
HeapObject* obj = HeapObject::FromAddress(addr);
Marking::MarkBlack(Marking::MarkBitFrom(obj));
MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
addr += obj->Size();
}
}
}
for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
......
@@ -571,12 +571,9 @@ void IncrementalMarking::StartBlackAllocation() {
DCHECK(FLAG_black_allocation);
DCHECK(IsMarking());
black_allocation_ = true;
PagedSpaces spaces(heap());
for (PagedSpace* space = spaces.next(); space != NULL;
space = spaces.next()) {
space->EmptyAllocationInfo();
space->free_list()->Reset();
}
OldSpace* old_space = heap()->old_space();
old_space->EmptyAllocationInfo();
old_space->free_list()->Reset();
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Black allocation started\n");
}
......
@@ -1172,9 +1172,10 @@ bool PagedSpace::Expand() {
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
// When incremental marking was activated, old generation pages are allocated
// When incremental marking was activated, old space pages are allocated
// black.
if (heap()->incremental_marking()->black_allocation()) {
if (heap()->incremental_marking()->black_allocation() &&
identity() == OLD_SPACE) {
Bitmap::SetAllBits(p);
p->SetFlag(Page::BLACK_PAGE);
if (FLAG_trace_incremental_marking) {
@@ -2887,11 +2888,6 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
}
HeapObject* object = page->GetObject();
if (heap()->incremental_marking()->black_allocation()) {
MarkBit mark_bit = Marking::MarkBitFrom(object);
Marking::MarkBlack(mark_bit);
page->SetFlag(Page::BLACK_PAGE);
}
MSAN_ALLOCATED_UNINITIALIZED_MEMORY(object->address(), object_size);
if (Heap::ShouldZapGarbage()) {
......