Commit 5cbe34bb authored by yangguo, committed by Commit bot

Revert of [heap] Remove black pages and use black areas instead. (patchset #6 id:100001 of https://codereview.chromium.org/2160613002/ )

Reason for revert:
Suspected to cause crbug.com/630969

Original issue's description:
> [heap] Remove black pages and use black areas instead.
>
> BUG=630386
> LOG=n
>
> Committed: https://crrev.com/b008a0d5a3db80a854cb93d9c94d67bf2d780f2c
> Cr-Commit-Position: refs/heads/master@{#37967}

TBR=ulan@chromium.org,hpayer@chromium.org
# Not skipping CQ checks because original CL landed more than 1 day ago.
BUG=630386

Review-Url: https://codereview.chromium.org/2176133002
Cr-Commit-Position: refs/heads/master@{#38006}
parent 02503b08
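For context: with black pages, a page allocated while incremental marking is active is flagged as a whole and every object on it counts as black without touching individual mark bits, whereas the reverted CL blackened contiguous mark-bit ranges ("black areas") covering just the allocated memory. A minimal standalone sketch of the two bookkeeping styles (all names below are simplified stand-ins, not V8's real classes):

#include <cassert>
#include <cstdint>

// Simplified mark bitmap: one bit per pointer-sized word of a page.
struct Bitmap {
  uint32_t cells[4] = {};
  void Set(uint32_t i) { cells[i / 32] |= 1u << (i % 32); }
  bool Get(uint32_t i) const { return (cells[i / 32] >> (i % 32)) & 1u; }
  // Black-area style: blacken a whole index range [start, end) at once.
  void SetRange(uint32_t start, uint32_t end) {
    for (uint32_t i = start; i < end; i++) Set(i);
  }
};

// Black-page style (restored by this revert): a page-level flag makes every
// object on the page black, with no per-object mark bits.
struct Page {
  bool black_page = false;
  Bitmap markbits;
};

bool IsBlack(const Page& p, uint32_t markbit_index) {
  return p.black_page || p.markbits.Get(markbit_index);
}

int main() {
  Page a;                      // black-page scheme
  a.black_page = true;
  assert(IsBlack(a, 7));       // implicitly black everywhere

  Page b;                      // black-area scheme from the reverted CL
  b.markbits.SetRange(4, 12);  // blacken only the allocated area
  assert(IsBlack(b, 7) && !IsBlack(b, 20));
  return 0;
}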
@@ -252,6 +252,11 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
old_gen_exhausted_ = true;
}
if (!old_gen_exhausted_ && incremental_marking()->black_allocation() &&
space != OLD_SPACE) {
Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
}
return allocation;
}
@@ -3065,16 +3065,6 @@ void Heap::CreateFillerObjectAt(Address addr, int size,
if (mode == ClearRecordedSlots::kYes) {
ClearRecordedSlotRange(addr, addr + size);
}
// If the location where the filler is created is within a black area we have
// to clear the mark bits of the filler space.
if (incremental_marking()->black_allocation() &&
Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(addr))) {
Page* page = Page::FromAddress(addr);
page->markbits()->ClearRange(page->AddressToMarkbitIndex(addr),
page->AddressToMarkbitIndex(addr + size));
}
// At this point, we may be deserializing the heap from a snapshot, and
// none of the maps have been created yet and are NULL.
DCHECK((filler->map() == NULL && !deserialization_complete_) ||
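The block removed above cleared the filler's mark bits whenever a filler was carved out of a black area, so that the dead range is not treated as live by the sweeper. A hedged sketch of the address-to-mark-bit arithmetic involved (stand-in helpers; a 64-bit build with one mark bit per 8-byte word is assumed):

#include <cstdint>

const uint32_t kPointerSizeLog2 = 3;  // assumption: 64-bit build

// Stand-in for Page::AddressToMarkbitIndex: one bit per word, relative to
// the page start.
uint32_t AddressToMarkbitIndex(uintptr_t page_start, uintptr_t addr) {
  return static_cast<uint32_t>((addr - page_start) >> kPointerSizeLog2);
}

// Clear the bits covering [addr, addr + size), mirroring the removed
// ClearRange call for a filler created inside a black area.
void ClearFillerBits(uint32_t* cells, uintptr_t page_start, uintptr_t addr,
                     int size) {
  uint32_t start = AddressToMarkbitIndex(page_start, addr);
  uint32_t end = AddressToMarkbitIndex(page_start, addr + size);
  for (uint32_t i = start; i < end; i++)
    cells[i / 32] &= ~(1u << (i % 32));
}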
@@ -3147,9 +3137,6 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
// Calculate location of new array start.
Address new_start = object->address() + bytes_to_trim;
// Transfer the mark bits to their new location.
IncrementalMarking::TransferMark(this, object->address(), new_start);
// Technically in new space this write might be omitted (except for
// debug mode which iterates through the heap), but to play safer
// we still do it.
@@ -3163,18 +3150,18 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
int new_start_index = elements_to_trim * (element_size / kPointerSize);
former_start[new_start_index] = map;
former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim);
FixedArrayBase* new_object =
FixedArrayBase::cast(HeapObject::FromAddress(new_start));
// Maintain consistency of live bytes during incremental marking
AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
// Remove recorded slots for the new map and length offset.
ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
ClearRecordedSlot(new_object, HeapObject::RawField(
new_object, FixedArrayBase::kLengthOffset));
// Maintain consistency of live bytes during incremental marking
IncrementalMarking::TransferMark(this, object->address(), new_start);
AdjustLiveBytes(new_object, -bytes_to_trim, Heap::CONCURRENT_TO_SWEEPER);
// Notify the heap profiler of change in object layout.
OnMoveEvent(new_object, object, new_object->Size());
return new_object;
@@ -4188,13 +4175,14 @@ void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) {
// Hence we have to color all objects of the reservation first black to avoid
// unnecessary marking deque load.
if (incremental_marking()->black_allocation()) {
for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) {
for (int i = CODE_SPACE; i < Serializer::kNumberOfSpaces; i++) {
const Heap::Reservation& res = reservations[i];
for (auto& chunk : res) {
Address addr = chunk.start;
while (addr < chunk.end) {
HeapObject* obj = HeapObject::FromAddress(addr);
Marking::MarkBlack(ObjectMarking::MarkBitFrom(obj));
MemoryChunk::IncrementLiveBytesFromGC(obj, obj->Size());
addr += obj->Size();
}
}
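Either way the loop shape is the same: walk each reserved chunk object by object and blacken everything eagerly, so none of the freshly deserialized objects has to be pushed through the marking deque. A compilable sketch of that walk (Chunk and the size/mark helpers are stand-ins for the V8 internals):

#include <cstdint>
#include <vector>

struct Chunk { uintptr_t start, end; };

// Stand-in: real V8 derives HeapObject::Size() from the object's map; here
// the first word of each object is assumed to hold its size in bytes.
int ObjectSizeAt(uintptr_t addr) { return *reinterpret_cast<int*>(addr); }

void MarkBlackAt(uintptr_t addr) { /* set the object's mark bit here */ }

void BlackenReservation(const std::vector<Chunk>& reservation) {
  for (const Chunk& chunk : reservation) {
    uintptr_t addr = chunk.start;
    while (addr < chunk.end) {     // objects are contiguous within a chunk
      MarkBlackAt(addr);
      addr += ObjectSizeAt(addr);  // advance to the next object
    }
  }
}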
@@ -164,7 +164,9 @@ void IncrementalMarking::TransferMark(Heap* heap, Address old_start,
DCHECK(MemoryChunk::FromAddress(old_start) ==
MemoryChunk::FromAddress(new_start));
if (!heap->incremental_marking()->IsMarking()) return;
if (!heap->incremental_marking()->IsMarking() ||
Page::FromAddress(old_start)->IsFlagSet(Page::BLACK_PAGE))
return;
// If the mark doesn't move, we don't check the color of the object.
// It doesn't matter whether the object is black, since it hasn't changed
@@ -601,9 +603,9 @@ void IncrementalMarking::StartBlackAllocation() {
DCHECK(FLAG_black_allocation);
DCHECK(IsMarking());
black_allocation_ = true;
heap()->old_space()->MarkAllocationInfoBlack();
heap()->map_space()->MarkAllocationInfoBlack();
heap()->code_space()->MarkAllocationInfoBlack();
OldSpace* old_space = heap()->old_space();
old_space->EmptyAllocationInfo();
old_space->free_list()->Reset();
if (FLAG_trace_incremental_marking) {
PrintF("[IncrementalMarking] Black allocation started\n");
}
@@ -831,7 +833,7 @@ void IncrementalMarking::UpdateMarkingDequeAfterScavenge() {
// them.
if (map_word.IsForwardingAddress()) {
HeapObject* dest = map_word.ToForwardingAddress();
if (Marking::IsBlack(ObjectMarking::MarkBitFrom(dest->address())))
if (Page::FromAddress(dest->address())->IsFlagSet(Page::BLACK_PAGE))
continue;
array[new_top] = dest;
new_top = ((new_top + 1) & mask);
@@ -208,17 +208,12 @@ class IncrementalMarking {
static void TransferMark(Heap* heap, Address old_start, Address new_start);
// Returns true if the color transfer requires live bytes updating.
INLINE(static bool TransferColor(HeapObject* from, HeapObject* to,
int size)) {
// Returns true if the transferred color is black.
INLINE(static bool TransferColor(HeapObject* from, HeapObject* to)) {
if (Page::FromAddress(to->address())->IsFlagSet(Page::BLACK_PAGE))
return true;
MarkBit from_mark_bit = ObjectMarking::MarkBitFrom(from);
MarkBit to_mark_bit = ObjectMarking::MarkBitFrom(to);
if (Marking::IsBlack(to_mark_bit)) {
DCHECK(to->GetHeap()->incremental_marking()->black_allocation());
return false;
}
DCHECK(Marking::IsWhite(to_mark_bit));
if (from_mark_bit.Get()) {
to_mark_bit.Set();
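The restored TransferColor above answers "did the destination end up black?" so the caller knows whether to bump live bytes, and on a black page the answer is trivially yes. A sketch of that contract, collapsing V8's two-bit grey/black encoding into a single mark bit for brevity:

#include <cassert>

struct MarkBit {
  bool* cell;
  bool Get() const { return *cell; }
  void Set() { *cell = true; }
};

// Returns true if the transferred color is black (sketch semantics only).
bool TransferColor(bool dest_on_black_page, MarkBit from, MarkBit to) {
  if (dest_on_black_page) return true;  // implicitly black, nothing to copy
  if (from.Get()) {                     // source was marked: move the bit
    to.Set();
    return true;
  }
  return false;
}

int main() {
  bool from_bit = true, to_bit = false;
  assert(TransferColor(false, MarkBit{&from_bit}, MarkBit{&to_bit}) && to_bit);
  assert(TransferColor(true, MarkBit{&from_bit}, MarkBit{&to_bit}));
  return 0;
}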
@@ -153,38 +153,20 @@ HeapObject* LiveObjectIterator<T>::Next() {
cell_base_ = it_.CurrentCellBase();
current_cell_ = *it_.CurrentCell();
}
if (current_cell_ & second_bit_index) {
// We found a black object. If the black object is within a black area,
// make sure that we skip all set bits in the black area until the
// object ends.
HeapObject* black_object = HeapObject::FromAddress(addr);
Address end = addr + black_object->Size() - kPointerSize;
DCHECK_EQ(chunk_, MemoryChunk::FromAddress(end));
uint32_t end_mark_bit_index = chunk_->AddressToMarkbitIndex(end);
unsigned int end_cell_index =
end_mark_bit_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType end_index_mask =
1u << Bitmap::IndexInCell(end_mark_bit_index);
if (it_.Advance(end_cell_index)) {
cell_base_ = it_.CurrentCellBase();
current_cell_ = *it_.CurrentCell();
}
// Clear all bits in current_cell, including the end index.
current_cell_ &= ~(end_index_mask + end_index_mask - 1);
if (T == kBlackObjects || T == kAllLiveObjects) {
object = black_object;
}
} else if ((T == kGreyObjects || T == kAllLiveObjects)) {
if (T == kBlackObjects && (current_cell_ & second_bit_index)) {
object = HeapObject::FromAddress(addr);
} else if (T == kGreyObjects && !(current_cell_ & second_bit_index)) {
object = HeapObject::FromAddress(addr);
} else if (T == kAllLiveObjects) {
object = HeapObject::FromAddress(addr);
}
// Clear the second bit of the found object.
current_cell_ &= ~second_bit_index;
// We found a live object.
if (object != nullptr) break;
}
if (current_cell_ == 0) {
if (!it_.Done()) {
it_.Advance();
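The removed branch had to skip every set bit of a black area before yielding the next object; the heart of it is mapping the object's last word to a cell index plus an intra-cell mask, then clearing everything up to and including that bit. A compilable sketch of the index math (32-bit mark-bit cells, as in V8):

#include <cassert>
#include <cstdint>

const uint32_t kBitsPerCell = 32;
const uint32_t kBitsPerCellLog2 = 5;

uint32_t IndexToCell(uint32_t index) { return index >> kBitsPerCellLog2; }
uint32_t IndexInCell(uint32_t index) { return index & (kBitsPerCell - 1); }

// Mask that clears bits 0..end (inclusive) within the end cell, mirroring
// `current_cell_ &= ~(end_index_mask + end_index_mask - 1)` above.
uint32_t ClearThroughMask(uint32_t end_mark_bit_index) {
  uint32_t end_index_mask = 1u << IndexInCell(end_mark_bit_index);
  return ~(end_index_mask + end_index_mask - 1);
}

int main() {
  assert(IndexToCell(37) == 1 && IndexInCell(37) == 5);
  // An object ending at bit 5: bits 0..5 are dropped, bits 6..31 survive.
  assert(ClearThroughMask(5) == ~((1u << 6) - 1));
  return 0;
}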
@@ -104,30 +104,30 @@ static void VerifyMarking(Heap* heap, Address bottom, Address top) {
VerifyMarkingVisitor visitor(heap);
HeapObject* object;
Address next_object_must_be_here_or_later = bottom;
for (Address current = bottom; current < top;) {
for (Address current = bottom; current < top; current += kPointerSize) {
object = HeapObject::FromAddress(current);
if (MarkCompactCollector::IsMarked(object)) {
CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
CHECK(current >= next_object_must_be_here_or_later);
object->Iterate(&visitor);
next_object_must_be_here_or_later = current + object->Size();
// The object is either part of a black area of black allocation or a
// regular black object.
Page* page = Page::FromAddress(current);
CHECK(
page->markbits()->AllBitsSetInRange(
page->AddressToMarkbitIndex(current),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)) ||
page->markbits()->AllBitsClearInRange(
page->AddressToMarkbitIndex(current + kPointerSize * 2),
page->AddressToMarkbitIndex(next_object_must_be_here_or_later)));
current = next_object_must_be_here_or_later;
} else {
// The next word for sure belongs to the current object, jump over it.
current += kPointerSize;
}
}
}
static void VerifyMarkingBlackPage(Heap* heap, Page* page) {
CHECK(page->IsFlagSet(Page::BLACK_PAGE));
VerifyMarkingVisitor visitor(heap);
HeapObjectIterator it(page);
for (HeapObject* object = it.Next(); object != NULL; object = it.Next()) {
CHECK(Marking::IsBlack(ObjectMarking::MarkBitFrom(object)));
object->Iterate(&visitor);
}
}
static void VerifyMarking(NewSpace* space) {
Address end = space->top();
// The bottom position is at the start of its page. Allows us to use
@@ -146,7 +146,11 @@ static void VerifyMarking(NewSpace* space) {
static void VerifyMarking(PagedSpace* space) {
for (Page* p : *space) {
VerifyMarking(space->heap(), p->area_start(), p->area_end());
if (p->IsFlagSet(Page::BLACK_PAGE)) {
VerifyMarkingBlackPage(space->heap(), p);
} else {
VerifyMarking(space->heap(), p->area_start(), p->area_end());
}
}
}
@@ -405,6 +409,9 @@ void MarkCompactCollector::VerifyOmittedMapChecks() {
static void ClearMarkbitsInPagedSpace(PagedSpace* space) {
for (Page* p : *space) {
p->ClearLiveness();
if (p->IsFlagSet(Page::BLACK_PAGE)) {
p->ClearFlag(Page::BLACK_PAGE);
}
}
}
@@ -428,6 +435,9 @@ void MarkCompactCollector::ClearMarkbits() {
MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
chunk->ResetProgressBar();
chunk->ResetLiveBytes();
if (chunk->IsFlagSet(Page::BLACK_PAGE)) {
chunk->ClearFlag(Page::BLACK_PAGE);
}
}
}
@@ -649,6 +659,7 @@ void MarkCompactCollector::CollectEvacuationCandidates(PagedSpace* space) {
for (Page* p : *space) {
if (p->NeverEvacuate()) continue;
if (p->IsFlagSet(Page::BLACK_PAGE)) continue;
// Invariant: Evacuation candidates are just created when marking is
// started. This means that sweeping has finished. Furthermore, at the end
// of a GC all evacuation candidates are cleared and their slot buffers are
@@ -1902,7 +1913,9 @@ class MarkCompactCollector::EvacuateRecordOnlyVisitor final
void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
for (Page* p : *space) {
DiscoverGreyObjectsOnPage(p);
if (!p->IsFlagSet(Page::BLACK_PAGE)) {
DiscoverGreyObjectsOnPage(p);
}
if (marking_deque()->IsFull()) return;
}
}
@@ -2916,8 +2929,9 @@ bool MarkCompactCollector::IsSlotInBlackObject(MemoryChunk* p, Address slot) {
DCHECK(owner != heap_->lo_space() && owner != nullptr);
USE(owner);
// We may be part of a black area.
if (Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(slot))) {
// If we are on a black page, we cannot find the actual object start
// easily. We just return true but do not set the out_object.
if (p->IsFlagSet(Page::BLACK_PAGE)) {
return true;
}
@@ -3014,16 +3028,27 @@ HeapObject* MarkCompactCollector::FindBlackObjectBySlotSlow(Address slot) {
return nullptr;
}
LiveObjectIterator<kBlackObjects> it(p);
HeapObject* object = nullptr;
while ((object = it.Next()) != nullptr) {
int size = object->Size();
if (object->address() > slot) return nullptr;
if (object->address() <= slot && slot < (object->address() + size)) {
return object;
if (p->IsFlagSet(Page::BLACK_PAGE)) {
HeapObjectIterator it(p);
HeapObject* object = nullptr;
while ((object = it.Next()) != nullptr) {
int size = object->Size();
if (object->address() > slot) return nullptr;
if (object->address() <= slot && slot < (object->address() + size)) {
return object;
}
}
} else {
LiveObjectIterator<kBlackObjects> it(p);
HeapObject* object = nullptr;
while ((object = it.Next()) != nullptr) {
int size = object->Size();
if (object->address() > slot) return nullptr;
if (object->address() <= slot && slot < (object->address() + size)) {
return object;
}
}
}
return nullptr;
}
@@ -3360,6 +3385,7 @@ int MarkCompactCollector::Sweeper::RawSweep(
DCHECK(free_list_mode == IGNORE_FREE_LIST || space->identity() == OLD_SPACE ||
space->identity() == CODE_SPACE || space->identity() == MAP_SPACE);
DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
DCHECK(!p->IsFlagSet(Page::BLACK_PAGE));
// Before we sweep objects on the page, we free dead array buffers which
// requires valid mark bits.
@@ -3912,6 +3938,7 @@ void MarkCompactCollector::Sweeper::AddSweepingPageSafe(AllocationSpace space,
}
void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
Address space_top = space->top();
space->ClearStats();
int will_be_swept = 0;
@@ -3928,6 +3955,24 @@ void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
continue;
}
// We can not sweep black pages, since all mark bits are set for these
// pages.
if (p->IsFlagSet(Page::BLACK_PAGE)) {
p->ClearLiveness();
p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
p->ClearFlag(Page::BLACK_PAGE);
// Area above the high watermark is free.
Address free_start = p->HighWaterMark();
// Check if the space top was in this page, which means that the
// high watermark is not up-to-date.
if (free_start < space_top && space_top <= p->area_end()) {
free_start = space_top;
}
int size = static_cast<int>(p->area_end() - free_start);
space->Free(free_start, size);
continue;
}
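The black-page branch above hands the unswept tail of the page back to the free list; the only subtlety is that the page's recorded high watermark may be stale when the space top still points into this page. A small sketch of that boundary computation (plain integers stand in for Address):

#include <cassert>
#include <cstdint>

// Everything above the high watermark is unused, unless the space top lies
// in this page, in which case top is the true allocation boundary.
int UnusedTailSize(uintptr_t high_water_mark, uintptr_t space_top,
                   uintptr_t area_end) {
  uintptr_t free_start = high_water_mark;
  if (free_start < space_top && space_top <= area_end) free_start = space_top;
  return static_cast<int>(area_end - free_start);
}

int main() {
  // Top points into the page: the free range starts at top, not the watermark.
  assert(UnusedTailSize(0x100, 0x180, 0x200) == 0x80);
  // Top is elsewhere: the recorded watermark is authoritative.
  assert(UnusedTailSize(0x100, 0x900, 0x200) == 0x100);
  return 0;
}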
if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
// We need to sweep the page to get it into an iterable state again. Note
// that this adds unusable memory into the free list that is later on
@@ -218,19 +218,7 @@ class MarkBitCellIterator BASE_EMBEDDED {
inline void Advance() {
cell_index_++;
cell_base_ += Bitmap::kBitsPerCell * kPointerSize;
}
inline bool Advance(unsigned int new_cell_index) {
if (new_cell_index != cell_index_) {
DCHECK_GT(new_cell_index, cell_index_);
DCHECK_LE(new_cell_index, last_cell_index_);
unsigned int diff = new_cell_index - cell_index_;
cell_index_ = new_cell_index;
cell_base_ += diff * (Bitmap::kBitsPerCell * kPointerSize);
return true;
}
return false;
cell_base_ += 32 * kPointerSize;
}
// Return the next mark bit cell. If there is no next it returns 0;
@@ -265,6 +253,8 @@ class LiveObjectIterator BASE_EMBEDDED {
it_(chunk_),
cell_base_(it_.CurrentCellBase()),
current_cell_(*it_.CurrentCell()) {
// Black pages can not be iterated.
DCHECK(!chunk->IsFlagSet(Page::BLACK_PAGE));
}
HeapObject* Next();
@@ -106,98 +106,8 @@ class Bitmap {
for (int i = 0; i < CellsCount(); i++) cells()[i] = 0;
}
// Sets all bits in the range [start_index, end_index).
void SetRange(uint32_t start_index, uint32_t end_index) {
unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
if (start_cell_index != end_cell_index) {
// Firstly, fill all bits from the start address to the end of the first
// cell with 1s.
cells()[start_cell_index] |= ~(start_index_mask - 1);
// Then fill all in between cells with 1s.
for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
cells()[i] = ~0u;
}
// Finally, fill all bits until the end address in the last cell with 1s.
cells()[end_cell_index] |= (end_index_mask - 1);
} else {
cells()[start_cell_index] |= end_index_mask - start_index_mask;
}
}
// Clears all bits in the range [start_index, end_index).
void ClearRange(uint32_t start_index, uint32_t end_index) {
unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
if (start_cell_index != end_cell_index) {
// Firstly, fill all bits from the start address to the end of the first
// cell with 0s.
cells()[start_cell_index] &= (start_index_mask - 1);
// Then fill all in between cells with 0s.
for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
cells()[i] = 0;
}
// Finally, fill all bits until the end address in the last cell with 0s.
cells()[end_cell_index] &= ~(end_index_mask - 1);
} else {
cells()[start_cell_index] &= ~(end_index_mask - start_index_mask);
}
}
// Returns true if all bits in the range [start_index, end_index) are set.
bool AllBitsSetInRange(uint32_t start_index, uint32_t end_index) {
unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
MarkBit::CellType matching_mask;
if (start_cell_index != end_cell_index) {
matching_mask = ~(start_index_mask - 1);
if ((cells()[start_cell_index] & matching_mask) != matching_mask) {
return false;
}
for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
if (cells()[i] != ~0u) return false;
}
matching_mask = (end_index_mask - 1);
return ((cells()[end_cell_index] & matching_mask) == matching_mask);
} else {
matching_mask = end_index_mask - start_index_mask;
return (cells()[end_cell_index] & matching_mask) == matching_mask;
}
}
// Returns true if all bits in the range [start_index, end_index) are cleared.
bool AllBitsClearInRange(uint32_t start_index, uint32_t end_index) {
unsigned int start_cell_index = start_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType start_index_mask = 1u << Bitmap::IndexInCell(start_index);
unsigned int end_cell_index = end_index >> Bitmap::kBitsPerCellLog2;
MarkBit::CellType end_index_mask = 1u << Bitmap::IndexInCell(end_index);
MarkBit::CellType matching_mask;
if (start_cell_index != end_cell_index) {
matching_mask = ~(start_index_mask - 1);
if ((cells()[start_cell_index] & matching_mask)) return false;
for (unsigned int i = start_cell_index + 1; i < end_cell_index; i++) {
if (cells()[i]) return false;
}
matching_mask = (end_index_mask - 1);
return !(cells()[end_cell_index] & matching_mask);
} else {
matching_mask = end_index_mask - start_index_mask;
return !(cells()[end_cell_index] & matching_mask);
}
void SetAllBits() {
for (int i = 0; i < CellsCount(); i++) cells()[i] = 0xffffffff;
}
static void PrintWord(uint32_t word, uint32_t himask = 0) {
@@ -265,6 +175,23 @@ class Bitmap {
}
return true;
}
// Clears all bits starting from {cell_base_index} up to and excluding
// {index}. Note that {cell_base_index} is required to be cell aligned.
void ClearRange(uint32_t cell_base_index, uint32_t index) {
DCHECK_EQ(IndexInCell(cell_base_index), 0u);
DCHECK_GE(index, cell_base_index);
uint32_t start_cell_index = IndexToCell(cell_base_index);
uint32_t end_cell_index = IndexToCell(index);
DCHECK_GE(end_cell_index, start_cell_index);
// Clear all cells till the cell containing the last index.
for (uint32_t i = start_cell_index; i < end_cell_index; i++) {
cells()[i] = 0;
}
// Clear all bits in the last cell till the last bit before index.
uint32_t clear_mask = ~((1u << IndexInCell(index)) - 1);
cells()[end_cell_index] &= clear_mask;
}
};
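The removed SetRange/ClearRange helpers split a half-open bit range into a partial first cell, full middle cells, and a partial last cell. A standalone reimplementation of the SetRange case with a usage check across a cell boundary (not the V8 class itself):

#include <cassert>
#include <cstdint>

const uint32_t kBitsPerCellLog2 = 5;
uint32_t IndexInCell(uint32_t i) { return i & 31u; }

// Sets all bits in [start, end), as the removed Bitmap::SetRange did.
void SetRange(uint32_t* cells, uint32_t start, uint32_t end) {
  uint32_t start_cell = start >> kBitsPerCellLog2;
  uint32_t end_cell = end >> kBitsPerCellLog2;
  uint32_t start_mask = 1u << IndexInCell(start);
  uint32_t end_mask = 1u << IndexInCell(end);
  if (start_cell != end_cell) {
    cells[start_cell] |= ~(start_mask - 1);      // tail of the first cell
    for (uint32_t i = start_cell + 1; i < end_cell; i++) cells[i] = ~0u;
    cells[end_cell] |= end_mask - 1;             // head of the last cell
  } else {
    cells[start_cell] |= end_mask - start_mask;  // range within one cell
  }
}

int main() {
  uint32_t cells[2] = {0, 0};
  SetRange(cells, 4, 36);           // crosses the boundary between cells 0 and 1
  assert(cells[0] == 0xfffffff0u);  // bits 4..31
  assert(cells[1] == 0x0000000fu);  // bits 32..35
  return 0;
}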
class Marking : public AllStatic {
@@ -138,7 +138,7 @@ class ScavengingVisitor : public StaticVisitorBase {
}
if (marks_handling == TRANSFER_MARKS) {
if (IncrementalMarking::TransferColor(source, target, size)) {
if (IncrementalMarking::TransferColor(source, target)) {
MemoryChunk::IncrementLiveBytesFromGC(target, size);
}
}
@@ -34,6 +34,7 @@ NewSpacePageRange::NewSpacePageRange(Address start, Address limit)
SemiSpace::AssertValidRange(start, limit);
}
// -----------------------------------------------------------------------------
// SemiSpaceIterator
@@ -241,6 +242,7 @@ void MemoryChunk::ResetLiveBytes() {
}
void MemoryChunk::IncrementLiveBytes(int by) {
if (IsFlagSet(BLACK_PAGE)) return;
if (FLAG_trace_live_bytes) {
PrintIsolate(
heap()->isolate(), "live-bytes: update page=%p delta=%d %d->%d\n",
@@ -442,12 +444,6 @@ AllocationResult PagedSpace::AllocateRawUnaligned(
if (object == NULL) {
object = SlowAllocateRaw(size_in_bytes);
}
if (object != NULL) {
if (heap()->incremental_marking()->black_allocation()) {
Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
MemoryChunk::IncrementLiveBytesFromGC(object, size_in_bytes);
}
}
}
if (object != NULL) {
@@ -1229,6 +1229,18 @@ bool PagedSpace::Expand() {
// Pages created during bootstrapping may contain immortal immovable objects.
if (!heap()->deserialization_complete()) p->MarkNeverEvacuate();
// When incremental marking was activated, old space pages are allocated
// black.
if (heap()->incremental_marking()->black_allocation() &&
identity() == OLD_SPACE) {
p->markbits()->SetAllBits();
p->SetFlag(Page::BLACK_PAGE);
if (FLAG_trace_incremental_marking) {
PrintIsolate(heap()->isolate(), "Added black page %p\n",
static_cast<void*>(p));
}
}
DCHECK(Capacity() <= heap()->MaxOldGenerationSize());
p->InsertAfter(anchor_.prev_page());
@@ -1253,50 +1265,6 @@ void PagedSpace::ResetFreeListStatistics() {
}
}
void PagedSpace::SetAllocationInfo(Address top, Address limit) {
SetTopAndLimit(top, limit);
if (top != nullptr && top != limit &&
heap()->incremental_marking()->black_allocation()) {
Page* page = Page::FromAddress(top);
page->markbits()->SetRange(page->AddressToMarkbitIndex(top),
page->AddressToMarkbitIndex(limit));
page->IncrementLiveBytes(static_cast<int>(limit - top));
}
}
void PagedSpace::MarkAllocationInfoBlack() {
DCHECK(heap()->incremental_marking()->black_allocation());
Address current_top = top();
Address current_limit = limit();
if (current_top != nullptr && current_top != current_limit) {
Page* page = Page::FromAddress(current_top);
page->markbits()->SetRange(page->AddressToMarkbitIndex(current_top),
page->AddressToMarkbitIndex(current_limit));
page->IncrementLiveBytes(static_cast<int>(current_limit - current_top));
}
}
// Empty space allocation info, returning unused area to free list.
void PagedSpace::EmptyAllocationInfo() {
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap.
Address current_top = top();
Address current_limit = limit();
if (current_top == nullptr) {
DCHECK(current_limit == nullptr);
return;
}
int old_linear_size = static_cast<int>(current_limit - current_top);
SetTopAndLimit(NULL, NULL);
if (current_top != current_limit &&
heap()->incremental_marking()->black_allocation()) {
Page* page = Page::FromAddress(current_top);
page->markbits()->ClearRange(page->AddressToMarkbitIndex(current_top),
page->AddressToMarkbitIndex(current_limit));
page->IncrementLiveBytes(-static_cast<int>(current_limit - current_top));
}
Free(current_top, old_linear_size);
}
void PagedSpace::IncreaseCapacity(int size) {
accounting_stats_.ExpandSpace(size);
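The removed SetAllocationInfo/MarkAllocationInfoBlack/EmptyAllocationInfo trio maintains one invariant: under black allocation the linear allocation area [top, limit) is blackened eagerly, and whatever is handed back to the free list must be un-blackened and subtracted from live bytes again. A hedged sketch of that invariant, with mark-bit indices standing in for addresses:

#include <cassert>
#include <cstdint>

struct Space {
  uint32_t cells[4] = {};
  uint32_t top = 0, limit = 0;  // linear allocation area, in bit indices
  int live_bytes = 0;
  void SetBit(uint32_t i, bool v) {
    if (v) cells[i / 32] |= 1u << (i % 32);
    else cells[i / 32] &= ~(1u << (i % 32));
  }
};

// Black allocation on: pre-blacken the whole linear area and account for it.
void SetAllocationInfoBlack(Space* s, uint32_t top, uint32_t limit) {
  s->top = top;
  s->limit = limit;
  for (uint32_t i = top; i < limit; i++) s->SetBit(i, true);
  s->live_bytes += static_cast<int>(limit - top);
}

// Returning the unused area to the free list: clear the bits again, or the
// unallocated tail would be swept as if it were live black memory.
void EmptyAllocationInfo(Space* s) {
  for (uint32_t i = s->top; i < s->limit; i++) s->SetBit(i, false);
  s->live_bytes -= static_cast<int>(s->limit - s->top);
  s->top = s->limit = 0;
}

int main() {
  Space s;
  SetAllocationInfoBlack(&s, 8, 24);
  assert(s.live_bytes == 16);
  EmptyAllocationInfo(&s);
  assert(s.live_bytes == 0 && s.cells[0] == 0);
  return 0;
}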
@@ -1363,7 +1331,8 @@ void PagedSpace::Verify(ObjectVisitor* visitor) {
// All the interior pointers should be contained in the heap.
int size = object->Size();
object->IterateBody(map->instance_type(), size, visitor);
if (Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
if (!page->IsFlagSet(Page::BLACK_PAGE) &&
Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) {
black_size += size;
}
@@ -2460,7 +2429,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap. This also puts it back in the free list
// if it is big enough.
owner_->EmptyAllocationInfo();
owner_->Free(owner_->top(), old_linear_size);
owner_->SetTopAndLimit(nullptr, nullptr);
owner_->heap()->incremental_marking()->OldSpaceStep(size_in_bytes -
old_linear_size);
@@ -2494,8 +2464,8 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// Keep the linear allocation area empty if requested to do so, just
// return area back to the free list instead.
owner_->Free(new_node->address() + size_in_bytes, bytes_left);
owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes);
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes);
} else if (bytes_left > kThreshold &&
owner_->heap()->incremental_marking()->IsMarkingIncomplete() &&
FLAG_incremental_marking) {
@@ -2505,15 +2475,14 @@ HeapObject* FreeList::Allocate(int size_in_bytes) {
// we want to do another increment until the linear area is used up.
owner_->Free(new_node->address() + size_in_bytes + linear_size,
new_node_size - size_in_bytes - linear_size);
owner_->SetAllocationInfo(
new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + size_in_bytes + linear_size);
} else {
DCHECK(bytes_left >= 0);
// Normally we give the rest of the node to the allocator as its new
// linear allocation area.
owner_->SetAllocationInfo(new_node->address() + size_in_bytes,
new_node->address() + new_node_size);
owner_->SetTopAndLimit(new_node->address() + size_in_bytes,
new_node->address() + new_node_size);
}
owner_->AllocationStep(new_node->address(), size_in_bytes);
@@ -2903,11 +2872,6 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
heap()->incremental_marking()->OldSpaceStep(object_size);
AllocationStep(object->address(), object_size);
if (heap()->incremental_marking()->black_allocation()) {
Marking::MarkBlack(ObjectMarking::MarkBitFrom(object));
MemoryChunk::IncrementLiveBytesFromGC(object, object_size);
}
return object;
}
@@ -249,6 +249,11 @@ class MemoryChunk {
// within the new space during evacuation.
PAGE_NEW_NEW_PROMOTION,
// A black page has all mark bits set to 1 (black). A black page currently
// cannot be iterated because it is not swept. Moreover, live bytes are
// not updated.
BLACK_PAGE,
// This flag is intended to be used for testing. Works only when both
// FLAG_stress_compaction and FLAG_manual_evacuation_candidates_selection
// are set. It forces the page to become an evacuation candidate at next
@@ -423,10 +428,12 @@ class MemoryChunk {
int LiveBytes() {
DCHECK_LE(static_cast<unsigned>(live_byte_count_), size_);
DCHECK(!IsFlagSet(BLACK_PAGE) || live_byte_count_ == 0);
return live_byte_count_;
}
void SetLiveBytes(int live_bytes) {
if (IsFlagSet(BLACK_PAGE)) return;
DCHECK_GE(live_bytes, 0);
DCHECK_LE(static_cast<size_t>(live_bytes), size_);
live_byte_count_ = live_bytes;
@@ -2065,12 +2072,14 @@ class PagedSpace : public Space {
allocation_info_.Reset(top, limit);
}
void SetAllocationInfo(Address top, Address limit);
// Empty space allocation info, returning unused area to free list.
void EmptyAllocationInfo();
void MarkAllocationInfoBlack();
void EmptyAllocationInfo() {
// Mark the old linear allocation area with a free space map so it can be
// skipped when scanning the heap.
int old_linear_size = static_cast<int>(limit() - top());
Free(top(), old_linear_size);
SetTopAndLimit(NULL, NULL);
}
void Allocate(int bytes) { accounting_stats_.AllocateBytes(bytes); }
@@ -139,8 +139,6 @@ void RelocInfo::set_target_object(Object* target,
host() != NULL &&
target->IsHeapObject()) {
host()->GetHeap()->RecordWriteIntoCode(host(), this, target);
host()->GetHeap()->incremental_marking()->RecordWriteIntoCode(
host(), this, HeapObject::cast(target));
}
}
@@ -1993,9 +1993,10 @@ void WeakCell::initialize(HeapObject* val) {
// We just have to execute the generational barrier here because we never
// mark through a weak cell and collect evacuation candidates when we process
// all weak cells.
WriteBarrierMode mode = Marking::IsBlack(ObjectMarking::MarkBitFrom(this))
? UPDATE_WRITE_BARRIER
: UPDATE_WEAK_WRITE_BARRIER;
WriteBarrierMode mode =
Page::FromAddress(this->address())->IsFlagSet(Page::BLACK_PAGE)
? UPDATE_WRITE_BARRIER
: UPDATE_WEAK_WRITE_BARRIER;
CONDITIONAL_WRITE_BARRIER(GetHeap(), this, kValueOffset, val, mode);
}
@@ -110,51 +110,5 @@ TEST(Marking, TransitionWhiteGreyBlackGrey) {
free(bitmap);
}
TEST(Marking, SetAndClearRange) {
Bitmap* bitmap = reinterpret_cast<Bitmap*>(
calloc(Bitmap::kSize / kPointerSize, kPointerSize));
for (int i = 0; i < 3; i++) {
bitmap->SetRange(i, Bitmap::kBitsPerCell + i);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffff << i);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], (1 << i) - 1);
bitmap->ClearRange(i, Bitmap::kBitsPerCell + i);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0x0);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0x0);
}
free(bitmap);
}
TEST(Marking, ClearMultipleRanges) {
Bitmap* bitmap = reinterpret_cast<Bitmap*>(
calloc(Bitmap::kSize / kPointerSize, kPointerSize));
CHECK(bitmap->AllBitsClearInRange(0, Bitmap::kBitsPerCell * 3));
bitmap->SetRange(0, Bitmap::kBitsPerCell * 3);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffffffff);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffffffff);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xffffffff);
CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell * 3));
bitmap->ClearRange(Bitmap::kBitsPerCell / 2, Bitmap::kBitsPerCell);
bitmap->ClearRange(Bitmap::kBitsPerCell,
Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2);
bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 8,
Bitmap::kBitsPerCell * 2 + 16);
bitmap->ClearRange(Bitmap::kBitsPerCell * 2 + 24, Bitmap::kBitsPerCell * 3);
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[0], 0xffff);
CHECK(bitmap->AllBitsSetInRange(0, Bitmap::kBitsPerCell / 2));
CHECK(bitmap->AllBitsClearInRange(Bitmap::kBitsPerCell / 2,
Bitmap::kBitsPerCell));
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[1], 0xffff0000);
CHECK(
bitmap->AllBitsSetInRange(Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2,
2 * Bitmap::kBitsPerCell));
CHECK(bitmap->AllBitsClearInRange(
Bitmap::kBitsPerCell, Bitmap::kBitsPerCell + Bitmap::kBitsPerCell / 2));
CHECK_EQ(reinterpret_cast<uint32_t*>(bitmap)[2], 0xff00ff);
CHECK(bitmap->AllBitsSetInRange(2 * Bitmap::kBitsPerCell,
2 * Bitmap::kBitsPerCell + 8));
CHECK(bitmap->AllBitsClearInRange(2 * Bitmap::kBitsPerCell + 24,
Bitmap::kBitsPerCell * 3));
free(bitmap);
}
} // namespace internal
} // namespace v8