Commit 6c7304b0 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Separate iteration of black allocated objects from write barrier.

This patch replaces IterateBlackObject with two functions:
- RecordWrites,
- ProcessBlackAllocatedObject.

The RecordWrites function is a write barrier, and its behaviour depends
on whether the concurrent marking is on or not.

The ProcessBlackAllocatedObject function behaves the same independent of
the concurrent marker.

BUG=chromium:694255

Change-Id: I1666371fbdac9b26c6f875b9e1d1751da4ea1960
Reviewed-on: https://chromium-review.googlesource.com/541441
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#46060}
parent d58bb2dc
......@@ -3455,7 +3455,7 @@ AllocationResult Heap::CopyCode(Code* code) {
new_code->Relocate(new_addr - old_addr);
// We have to iterate over the object and process its pointers when black
// allocation is on.
incremental_marking()->IterateBlackObject(new_code);
incremental_marking()->ProcessBlackAllocatedObject(new_code);
// Record all references to embedded objects in the new code object.
RecordWritesIntoCode(new_code);
return new_code;
......@@ -4269,7 +4269,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// incremental marking. E.g. see VisitNativeContextIncremental.
DCHECK(ObjectMarking::IsBlackOrGrey(obj, MarkingState::Internal(obj)));
if (ObjectMarking::IsBlack(obj, MarkingState::Internal(obj))) {
incremental_marking()->IterateBlackObject(obj);
incremental_marking()->ProcessBlackAllocatedObject(obj);
}
addr += obj->Size();
}
......@@ -4281,7 +4281,7 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
// Large object space doesn't use reservations, so it needs custom handling.
for (HeapObject* object : *large_objects) {
incremental_marking()->IterateBlackObject(object);
incremental_marking()->ProcessBlackAllocatedObject(object);
}
}
......
......@@ -342,7 +342,7 @@ using v8::MemoryPressureLevel;
#define FIXED_ARRAY_ELEMENTS_WRITE_BARRIER(heap, array, start, length) \
do { \
heap->RecordFixedArrayElements(array, start, length); \
heap->incremental_marking()->IterateBlackObject(array); \
heap->incremental_marking()->RecordWrites(array); \
} while (false)
// Forward declarations.
......
......@@ -19,6 +19,14 @@ void IncrementalMarking::RecordWrite(HeapObject* obj, Object** slot,
}
}
// Write barrier for whole-object writes: re-visits |obj| so that pointers
// stored into it after it was scanned are not missed by the marker.
void IncrementalMarking::RecordWrites(HeapObject* obj) {
  if (!IsMarking()) return;
  // With concurrent marking enabled every written-to object must be
  // revisited; otherwise only black (already fully scanned) objects can
  // hide newly written pointers from the marker.
  const bool needs_revisit =
      FLAG_concurrent_marking ||
      ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj));
  if (needs_revisit) {
    RevisitObject(obj);
  }
}
void IncrementalMarking::RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value) {
......
......@@ -321,20 +321,6 @@ class IncrementalMarkingMarkingVisitor
}
};
// Re-visits |object| during incremental marking if it has already been
// marked black, pushing its map grey and iterating its body so that any
// pointers written after the object was scanned are discovered.
void IncrementalMarking::IterateBlackObject(HeapObject* object) {
if (IsMarking() &&
ObjectMarking::IsBlack<kAtomicity>(object, marking_state(object))) {
Page* page = Page::FromAddress(object->address());
if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
// IterateBlackObject requires us to visit the whole object. Large
// objects may be scanned incrementally via a progress bar, so reset it
// to force a full re-scan.
page->ResetProgressBar();
}
Map* map = object->map();
// Mark the map grey first so the visitor below can rely on it being
// reachable, then iterate the object's body.
WhiteToGreyAndPush(map);
IncrementalMarkingMarkingVisitor::IterateBody(map, object);
}
}
class IncrementalMarkingRootMarkingVisitor : public RootVisitor {
public:
explicit IncrementalMarkingRootMarkingVisitor(
......@@ -902,6 +888,26 @@ void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) {
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
}
// Re-visits a black-allocated object so the pointers it was initialized
// with are seen by the incremental marker. Unlike RecordWrites, this is
// independent of whether concurrent marking is enabled.
void IncrementalMarking::ProcessBlackAllocatedObject(HeapObject* obj) {
  if (!IsMarking()) return;
  if (ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj))) {
    RevisitObject(obj);
  }
}
// Iterates the body of an already-visited object again. Callers must ensure
// marking is active and that |obj| is black (or concurrent marking is on).
void IncrementalMarking::RevisitObject(HeapObject* obj) {
DCHECK(IsMarking());
DCHECK(FLAG_concurrent_marking ||
ObjectMarking::IsBlack<kAtomicity>(obj, marking_state(obj)));
Page* page = Page::FromAddress(obj->address());
if ((page->owner() != nullptr) && (page->owner()->identity() == LO_SPACE)) {
// Large objects may be scanned incrementally via a progress bar; reset it
// so the whole object is visited, not just the remaining tail.
page->ResetProgressBar();
}
Map* map = obj->map();
// Push the map grey before iterating the body so the visitor sees a
// reachable map.
WhiteToGreyAndPush(map);
IncrementalMarkingMarkingVisitor::IterateBody(map, obj);
}
intptr_t IncrementalMarking::ProcessMarkingDeque(
intptr_t bytes_to_process, ForceCompletionAction completion) {
intptr_t bytes_processed = 0;
......
......@@ -211,6 +211,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
INLINE(void RecordWriteIntoCode(Code* host, RelocInfo* rinfo, Object* value));
INLINE(void RecordWriteOfCodeEntry(JSFunction* host, Object** slot,
Code* value));
INLINE(void RecordWrites(HeapObject* obj));
void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
void RecordWriteIntoCodeSlow(Code* host, RelocInfo* rinfo, Object* value);
......@@ -247,7 +248,7 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
bool IsIdleMarkingDelayCounterLimitReached();
void IterateBlackObject(HeapObject* object);
void ProcessBlackAllocatedObject(HeapObject* obj);
Heap* heap() const { return heap_; }
......@@ -310,8 +311,6 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
static void SetNewSpacePageFlags(MemoryChunk* chunk, bool is_marking);
INLINE(void ProcessMarkingDeque());
INLINE(intptr_t ProcessMarkingDeque(
intptr_t bytes_to_process,
ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION));
......@@ -319,6 +318,8 @@ class V8_EXPORT_PRIVATE IncrementalMarking {
INLINE(bool IsFixedArrayWithProgressBar(HeapObject* object));
INLINE(void VisitObject(Map* map, HeapObject* obj, int size));
void RevisitObject(HeapObject* obj);
void IncrementIdleMarkingDelayCounter();
void AdvanceIncrementalMarkingOnAllocation();
......
......@@ -3967,7 +3967,10 @@ void Map::ReplaceDescriptors(DescriptorArray* new_descriptors,
}
DescriptorArray* to_replace = instance_descriptors();
isolate->heap()->incremental_marking()->IterateBlackObject(to_replace);
// Replace descriptors by new_descriptors in all maps that share it. The old
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
isolate->heap()->incremental_marking()->RecordWrites(to_replace);
Map* current = this;
while (current->instance_descriptors() == to_replace) {
Object* next = current->GetBackPointer();
......@@ -4706,8 +4709,10 @@ void Map::EnsureDescriptorSlack(Handle<Map> map, int slack) {
}
Isolate* isolate = map->GetIsolate();
// Replace descriptors by new_descriptors in all maps that share it.
isolate->heap()->incremental_marking()->IterateBlackObject(*descriptors);
// Replace descriptors by new_descriptors in all maps that share it. The old
// descriptors will not be trimmed in the mark-compactor, we need to mark
// all its elements.
isolate->heap()->incremental_marking()->RecordWrites(*descriptors);
Map* current = *map;
while (current->instance_descriptors() == *descriptors) {
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment