Commit bb24b91f authored by ulan, committed by Commit bot

[heap] Invoke incremental marking step before allocation.

This ensures that the newly allocated object immediately precedes the
linear allocation area, which is needed for allocation folding.

For more info see:
https://bugs.chromium.org/p/chromium/issues/detail?id=659165#c13

BUG=chromium:659165

Review-Url: https://codereview.chromium.org/2464393002
Cr-Commit-Position: refs/heads/master@{#40704}
parent 5a18685e
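A minimal standalone sketch (not V8 code; all names below are hypothetical) of the ordering constraint the commit message describes: if an incremental marking step can retire or move the linear allocation area, it has to run before the object is carved out of that area, otherwise the new object may no longer immediately precede the area and the adjacency assumption behind allocation folding breaks.

#include <cstddef>
#include <cstdint>
#include <cstdio>

struct LinearArea {
  uintptr_t top = 0x1000;    // next free address in the current area
  uintptr_t limit = 0x2000;  // end of the current area
};

// Stand-in for an incremental marking step that may retire the current
// linear allocation area and open a new one somewhere else.
void MarkingStepMayMoveArea(LinearArea* area) {
  area->top = 0x8000;
  area->limit = 0x9000;
}

// With the step invoked before the bump-pointer allocation, the returned
// object always ends exactly at the (possibly new) area's top.
uintptr_t AllocateWithStepBefore(LinearArea* area, size_t size) {
  MarkingStepMayMoveArea(area);
  uintptr_t object = area->top;
  area->top += size;
  return object;  // object + size == area->top, so adjacency holds
}

int main() {
  LinearArea area;
  uintptr_t obj = AllocateWithStepBefore(&area, 32);
  std::printf("object ends at %#lx, top is %#lx\n",
              static_cast<unsigned long>(obj + 32),
              static_cast<unsigned long>(area.top));
  return 0;
}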
@@ -32,9 +32,7 @@ IncrementalMarking::IncrementalMarking(Heap* heap)
was_activated_(false),
black_allocation_(false),
finalize_marking_completed_(false),
request_type_(NONE),
new_generation_observer_(*this, kAllocatedThreshold),
old_generation_observer_(*this, kAllocatedThreshold) {}
request_type_(NONE) {}
bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
HeapObject* value_heap_obj = HeapObject::cast(value);
@@ -489,16 +487,6 @@ void IncrementalMarking::Start(GarbageCollectionReason gc_reason) {
state_ = SWEEPING;
}
SpaceIterator it(heap_);
while (it.has_next()) {
Space* space = it.next();
if (space == heap_->new_space()) {
space->AddAllocationObserver(&new_generation_observer_);
} else {
space->AddAllocationObserver(&old_generation_observer_);
}
}
incremental_marking_job()->Start(heap_);
}
@@ -957,16 +945,6 @@ void IncrementalMarking::Stop() {
Max(0, old_generation_size_mb - old_generation_limit_mb));
}
SpaceIterator it(heap_);
while (it.has_next()) {
Space* space = it.next();
if (space == heap_->new_space()) {
space->RemoveAllocationObserver(&new_generation_observer_);
} else {
space->RemoveAllocationObserver(&old_generation_observer_);
}
}
IncrementalMarking::set_should_hurry(false);
if (IsMarking()) {
PatchIncrementalMarkingRecordWriteStubs(heap_,
@@ -1085,30 +1063,33 @@ void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
return;
}
size_t bytes_to_process =
StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
// The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly.
size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
kMaxStepSizeInMs,
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
bytes_to_process = Min(bytes_to_process, max_step_size);
size_t bytes_processed = 0;
if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
// Steps performed in tasks have put us ahead of schedule.
// We skip processing of marking dequeue here and thus
// shift marking time from inside V8 to standalone tasks.
bytes_marked_ahead_of_schedule_ -= bytes_to_process;
bytes_processed = bytes_to_process;
} else {
bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
FORCE_COMPLETION, StepOrigin::kV8);
}
bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
size_t bytes_to_process = StepSizeToKeepUpWithAllocations();
if (bytes_to_process < IncrementalMarking::kAllocatedThreshold) {
return;
}
bytes_to_process += StepSizeToMakeProgress();
// The first step after Scavenge will see many allocated bytes.
// Cap the step size to distribute the marking work more uniformly.
size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
kMaxStepSizeInMs,
heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
bytes_to_process = Min(bytes_to_process, max_step_size);
size_t bytes_processed = 0;
if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
// Steps performed in tasks have put us ahead of schedule.
// We skip processing of marking dequeue here and thus
// shift marking time from inside V8 to standalone tasks.
bytes_marked_ahead_of_schedule_ -= bytes_to_process;
bytes_processed = bytes_to_process;
} else {
bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
FORCE_COMPLETION, StepOrigin::kV8);
}
bytes_allocated_ -= Min(bytes_allocated_, bytes_processed);
}
size_t IncrementalMarking::Step(size_t bytes_to_process,
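A rough sketch (hypothetical helper names, not the V8 implementation) of the budget computation that the restructured AdvanceIncrementalMarkingOnAllocation above follows: bail out while the allocation-driven budget is still below the threshold, then add the progress budget, cap the total by the estimated marking speed, and let work already done ahead of schedule by background tasks cover the budget before falling back to a synchronous Step.

#include <algorithm>
#include <cstddef>

// Hypothetical stand-ins for the budgets and state used by the real code.
struct MarkingBudget {
  size_t keep_up_with_allocations;  // StepSizeToKeepUpWithAllocations()
  size_t make_progress;             // StepSizeToMakeProgress()
  size_t max_step_size;             // cap derived from the marking speed
  size_t marked_ahead_of_schedule;  // bytes_marked_ahead_of_schedule_
};

// Returns how many bytes a synchronous step should process, or 0 if the
// allocation budget is below the threshold or background tasks have already
// marked at least that many bytes ahead of schedule.
size_t ComputeSynchronousStep(MarkingBudget* b, size_t allocated_threshold) {
  size_t bytes_to_process = b->keep_up_with_allocations;
  if (bytes_to_process < allocated_threshold) return 0;  // early bail-out
  bytes_to_process += b->make_progress;
  bytes_to_process = std::min(bytes_to_process, b->max_step_size);
  if (b->marked_ahead_of_schedule >= bytes_to_process) {
    // Work done in tasks pays down the budget; no synchronous step needed.
    b->marked_ahead_of_schedule -= bytes_to_process;
    return 0;
  }
  return bytes_to_process;  // caller performs Step(bytes_to_process, ...)
}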
@@ -99,6 +99,7 @@ class IncrementalMarking {
CompletionAction completion_action,
ForceCompletionAction force_completion,
StepOrigin step_origin);
void AdvanceIncrementalMarkingOnAllocation();
// It's hard to know how much work the incremental marker should do to make
// progress in the face of the mutator creating new work for it. We start
@@ -218,20 +219,6 @@ class IncrementalMarking {
void AbortBlackAllocation();
private:
class Observer : public AllocationObserver {
public:
Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
: AllocationObserver(step_size),
incremental_marking_(incremental_marking) {}
void Step(int bytes_allocated, Address, size_t) override {
incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
}
private:
IncrementalMarking& incremental_marking_;
};
int64_t SpaceLeftInOldSpace();
void StartMarking();
@@ -269,8 +256,6 @@ class IncrementalMarking {
void IncrementIdleMarkingDelayCounter();
void AdvanceIncrementalMarkingOnAllocation();
size_t StepSizeToKeepUpWithAllocations();
size_t StepSizeToMakeProgress();
@@ -297,8 +282,6 @@ class IncrementalMarking {
GCRequestType request_type_;
IncrementalMarkingJob incremental_marking_job_;
Observer new_generation_observer_;
Observer old_generation_observer_;
DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};
@@ -2582,6 +2582,15 @@ HeapObject* FreeList::Allocate(size_t size_in_bytes) {
owner_->heap()->StartIncrementalMarkingIfAllocationLimitIsReached(
Heap::kNoGCFlags, kNoGCCallbackFlags);
// We cannot place incremental marking step in an AllocationObserver because
// 1) incremental marking step can change linear allocation area.
// 2) allocation observers are called after allocation.
// 3) allocation folding assumes that the newly allocated object immediately
// precedes the linear allocation area.
// See crbug.com/659165.
owner_->heap()
->incremental_marking()
->AdvanceIncrementalMarkingOnAllocation();
size_t new_node_size = 0;
FreeSpace* new_node = FindNodeFor(size_in_bytes, &new_node_size);
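To illustrate the reasoning in the comment above, here is a minimal sketch (hypothetical types, not the V8 AllocationObserver API) of an observer-style hook: the space notifies observers only after the object has been carved out of the linear allocation area, so a marking step triggered from such a hook runs too late to keep the new object adjacent to the area. Calling the marking step directly at the top of the allocation path, as this hunk and the LargeObjectSpace hunk below do, avoids that ordering problem.

#include <cstddef>
#include <cstdint>
#include <vector>

// Hypothetical observer interface mirroring the removed pattern.
class ObserverSketch {
 public:
  virtual ~ObserverSketch() = default;
  virtual void Step(size_t bytes_allocated, uintptr_t object) = 0;
};

class SpaceSketch {
 public:
  void AddObserver(ObserverSketch* observer) { observers_.push_back(observer); }

  uintptr_t Allocate(size_t size) {
    uintptr_t object = top_;
    top_ += size;  // the allocation happens first...
    for (ObserverSketch* o : observers_) {
      o->Step(size, object);  // ...observers run only afterwards
    }
    return object;
  }

 private:
  uintptr_t top_ = 0x1000;
  std::vector<ObserverSketch*> observers_;
};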
@@ -3011,6 +3020,8 @@ AllocationResult LargeObjectSpace::AllocateRaw(int object_size,
heap()->StartIncrementalMarkingIfAllocationLimitIsReached(Heap::kNoGCFlags,
kNoGCCallbackFlags);
heap()->incremental_marking()->AdvanceIncrementalMarkingOnAllocation();
AllocationStep(object->address(), object_size);
if (heap()->incremental_marking()->black_allocation()) {