Commit 007baaed authored by ofrobots, committed by Commit bot

improve allocation accounting for incremental marking

Add an assertion that allocated_bytes >= 0 in IncrementalMarking::Step and
then make it pass. We were not being diligent in maintaining
top_on_previous_step_, and as a result inaccurate, and even negative, values
of allocated_bytes were being reported to Step.

BUG=
R=hpayer@chromium.org

Review URL: https://codereview.chromium.org/1274453002

Cr-Commit-Position: refs/heads/master@{#30778}
parent 12c7bc9a
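
For context, a minimal standalone sketch of the arithmetic this commit tightens up. This is not V8 code; the names and constants are hypothetical, and only the subtraction mirrors the real call sites. allocated_bytes is derived as the distance between the current allocation top and the snapshot taken at the previous step, so a stale snapshot turns into a negative delta as soon as the top pointer moves backwards:

    // sketch.cc -- illustrates the failure mode; not actual V8 code.
    #include <cassert>
    #include <cstdint>

    int main() {
      // Stand-ins for NewSpace's allocation top and its snapshot.
      intptr_t top_on_previous_step = 0x1000;  // taken at the last step
      intptr_t top = 0x2000;                   // allocation moved forward

      int bytes_allocated = static_cast<int>(top - top_on_previous_step);
      assert(bytes_allocated == 0x1000);  // positive: accounting holds

      // A space reset moves top backwards. If the snapshot is not
      // refreshed at the same time, the next delta is negative -- the
      // value that used to reach Step and now trips the new DCHECK.
      top = 0x800;
      bytes_allocated = static_cast<int>(top - top_on_previous_step);
      assert(bytes_allocated < 0);
      return 0;
    }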
src/heap/incremental-marking.cc

@@ -901,6 +901,8 @@ intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
                                   CompletionAction action,
                                   ForceMarkingAction marking,
                                   ForceCompletionAction completion) {
+  DCHECK(allocated_bytes >= 0);
+
   if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
       (state_ != SWEEPING && state_ != MARKING)) {
     return 0;
...
src/heap/spaces.cc

@@ -1451,6 +1451,7 @@ void NewSpace::UpdateAllocationInfo() {
 void NewSpace::ResetAllocationInfo() {
+  Address old_top = allocation_info_.top();
   to_space_.Reset();
   UpdateAllocationInfo();
   pages_used_ = 0;
@@ -1459,6 +1460,12 @@ void NewSpace::ResetAllocationInfo() {
   while (it.has_next()) {
     Bitmap::Clear(it.next());
   }
+  if (top_on_previous_step_) {
+    int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
+    heap()->incremental_marking()->Step(bytes_allocated,
+                                        IncrementalMarking::GC_VIA_STACK_GUARD);
+    top_on_previous_step_ = allocation_info_.top();
+  }
 }
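
The hunk above follows a flush-then-re-anchor pattern: report the bytes allocated since the last snapshot before the reset invalidates old_top, then re-take the snapshot at the new top. A simplified sketch of the same pattern, using a hypothetical helper rather than the real V8 API:

    #include <cstdint>

    // Hypothetical stand-in for heap()->incremental_marking()->Step(...).
    static void ReportAllocated(int bytes_allocated) {
      (void)bytes_allocated;  // marking work would happen here
    }

    // Flush-then-re-anchor: call when the allocation top is about to jump.
    static void FlushAndReanchor(intptr_t old_top, intptr_t new_top,
                                 intptr_t* top_on_previous_step) {
      if (*top_on_previous_step != 0) {  // 0 means stepping is disabled
        ReportAllocated(static_cast<int>(old_top - *top_on_previous_step));
        *top_on_previous_step = new_top;  // snapshot the post-move top
      }
    }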
@@ -1537,13 +1544,15 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
       return false;
     }
+    if (top_on_previous_step_) {
       // Do a step for the bytes allocated on the last page.
       int bytes_allocated = static_cast<int>(old_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated,
-                                        IncrementalMarking::GC_VIA_STACK_GUARD);
-    old_top = allocation_info_.top();
-    top_on_previous_step_ = old_top;
+      heap()->incremental_marking()->Step(
+          bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
+      top_on_previous_step_ = allocation_info_.top();
+    }
+    old_top = allocation_info_.top();
     high = to_space_.page_high();
     filler_size = Heap::GetFillToAlign(old_top, alignment);
     aligned_size_in_bytes = size_in_bytes + filler_size;
@@ -1555,13 +1564,15 @@ bool NewSpace::EnsureAllocation(int size_in_bytes,
     // Either the limit has been lowered because linear allocation was disabled
     // or because incremental marking wants to get a chance to do a step. Set
     // the new limit accordingly.
+    if (top_on_previous_step_) {
       Address new_top = old_top + aligned_size_in_bytes;
       int bytes_allocated = static_cast<int>(new_top - top_on_previous_step_);
-    heap()->incremental_marking()->Step(bytes_allocated,
-                                        IncrementalMarking::GC_VIA_STACK_GUARD);
-    UpdateInlineAllocationLimit(aligned_size_in_bytes);
+      heap()->incremental_marking()->Step(
+          bytes_allocated, IncrementalMarking::GC_VIA_STACK_GUARD);
       top_on_previous_step_ = new_top;
+    }
+    UpdateInlineAllocationLimit(aligned_size_in_bytes);
   }
   return true;
 }
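
Both EnsureAllocation call sites are now guarded by if (top_on_previous_step_), so Step only runs when a valid snapshot exists. Without the guard, a cleared snapshot (0) makes the subtraction hand back the raw top address as "bytes allocated". A hedged sketch of the contrast, with made-up numbers:

    // fragment.cc -- old vs. new call-site behaviour; not V8 code.
    #include <cstdint>

    int main() {
      intptr_t top = 0x2000;
      intptr_t top_on_previous_step = 0;  // cleared: stepping disabled

      // Old, unguarded arithmetic: reports 0x2000 "bytes allocated",
      // i.e. the raw top address -- meaningless and possibly huge.
      int bogus = static_cast<int>(top - top_on_previous_step);
      (void)bogus;

      // New, guarded behaviour: skip the step entirely.
      if (top_on_previous_step != 0) {
        // heap()->incremental_marking()->Step(...) would run here.
      }
      return 0;
    }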
src/heap/spaces.h

@@ -2427,7 +2427,8 @@
         to_space_(heap, kToSpace),
         from_space_(heap, kFromSpace),
         reservation_(),
-        inline_allocation_limit_step_(0) {}
+        inline_allocation_limit_step_(0),
+        top_on_previous_step_(0) {}

   // Sets up the new space using the given chunk.
   bool SetUp(int reserved_semispace_size_, int max_semi_space_size);
@@ -2609,7 +2610,7 @@
   void LowerInlineAllocationLimit(intptr_t step) {
     inline_allocation_limit_step_ = step;
     UpdateInlineAllocationLimit(0);
-    top_on_previous_step_ = allocation_info_.top();
+    top_on_previous_step_ = step ? allocation_info_.top() : 0;
   }

   // Get the extent of the inactive semispace (for use as a marking stack,
...
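
Together, the two header changes establish a sentinel convention: top_on_previous_step_ is either a genuine snapshot of the allocation top, or 0 when inline-allocation stepping is off (and it now starts at 0 via the initializer list instead of being left out of it). A simplified model of that convention, as a hypothetical class rather than the real header:

    #include <cstdint>

    // Hypothetical, simplified model of the convention; not the real header.
    class AllocationStepper {
     public:
      // Mirrors LowerInlineAllocationLimit: a zero step disables stepping
      // and clears the snapshot instead of leaving it dangling.
      void SetStep(intptr_t step, intptr_t current_top) {
        top_on_previous_step_ = step ? current_top : 0;
      }
      bool StepPending() const { return top_on_previous_step_ != 0; }

     private:
      intptr_t top_on_previous_step_ = 0;  // matches the new initializer
    };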