Commit f8a08f38 authored by Ulan Degenbaev, committed by Commit Bot

[heap] Remove live byte adjustments from mutator.

The effect of array/string trimming on space size is now postponed until
sweeping completes. This simplifies the runtime code and fixes a live-byte
update race with the concurrent marker.

This patch restores the monotonicity of PromotedSinceLastGC by notifying the
heap when the sweeper discovers more free space than estimated.

Bug: chromium:694255
Change-Id: I7a8c24f2c3398bc0c8a43ffd1d35ace68010cd65
Reviewed-on: https://chromium-review.googlesource.com/621326
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47464}
parent e49accd9
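
The mechanism is small enough to model in a few lines. Below is a minimal,
self-contained C++ sketch of the scheme this patch adopts. It is not V8 code
and every name in it is invented: the mutator's trim only records pending free
space, the space size stays unchanged until sweeping, and the sweeper's
refinement lowers the last-GC baseline so the promoted-bytes delta never
underflows.

#include <cassert>
#include <cstddef>

// Toy model of the accounting scheme introduced by this patch.
class ToyHeap {
 public:
  void Allocate(size_t bytes) { old_generation_size_ += bytes; }

  // Mutator-side trimming no longer touches live-byte counters; the freed
  // tail just becomes a filler that the sweeper discovers later.
  void TrimObject(size_t bytes) { pending_free_ += bytes; }

  // End of a GC cycle: remember the current size as the baseline.
  void OnGCFinished() {
    old_generation_size_at_last_gc_ = old_generation_size_;
  }

  // Sweeper-side refinement: the recount shrinks the space size, and the
  // heap lowers the baseline by the same amount, keeping the delta monotone.
  void SweepAndRefine() {
    old_generation_size_ -= pending_free_;
    NotifyRefinedOldGenerationSize(pending_free_);
    pending_free_ = 0;
  }

  size_t PromotedSinceLastGC() const {
    assert(old_generation_size_ >= old_generation_size_at_last_gc_);
    return old_generation_size_ - old_generation_size_at_last_gc_;
  }

 private:
  void NotifyRefinedOldGenerationSize(size_t decreased_bytes) {
    assert(old_generation_size_at_last_gc_ >= decreased_bytes);
    old_generation_size_at_last_gc_ -= decreased_bytes;
  }

  size_t old_generation_size_ = 0;
  size_t old_generation_size_at_last_gc_ = 0;
  size_t pending_free_ = 0;
};

int main() {
  ToyHeap heap;
  heap.Allocate(100);
  heap.OnGCFinished();                      // baseline == 100
  heap.TrimObject(20);                      // size unchanged for now
  assert(heap.PromotedSinceLastGC() == 0);
  heap.SweepAndRefine();                    // size 80, baseline 80
  assert(heap.PromotedSinceLastGC() == 0);  // no underflow, still monotone
  heap.Allocate(40);
  assert(heap.PromotedSinceLastGC() == 40);
  return 0;
}
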
@@ -3352,23 +3352,6 @@ bool Heap::IsImmovable(HeapObject* object) {
   return chunk->NeverEvacuate() || chunk->owner()->identity() == LO_SPACE;
 }
 
-void Heap::AdjustLiveBytes(HeapObject* object, int by) {
-  // As long as the inspected object is black and we are currently not iterating
-  // the heap using HeapIterator, we can update the live byte count. We cannot
-  // update while using HeapIterator because the iterator is temporarily
-  // marking the whole object graph, without updating live bytes.
-  if (lo_space()->Contains(object)) {
-    lo_space()->AdjustLiveBytes(by);
-  } else if (!in_heap_iterator() &&
-             !mark_compact_collector()->sweeping_in_progress() &&
-             incremental_marking()->marking_state()->IsBlack(object)) {
-    DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone());
-    incremental_marking()->marking_state()->IncrementLiveBytes(
-        MemoryChunk::FromAddress(object->address()), by);
-  }
-}
-
 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
                                          int elements_to_trim) {
   CHECK_NOT_NULL(object);
@@ -3416,9 +3399,6 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
   FixedArrayBase* new_object =
       FixedArrayBase::cast(HeapObject::FromAddress(new_start));
 
-  // Maintain consistency of live bytes during incremental marking
-  AdjustLiveBytes(new_object, -bytes_to_trim);
-
   // Remove recorded slots for the new map and length offset.
   ClearRecordedSlot(new_object, HeapObject::RawField(new_object, 0));
   ClearRecordedSlot(new_object, HeapObject::RawField(
@@ -3492,9 +3472,6 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
   // avoid races with the sweeper thread.
   object->synchronized_set_length(len - elements_to_trim);
 
-  // Maintain consistency of live bytes during incremental marking
-  AdjustLiveBytes(object, -bytes_to_trim);
-
   // Notify the heap profiler of change in object layout. The array may not be
   // moved during GC, and size has to be adjusted nevertheless.
   HeapProfiler* profiler = isolate()->heap_profiler();
...
@@ -716,9 +716,6 @@ class Heap {
   static bool IsImmovable(HeapObject* object);
 
-  // Maintain consistency of live bytes during incremental marking.
-  void AdjustLiveBytes(HeapObject* object, int by);
-
   // Trim the given array from the left. Note that this relocates the object
   // start and hence is only valid if there is only a single reference to it.
   FixedArrayBase* LeftTrimFixedArray(FixedArrayBase* obj, int elements_to_trim);
@@ -1394,6 +1391,7 @@ class Heap {
   void UpdateOldGenerationAllocationCounter() {
     old_generation_allocation_counter_at_last_gc_ =
         OldGenerationAllocationCounter();
+    old_generation_size_at_last_gc_ = 0;
   }
 
   size_t OldGenerationAllocationCounter() {
@@ -1408,14 +1406,22 @@ class Heap {
   size_t PromotedSinceLastGC() {
     size_t old_generation_size = PromotedSpaceSizeOfObjects();
-    if (old_generation_size < old_generation_size_at_last_gc_) {
-      // This can happen if the promoted space size was refined in the
-      // sweeper after mutator doing array trimming.
-      return 0;
-    }
+    DCHECK_GE(old_generation_size, old_generation_size_at_last_gc_);
     return old_generation_size - old_generation_size_at_last_gc_;
   }
 
+  // This is called by the sweeper when it discovers more free space
+  // than expected at the end of the last GC.
+  void NotifyRefinedOldGenerationSize(size_t decreased_bytes) {
+    if (old_generation_size_at_last_gc_ != 0) {
+      // PromotedSpaceSizeOfObjects() is now smaller by |decreased_bytes|.
+      // Adjust old_generation_size_at_last_gc_ too, so that
+      // PromotedSinceLastGC() stays a monotonically non-decreasing function.
+      DCHECK_GE(old_generation_size_at_last_gc_, decreased_bytes);
+      old_generation_size_at_last_gc_ -= decreased_bytes;
+    }
+  }
+
   int gc_count() const { return gc_count_; }
 
   // Returns the size of objects residing in non new spaces.
...
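
For concreteness, here is a worked example of the arithmetic above, with
invented numbers:

// Worked example with invented numbers, following the heap.h code above:
//
//   after the last GC:  PromotedSpaceSizeOfObjects() == 1000
//                       old_generation_size_at_last_gc_ == 1000
//
//   mutator trims a tenured array by 64 bytes
//                       -> no counter changes yet (no AdjustLiveBytes call)
//
//   sweeper refines the page; the space size drops to 936 and the sweeper
//   calls NotifyRefinedOldGenerationSize(64), so the baseline drops to 936.
//
// Without the notification, PromotedSinceLastGC() would evaluate 936 - 1000
// on unsigned values; the old code papered over that underflow with an early
// "return 0". With it, the result is a clean 0 that can only grow as new
// allocations are promoted.
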
@@ -1493,8 +1493,15 @@ void PagedSpace::RefineAllocatedBytesAfterSweeping(Page* page) {
   // The live_byte on the page was accounted in the space allocated
   // bytes counter. After sweeping allocated_bytes() contains the
   // accurate live byte count on the page.
-  DecreaseAllocatedBytes(marking_state->live_bytes(page), page);
-  IncreaseAllocatedBytes(page->allocated_bytes(), page);
+  size_t old_counter = marking_state->live_bytes(page);
+  size_t new_counter = page->allocated_bytes();
+  DCHECK_GE(old_counter, new_counter);
+  if (old_counter > new_counter) {
+    DecreaseAllocatedBytes(old_counter - new_counter, page);
+    // Give the heap a chance to adjust counters in response to the
+    // more precise and smaller old generation size.
+    heap()->NotifyRefinedOldGenerationSize(old_counter - new_counter);
+  }
   marking_state->SetLiveBytes(page, 0);
 }
@@ -3206,12 +3213,12 @@ void PagedSpace::ReportStatistics() {
 void MapSpace::VerifyObject(HeapObject* object) { CHECK(object->IsMap()); }
 #endif
 
-Address LargePage::GetAddressToShrink() {
-  HeapObject* object = GetObject();
+Address LargePage::GetAddressToShrink(Address object_address,
+                                      size_t object_size) {
   if (executable() == EXECUTABLE) {
     return 0;
   }
-  size_t used_size = ::RoundUp((object->address() - address()) + object->Size(),
+  size_t used_size = ::RoundUp((object_address - address()) + object_size,
                                MemoryAllocator::GetCommitPageSize());
   if (used_size < CommittedPhysicalMemory()) {
     return address() + used_size;
@@ -3418,12 +3425,16 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
   LargePage* current = first_page_;
   IncrementalMarking::NonAtomicMarkingState* marking_state =
       heap()->incremental_marking()->non_atomic_marking_state();
+  objects_size_ = 0;
   while (current != nullptr) {
     HeapObject* object = current->GetObject();
     DCHECK(!marking_state->IsGrey(object));
     if (marking_state->IsBlack(object)) {
       Address free_start;
-      if ((free_start = current->GetAddressToShrink()) != 0) {
+      size_t size = static_cast<size_t>(object->Size());
+      objects_size_ += size;
+      if ((free_start = current->GetAddressToShrink(object->address(), size)) !=
+          0) {
         DCHECK(!current->IsFlagSet(Page::IS_EXECUTABLE));
         current->ClearOutOfLiveRangeSlots(free_start);
         RemoveChunkMapEntries(current, free_start);
@@ -3450,7 +3461,6 @@ void LargeObjectSpace::FreeUnmarkedObjects() {
       // Free the chunk.
       size_ -= static_cast<int>(page->size());
       AccountUncommitted(page->size());
-      objects_size_ -= object->Size();
       page_count_--;
       RemoveChunkMapEntries(page);
...
@@ -861,7 +861,7 @@ class LargePage : public MemoryChunk {
   // Uncommit memory that is not in use anymore by the object. If the object
   // cannot be shrunk 0 is returned.
-  Address GetAddressToShrink();
+  Address GetAddressToShrink(Address object_address, size_t object_size);
 
   void ClearOutOfLiveRangeSlots(Address free_start);
@@ -2923,8 +2923,6 @@ class LargeObjectSpace : public Space {
   // Checks whether the space is empty.
   bool IsEmpty() { return first_page_ == NULL; }
 
-  void AdjustLiveBytes(int by) { objects_size_ += by; }
-
   LargePage* first_page() { return first_page_; }
 
   // Collect code statistics.
...
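
A side effect of dropping LargeObjectSpace::AdjustLiveBytes is that
objects_size_ stops being a mutator-adjustable counter: FreeUnmarkedObjects()
now rebuilds it from the surviving pages on every GC, so only the sweeper ever
writes it. A minimal sketch of that recompute-instead-of-adjust pattern (toy
types and invented names, not V8 code):

#include <cstddef>
#include <vector>

struct ToyLargePage {
  bool is_live;        // object on this page survived marking
  size_t object_size;  // size of the single large object it holds
};

// Rebuild the space size from scratch while sweeping large pages, instead of
// letting the mutator patch a shared counter on every trim. A single writer
// means no data race with the concurrent marker.
size_t RecomputeObjectsSize(const std::vector<ToyLargePage>& pages) {
  size_t objects_size = 0;
  for (const ToyLargePage& page : pages) {
    if (page.is_live) objects_size += page.object_size;
  }
  return objects_size;
}

The recount is linear in the number of large pages, but the sweep already
walks all of them, so the simpler single-writer scheme costs essentially
nothing.
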
@@ -2654,8 +2654,6 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
   ExternalTwoByteString* self = ExternalTwoByteString::cast(this);
   self->set_resource(resource);
   if (is_internalized) self->Hash();  // Force regeneration of the hash value.
 
-  heap->AdjustLiveBytes(this, new_size - size);
-
   return true;
 }
@@ -2725,8 +2723,6 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
   ExternalOneByteString* self = ExternalOneByteString::cast(this);
   self->set_resource(resource);
   if (is_internalized) self->Hash();  // Force regeneration of the hash value.
 
-  heap->AdjustLiveBytes(this, new_size - size);
-
   return true;
 }
@@ -4033,7 +4029,6 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
     Address address = object->address();
     heap->CreateFillerObjectAt(address + new_instance_size, instance_size_delta,
                                ClearRecordedSlots::kYes);
-    heap->AdjustLiveBytes(*object, -instance_size_delta);
   }
 
   // We are storing the new map using release store after creating a filler for
@@ -4116,7 +4111,6 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
   if (instance_size_delta > 0) {
     heap->CreateFillerObjectAt(object->address() + new_instance_size,
                                instance_size_delta, ClearRecordedSlots::kYes);
-    heap->AdjustLiveBytes(*object, -instance_size_delta);
   }
 
   // We are storing the new map using release store after creating a filler for
@@ -12120,8 +12114,6 @@ Handle<String> SeqString::Truncate(Handle<SeqString> string, int new_length) {
   // that are a multiple of pointer size.
   heap->CreateFillerObjectAt(start_of_string + new_size, delta,
                              ClearRecordedSlots::kNo);
-  heap->AdjustLiveBytes(*string, -delta);
-
   // We are storing the new length using release store after creating a filler
   // for the left-over space to avoid races with the sweeper thread.
   string->synchronized_set_length(new_length);
@@ -17134,7 +17126,6 @@ void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
     if (size_delta != 0) {
       Heap* heap = isolate->heap();
       heap->CreateFillerObjectAt(thin_end, size_delta, ClearRecordedSlots::kNo);
-      heap->AdjustLiveBytes(thin, -size_delta);
     }
   }
 }
...
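
All of the call sites above now follow the same shrink-in-place discipline:
overwrite the freed tail with a filler so the heap stays linearly iterable,
publish the new length with a release store, and leave every live-byte counter
alone. A hedged sketch of that pattern follows; the types and names are
invented, and only the filler-plus-release-store shape mirrors the real calls
in the diff (Heap::CreateFillerObjectAt, synchronized_set_length).

#include <atomic>
#include <cstddef>
#include <cstring>

// Toy object header: a single atomically updated length field.
struct ToyObject {
  std::atomic<size_t> length;
  // ...payload follows the header in memory...
};

// Assumed stand-in for Heap::CreateFillerObjectAt: the toy version just
// zeroes the freed range so a heap walk sees a well-defined dummy region.
void CreateFiller(char* at, size_t size) { std::memset(at, 0, size); }

void ShrinkInPlace(ToyObject* object, char* object_start, size_t old_size,
                   size_t new_size, size_t new_length) {
  // 1. Cover the unused tail with a filler so heap iteration stays valid.
  if (new_size < old_size) {
    CreateFiller(object_start + new_size, old_size - new_size);
  }
  // 2. Publish the new length with a release store so a concurrent sweeper
  //    never sees the old length paired with the new filler.
  object->length.store(new_length, std::memory_order_release);
  // 3. Deliberately no live-byte adjustment here: the sweeper recounts the
  //    page after the next GC and notifies the heap of the difference.
}
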
@@ -777,7 +777,6 @@ MUST_USE_RESULT static Object* StringReplaceGlobalRegExpWithEmptyString(
   if (!heap->lo_space()->Contains(*answer)) {
     heap->CreateFillerObjectAt(end_of_string, delta, ClearRecordedSlots::kNo);
   }
-  heap->AdjustLiveBytes(*answer, -delta);
   return *answer;
 }
...
@@ -5646,17 +5646,44 @@ TEST(Regress598319) {
   }
 }
 
+Handle<FixedArray> ShrinkArrayAndCheckSize(Heap* heap, int length) {
+  // Make sure there is no garbage and the compilation cache is empty.
+  for (int i = 0; i < 5; i++) {
+    CcTest::CollectAllGarbage();
+  }
+  heap->mark_compact_collector()->EnsureSweepingCompleted();
+  size_t size_before_allocation = heap->SizeOfObjects();
+  Handle<FixedArray> array =
+      heap->isolate()->factory()->NewFixedArray(length, TENURED);
+  size_t size_after_allocation = heap->SizeOfObjects();
+  CHECK_EQ(size_after_allocation, size_before_allocation + array->Size());
+  array->Shrink(1);
+  size_t size_after_shrinking = heap->SizeOfObjects();
+  // Shrinking does not change the space size immediately.
+  CHECK_EQ(size_after_allocation, size_after_shrinking);
+  // GC and sweeping update the size to account for shrinking.
+  CcTest::CollectAllGarbage();
+  heap->mark_compact_collector()->EnsureSweepingCompleted();
+  intptr_t size_after_gc = heap->SizeOfObjects();
+  CHECK_EQ(size_after_gc, size_before_allocation + array->Size());
+  return array;
+}
+
 TEST(Regress609761) {
   CcTest::InitializeVM();
   v8::HandleScope scope(CcTest::isolate());
   Heap* heap = CcTest::heap();
-  Isolate* isolate = heap->isolate();
-
-  intptr_t size_before = heap->SizeOfObjects();
-  Handle<FixedArray> array = isolate->factory()->NewFixedArray(200000);
-  array->Shrink(1);
-  intptr_t size_after = heap->SizeOfObjects();
-  CHECK_EQ(size_after, size_before + array->Size());
+  int length = kMaxRegularHeapObjectSize / kPointerSize + 1;
+  Handle<FixedArray> array = ShrinkArrayAndCheckSize(heap, length);
+  CHECK(heap->lo_space()->Contains(*array));
+}
+
+TEST(LiveBytes) {
+  CcTest::InitializeVM();
+  v8::HandleScope scope(CcTest::isolate());
+  Heap* heap = CcTest::heap();
+  Handle<FixedArray> array = ShrinkArrayAndCheckSize(heap, 2000);
+  CHECK(heap->old_space()->Contains(*array));
+}
 
 TEST(Regress615489) {
...