Commit 1e182fd4 authored by Ulan Degenbaev, committed by Commit Bot

Revert "[heap, runtime] Avoid redundant clearing of slots outside an object."

This reverts commit 3f820ebb.

Revert "[heap, runtime] Avoid redundant clearing of slots in a trimmed array."

This reverts commit ed76f17b.

Reason: canary crashes.

Bug: chromium:752750, chromium:694255
TBR: mlippautz@chromium.org
Change-Id: I57c533e8a3db5f28e7659c5f326fa343627a3ff7
Reviewed-on: https://chromium-review.googlesource.com/607868
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47300}
parent 0410e7e8
@@ -3325,7 +3325,7 @@ HeapObject* Heap::CreateFillerObjectAt(Address addr, int size,
     FreeSpace::cast(filler)->relaxed_write_size(size);
   }
   if (mode == ClearRecordedSlots::kYes) {
-    UNREACHABLE();
+    ClearRecordedSlotRange(addr, addr + size);
   }
   // At this point, we may be deserializing the heap from a snapshot, and
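The hunk above restores the eager-clearing path: with ClearRecordedSlots::kYes, recorded slots covering the new filler are removed immediately rather than left for the sweeper. Below is a minimal, self-contained sketch of that interplay; the std::set remembered set, CreateFiller, and the addresses are simplified stand-ins, not V8's real SlotSet machinery.

// Minimal sketch (not V8's real API) of eager slot clearing during
// filler creation. The remembered set is modeled as a sorted set of
// slot addresses.
#include <cstdint>
#include <iostream>
#include <set>

using Address = uintptr_t;

std::set<Address> recorded_slots;  // stand-in for a page's remembered set

enum class ClearRecordedSlots { kNo, kYes };

// Stand-in for Heap::ClearRecordedSlotRange: drop every recorded slot
// that falls inside the freed range [start, end).
void ClearRecordedSlotRange(Address start, Address end) {
  recorded_slots.erase(recorded_slots.lower_bound(start),
                       recorded_slots.lower_bound(end));
}

// Stand-in for Heap::CreateFillerObjectAt. With kYes (the behavior this
// revert restores), stale slots in the dead range are cleared right away
// instead of being left for the sweeper to prune later.
void CreateFiller(Address addr, int size, ClearRecordedSlots mode) {
  if (mode == ClearRecordedSlots::kYes) {
    ClearRecordedSlotRange(addr, addr + size);
  }
  // A real heap would also write a filler map and size header at addr.
}

int main() {
  recorded_slots = {0x1000, 0x1008, 0x1010};
  CreateFiller(0x1008, 16, ClearRecordedSlots::kYes);
  std::cout << recorded_slots.size() << " recorded slot(s) left\n";  // 1
}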
@@ -3407,8 +3407,7 @@ FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object,
   // Technically in new space this write might be omitted (except for
   // debug mode which iterates through the heap), but to play safer
   // we still do it.
-  // Recorded slots will be cleared by the sweeper.
-  CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kNo);
+  CreateFillerObjectAt(old_start, bytes_to_trim, ClearRecordedSlots::kYes);
   // Initialize header of the trimmed array. Since left trimming is only
   // performed on pages which are not concurrently swept creating a filler
@@ -3477,9 +3476,8 @@ void Heap::RightTrimFixedArray(FixedArrayBase* object, int elements_to_trim) {
   // TODO(hpayer): We should shrink the large object page if the size
   // of the object changed significantly.
   if (!lo_space()->Contains(object)) {
-    // Recorded slots will be cleared by the sweeper.
     HeapObject* filler =
-        CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kNo);
+        CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes);
     DCHECK_NOT_NULL(filler);
     // Clear the mark bits of the black area that belongs now to the filler.
     // This is an optimization. The sweeper will release black fillers anyway.
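Both trimming hunks follow the same pattern: the freed range (the front of the array for a left trim, the tail past new_end for a right trim) becomes a filler, and with kYes restored its recorded slots are cleared eagerly. A hedged sketch of the address arithmetic involved; the header and slot sizes are illustrative assumptions, not V8's actual layout constants.

#include <cassert>
#include <cstdint>

using Address = uintptr_t;
constexpr int kPointerSize = 8;  // assumed 64-bit tagged slot width
constexpr int kHeaderSize = 16;  // assumed map + length header

struct TrimRange {
  Address filler_start;  // where the filler object is created
  int bytes_to_trim;     // size handed to the filler-creation routine
};

// Left trim: the first `elements_to_trim` elements are freed, so the
// filler covers [old_start, old_start + bytes_to_trim) and a fresh
// array header is written at old_start + bytes_to_trim.
TrimRange LeftTrim(Address old_start, int elements_to_trim) {
  return {old_start, elements_to_trim * kPointerSize};
}

// Right trim: the tail is freed, so the filler starts at the new end
// of the shrunk array.
TrimRange RightTrim(Address old_start, int length, int elements_to_trim) {
  Address new_end =
      old_start + kHeaderSize + (length - elements_to_trim) * kPointerSize;
  return {new_end, elements_to_trim * kPointerSize};
}

int main() {
  TrimRange left = LeftTrim(0x1000, 2);
  assert(left.bytes_to_trim == 16);
  TrimRange right = RightTrim(0x1000, 10, 3);
  assert(right.filler_start == 0x1000 + 16 + 7 * 8);
  assert(right.bytes_to_trim == 24);
}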
...
@@ -4030,11 +4030,8 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
   if (instance_size_delta > 0) {
     Address address = object->address();
-    // The object has shrunk and is not going to use these slots again.
-    // Since there will be no untagged stores in these slots,
-    // we can just let the sweeper remove slots in the filler.
     heap->CreateFillerObjectAt(address + new_instance_size, instance_size_delta,
-                               ClearRecordedSlots::kNo);
+                               ClearRecordedSlots::kYes);
     heap->AdjustLiveBytes(*object, -instance_size_delta);
   }
@@ -4116,11 +4113,8 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
   DCHECK(instance_size_delta >= 0);
   if (instance_size_delta > 0) {
-    // The object has shrunk and is not going to use these slots again.
-    // Since there will be no untagged stores in these slots,
-    // we can just let the sweeper remove slots in the filler.
     heap->CreateFillerObjectAt(object->address() + new_instance_size,
-                               instance_size_delta, ClearRecordedSlots::kNo);
+                               instance_size_delta, ClearRecordedSlots::kYes);
     heap->AdjustLiveBytes(*object, -instance_size_delta);
   }
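Both migration hunks share one pattern: when a map transition shrinks an object, the tail starting at address + new_instance_size becomes dead, and this revert makes the filler creation clear its recorded slots immediately. A small illustrative sketch of just the size computation; the Map struct and byte sizes here are assumptions for the example, not V8's object layout.

#include <cassert>

// Illustrative stand-in: a "map" here is just the instance size it implies.
struct Map {
  int instance_size;
};

// Returns how many bytes at the end of the object become dead when it
// migrates from old_map to new_map; 0 means there is nothing to fill.
int InstanceSizeDelta(const Map& old_map, const Map& new_map) {
  int delta = old_map.instance_size - new_map.instance_size;
  return delta > 0 ? delta : 0;
}

int main() {
  Map old_map{48}, new_map{32};
  assert(InstanceSizeDelta(old_map, new_map) == 16);
  // In the real code this delta is handed to CreateFillerObjectAt at
  // object->address() + new_instance_size with ClearRecordedSlots::kYes,
  // followed by heap->AdjustLiveBytes(*object, -delta).
}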
@@ -4161,7 +4155,6 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
 // static
 void JSObject::NotifyMapChange(Handle<Map> old_map, Handle<Map> new_map,
                                Isolate* isolate) {
-  DCHECK_LE(new_map->instance_size(), old_map->instance_size());
   if (!old_map->is_prototype_map()) return;
   InvalidatePrototypeChains(*old_map);
...