Commit edde7b28 authored by ulan's avatar ulan Committed by Commit bot

Filter out invalid slots in store buffer eagerly during object transition.

BUG=chromium:578883
LOG=NO

Review URL: https://codereview.chromium.org/1675163003

Cr-Commit-Position: refs/heads/master@{#33841}
parent bf521632
......@@ -5449,6 +5449,11 @@ void Heap::CheckHandleCount() {
isolate_->handle_scope_implementer()->Iterate(&v);
}
// Removes the recorded entry for |slot| (a slot inside |object|) from the
// store buffer. Objects in new space are skipped; only old-space slots are
// tracked there (see the OLD_SPACE check in StoreBuffer::Remove).
void Heap::ClearRecordedSlot(HeapObject* object, Object** slot) {
  if (InNewSpace(object)) return;
  store_buffer()->Remove(reinterpret_cast<Address>(slot));
}
Space* AllSpaces::next() {
switch (counter_++) {
......
......@@ -1076,6 +1076,8 @@ class Heap {
return reinterpret_cast<Address*>(&roots_[kStoreBufferTopRootIndex]);
}
void ClearRecordedSlot(HeapObject* object, Object** slot);
// ===========================================================================
// Incremental marking API. ==================================================
// ===========================================================================
......
......@@ -63,6 +63,15 @@ void StoreBuffer::StoreBufferOverflow(Isolate* isolate) {
isolate->counters()->store_buffer_overflows()->Increment();
}
// Removes |addr| from the store buffer. If the slot was never recorded this
// is a no-op (the slot set may not even exist yet).
void StoreBuffer::Remove(Address addr) {
  // Flush any buffered entries first so the per-chunk slot set is the single
  // place the slot could still live.
  InsertEntriesFromBuffer();
  MemoryChunk* const containing_chunk = MemoryChunk::FromAddress(addr);
  DCHECK_EQ(containing_chunk->owner()->identity(), OLD_SPACE);
  const uintptr_t slot_offset = addr - containing_chunk->address();
  DCHECK_LT(slot_offset, static_cast<uintptr_t>(Page::kPageSize));
  auto* const slot_set = containing_chunk->old_to_new_slots();
  if (slot_set == nullptr) return;
  slot_set->Remove(static_cast<uint32_t>(slot_offset));
}
#ifdef VERIFY_HEAP
void StoreBuffer::VerifyPointers(LargeObjectSpace* space) {
......
......@@ -36,6 +36,10 @@ class StoreBuffer {
// This is used to add addresses to the store buffer non-concurrently.
inline void Mark(Address addr);
// Removes the given slot from the store buffer non-concurrently. If the
// slot was never added to the store buffer, then the function does nothing.
void Remove(Address addr);
// Slots that no longer point into new space after the callback has been
// invoked are removed from the set.
void IteratePointersToNewSpace(ObjectSlotCallback callback);
......
......@@ -2998,6 +2998,8 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// From here on we cannot fail and we shouldn't GC anymore.
DisallowHeapAllocation no_allocation;
Heap* heap = isolate->heap();
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
int limit = Min(inobject, number_of_fields);
......@@ -3010,12 +3012,16 @@ void JSObject::MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
DCHECK(value->IsMutableHeapNumber());
object->RawFastDoublePropertyAtPut(index,
HeapNumber::cast(value)->value());
if (i < old_number_of_fields && !old_map->IsUnboxedDoubleField(index)) {
// Transition from tagged to untagged slot.
heap->ClearRecordedSlot(*object,
HeapObject::RawField(*object, index.offset()));
}
} else {
object->RawFastPropertyAtPut(index, value);
}
}
Heap* heap = isolate->heap();
// If there are properties in the new backing store, trim it to the correct
// size and install the backing store into the object.
......
......@@ -1549,9 +1549,7 @@ static void TestWriteBarrierObjectShiftFieldsRight(
}
}
// TODO(ishell): enable when this issue is fixed.
DISABLED_TEST(WriteBarrierObjectShiftFieldsRight) {
TEST(WriteBarrierObjectShiftFieldsRight) {
TestWriteBarrierObjectShiftFieldsRight(OLD_TO_NEW_WRITE_BARRIER);
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment