Commit 0a9d5150 authored by Ulan Degenbaev, committed by Commit Bot

Reland^2 "[heap] Add mechanism for tracking invalidated slots per memory chunk."

This reverts commit 6fde541d.

Bug: chromium:694255
Change-Id: I4670d0de3d2749afbb3bdb8dc5418822a885330c
Reviewed-on: https://chromium-review.googlesource.com/597850
Commit-Queue: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Michael Lippautz <mlippautz@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47083}
parent c924a942
@@ -1581,6 +1581,9 @@ v8_source_set("v8_base") {
"src/heap/incremental-marking-job.h",
"src/heap/incremental-marking.cc",
"src/heap/incremental-marking.h",
"src/heap/invalidated-slots-inl.h",
"src/heap/invalidated-slots.cc",
"src/heap/invalidated-slots.h",
"src/heap/item-parallel-job.h",
"src/heap/local-allocator.h",
"src/heap/mark-compact-inl.h",
......
@@ -4607,10 +4607,17 @@ void Heap::RegisterDeserializedObjectsForBlackAllocation(
}
}
void Heap::NotifyObjectLayoutChange(HeapObject* object,
void Heap::NotifyObjectLayoutChange(HeapObject* object, int size,
const DisallowHeapAllocation&) {
DCHECK(InOldSpace(object) || InNewSpace(object));
if (FLAG_incremental_marking && incremental_marking()->IsMarking()) {
incremental_marking()->MarkBlackAndPush(object);
if (InOldSpace(object) && incremental_marking()->IsCompacting()) {
// The concurrent marker might have recorded slots for the object.
// Register this object as invalidated to filter out the slots.
MemoryChunk* chunk = MemoryChunk::FromAddress(object->address());
chunk->RegisterObjectWithInvalidatedSlots(object, size);
}
}
#ifdef VERIFY_HEAP
DCHECK(pending_layout_change_object_ == nullptr);
......
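The heap.cc hunk above is the core of the mechanism: a layout change registers the object's size before the change, because the concurrent marker may already have recorded slots anywhere in that old extent. A minimal standalone sketch of the invariant, using toy addresses and plain C++ rather than V8 types:
#include <cassert>
// Toy illustration (not V8 code): the marker recorded slots while the
// object spanned [1000, 1064); after the object is trimmed to 32 bytes,
// only slots inside the new extent can still hold pointers.
int main() {
  const unsigned object_start = 1000;
  const unsigned old_size = 64;  // extent when the slots were recorded
  const unsigned new_size = 32;  // extent after the layout change
  const unsigned recorded_slots[] = {1008, 1040};
  for (unsigned slot : recorded_slots) {
    // Registering old_size guarantees the later filter sees both slots.
    assert(slot >= object_start && slot < object_start + old_size);
    // Only the slot below the new extent may survive filtering; 1040 now
    // points into free space and must be dropped.
    assert((slot < object_start + new_size) == (slot == 1008));
  }
  return 0;
}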
@@ -1186,7 +1186,8 @@ class Heap {
// The runtime uses this function to notify the GC of potentially unsafe
// object layout changes that require special synchronization with the
// concurrent marker.
void NotifyObjectLayoutChange(HeapObject* object,
// The old size is the size of the object before the layout change.
void NotifyObjectLayoutChange(HeapObject* object, int old_size,
const DisallowHeapAllocation&);
#ifdef VERIFY_HEAP
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INVALIDATED_SLOTS_INL_H
#define V8_INVALIDATED_SLOTS_INL_H
#include <map>
#include "src/allocation.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/spaces.h"
#include "src/objects-body-descriptors-inl.h"
#include "src/objects-body-descriptors.h"
#include "src/objects.h"
namespace v8 {
namespace internal {
bool InvalidatedSlotsFilter::IsValid(Address slot) {
#ifdef DEBUG
DCHECK_LT(slot, sentinel_);
// Slots must come in non-decreasing order.
DCHECK_LE(last_slot_, slot);
last_slot_ = slot;
#endif
while (slot >= invalidated_end_) {
++iterator_;
if (iterator_ != iterator_end_) {
// Invalidated ranges must not overlap.
DCHECK_LE(invalidated_end_, iterator_->first->address());
invalidated_start_ = iterator_->first->address();
invalidated_end_ = invalidated_start_ + iterator_->second;
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
}
// Now the invalidated region ends after the slot.
if (slot < invalidated_start_) {
// The invalidated region starts after the slot.
return true;
}
// The invalidated region includes the slot.
// Ask the object if the slot is valid.
if (invalidated_object_ == nullptr) {
invalidated_object_ = HeapObject::FromAddress(invalidated_start_);
invalidated_object_size_ =
invalidated_object_->SizeFromMap(invalidated_object_->map());
}
int offset = static_cast<int>(slot - invalidated_start_);
DCHECK_GT(offset, 0);
DCHECK_LE(invalidated_object_size_,
static_cast<int>(invalidated_end_ - invalidated_start_));
if (offset >= invalidated_object_size_) {
// A new object could have been allocated during evacuation in the free
// space outside the object. Since objects are not invalidated in the GC
// pause, we can return true here.
return true;
}
return invalidated_object_->IsValidSlot(offset);
}
} // namespace internal
} // namespace v8
#endif // V8_INVALIDATED_SLOTS_INL_H
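The walk in IsValid above can be read in isolation: the invalidated ranges are sorted and non-overlapping, and the slots are queried in non-decreasing order below the sentinel, so one forward pass suffices. A self-contained sketch of the same walk, with plain ints standing in for heap addresses (not V8 code):
#include <cassert>
#include <map>
// Standalone model of the walk: the iterator only ever moves forward, so
// n queries over m ranges cost O(m + n) in total.
class RangeFilter {
 public:
  // ranges: start -> length, sorted; sentinel: an address past every query.
  RangeFilter(const std::map<int, int>& ranges, int sentinel)
      : it_(ranges.begin()), end_(ranges.end()), sentinel_(sentinel) {
    Reload();
  }
  // Returns true if |slot| lies outside every invalidated range.
  bool IsValid(int slot) {
    while (slot >= range_end_) {
      ++it_;
      Reload();
    }
    return slot < range_start_;
  }
 private:
  void Reload() {
    if (it_ != end_) {
      range_start_ = it_->first;
      range_end_ = range_start_ + it_->second;
    } else {
      range_start_ = range_end_ = sentinel_;  // no more ranges
    }
  }
  std::map<int, int>::const_iterator it_;
  std::map<int, int>::const_iterator end_;
  int sentinel_;
  int range_start_ = 0;
  int range_end_ = 0;
};
int main() {
  std::map<int, int> ranges = {{10, 4}, {20, 8}};  // [10,14) and [20,28)
  RangeFilter filter(ranges, /*sentinel=*/100);
  assert(filter.IsValid(8));    // before the first range
  assert(!filter.IsValid(12));  // inside [10, 14)
  assert(filter.IsValid(16));   // between the ranges
  assert(!filter.IsValid(27));  // inside [20, 28)
  assert(filter.IsValid(40));   // past the last range
  return 0;
}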
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/heap/invalidated-slots.h"
#include "src/heap/spaces.h"
namespace v8 {
namespace internal {
InvalidatedSlotsFilter::InvalidatedSlotsFilter(MemoryChunk* chunk) {
DCHECK_IMPLIES(chunk->invalidated_slots() != nullptr,
chunk->owner()->identity() == OLD_SPACE);
InvalidatedSlots* invalidated_slots =
chunk->invalidated_slots() ? chunk->invalidated_slots() : &empty_;
iterator_ = invalidated_slots->begin();
iterator_end_ = invalidated_slots->end();
sentinel_ = chunk->area_end();
if (iterator_ != iterator_end_) {
invalidated_start_ = iterator_->first->address();
invalidated_end_ = invalidated_start_ + iterator_->second;
} else {
invalidated_start_ = sentinel_;
invalidated_end_ = sentinel_;
}
// These values will be lazily set when needed.
invalidated_object_ = nullptr;
invalidated_object_size_ = 0;
#ifdef DEBUG
last_slot_ = chunk->area_start();
#endif
}
} // namespace internal
} // namespace v8
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_INVALIDATED_SLOTS_H
#define V8_INVALIDATED_SLOTS_H
#include <map>
#include <stack>
#include "src/allocation.h"
#include "src/base/atomic-utils.h"
#include "src/base/bits.h"
#include "src/utils.h"
namespace v8 {
namespace internal {
class HeapObject;
// This data structure stores objects that went through an object layout
// change that potentially invalidates slots recorded concurrently. The
// second part of each element is the size of the corresponding object
// before the layout change.
using InvalidatedSlots = std::map<HeapObject*, int>;
// This class provides the IsValid predicate that takes into account the set
// of invalidated objects in the given memory chunk.
// The sequence of queried slots must be non-decreasing. This allows a fast
// implementation with complexity O(m*log(m) + n), where m is the number of
// invalidated objects in the memory chunk and n is the number of IsValid
// queries.
class InvalidatedSlotsFilter {
public:
explicit InvalidatedSlotsFilter(MemoryChunk* chunk);
inline bool IsValid(Address slot);
private:
InvalidatedSlots::const_iterator iterator_;
InvalidatedSlots::const_iterator iterator_end_;
Address sentinel_;
Address invalidated_start_;
Address invalidated_end_;
HeapObject* invalidated_object_;
int invalidated_object_size_;
InvalidatedSlots empty_;
#ifdef DEBUG
Address last_slot_;
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_INVALIDATED_SLOTS_H
@@ -21,6 +21,8 @@
#include "src/heap/concurrent-marking.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/incremental-marking.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/item-parallel-job.h"
#include "src/heap/local-allocator.h"
#include "src/heap/mark-compact-inl.h"
@@ -3263,6 +3265,14 @@ void MarkCompactCollector::EvacuateEpilogue() {
heap()->new_space()->set_age_mark(heap()->new_space()->top());
// Old space. Deallocate evacuated candidate pages.
ReleaseEvacuationCandidates();
#ifdef DEBUG
// Old-to-old slot sets must be empty after evacuation.
for (Page* p : *heap()->old_space()) {
DCHECK_NULL((p->slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
DCHECK_NULL((p->typed_slot_set<OLD_TO_OLD, AccessMode::ATOMIC>()));
DCHECK_NULL(p->invalidated_slots());
}
#endif
}
class Evacuator : public Malloced {
@@ -4130,13 +4140,21 @@ class RememberedSetUpdatingItem : public UpdatingItem {
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
(chunk_->slot_set<OLD_TO_OLD, AccessMode::NON_ATOMIC>() != nullptr)) {
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
[](Address slot) {
return UpdateSlot<AccessMode::NON_ATOMIC>(
reinterpret_cast<Object**>(slot));
},
SlotSet::PREFREE_EMPTY_BUCKETS);
InvalidatedSlotsFilter filter(chunk_);
RememberedSet<OLD_TO_OLD>::Iterate(
chunk_,
[&filter](Address slot) {
if (!filter.IsValid(slot)) return REMOVE_SLOT;
return UpdateSlot<AccessMode::NON_ATOMIC>(
reinterpret_cast<Object**>(slot));
},
SlotSet::PREFREE_EMPTY_BUCKETS);
}
if ((updating_mode_ == RememberedSetUpdatingMode::ALL) &&
chunk_->invalidated_slots() != nullptr) {
// The invalidated slots are not needed after old-to-old slots were
// processed.
chunk_->ReleaseInvalidatedSlots();
}
}
......
@@ -120,7 +120,8 @@ class RememberedSet : public AllStatic {
while ((chunk = it.next()) != nullptr) {
SlotSet* slots = chunk->slot_set<type>();
TypedSlotSet* typed_slots = chunk->typed_slot_set<type>();
if (slots != nullptr || typed_slots != nullptr) {
if (slots != nullptr || typed_slots != nullptr ||
chunk->invalidated_slots() != nullptr) {
callback(chunk);
}
}
@@ -230,6 +231,7 @@ class RememberedSet : public AllStatic {
while ((chunk = it.next()) != nullptr) {
chunk->ReleaseSlotSet<OLD_TO_OLD>();
chunk->ReleaseTypedSlotSet<OLD_TO_OLD>();
chunk->ReleaseInvalidatedSlots();
}
}
......
@@ -549,6 +549,7 @@ MemoryChunk* MemoryChunk::Initialize(Heap* heap, Address base, size_t size,
nullptr);
base::AsAtomicWord::Release_Store(&chunk->typed_slot_set_[OLD_TO_OLD],
nullptr);
chunk->invalidated_slots_ = nullptr;
chunk->skip_list_ = nullptr;
chunk->progress_bar_ = 0;
chunk->high_water_mark_.SetValue(static_cast<intptr_t>(area_start - base));
@@ -1216,6 +1217,7 @@ void MemoryChunk::ReleaseAllocatedMemory() {
ReleaseSlotSet<OLD_TO_OLD>();
ReleaseTypedSlotSet<OLD_TO_NEW>();
ReleaseTypedSlotSet<OLD_TO_OLD>();
ReleaseInvalidatedSlots();
if (local_tracker_ != nullptr) ReleaseLocalTracker();
if (young_generation_bitmap_ != nullptr) ReleaseYoungGenerationBitmap();
}
@@ -1286,6 +1288,28 @@ void MemoryChunk::ReleaseTypedSlotSet() {
}
}
InvalidatedSlots* MemoryChunk::AllocateInvalidatedSlots() {
DCHECK_NULL(invalidated_slots_);
invalidated_slots_ = new InvalidatedSlots();
return invalidated_slots_;
}
void MemoryChunk::ReleaseInvalidatedSlots() {
if (invalidated_slots_) {
delete invalidated_slots_;
invalidated_slots_ = nullptr;
}
}
void MemoryChunk::RegisterObjectWithInvalidatedSlots(HeapObject* object,
int size) {
if (invalidated_slots() == nullptr) {
AllocateInvalidatedSlots();
}
int old_size = (*invalidated_slots())[object];
(*invalidated_slots())[object] = std::max(old_size, size);
}
void MemoryChunk::AllocateLocalTracker() {
DCHECK_NULL(local_tracker_);
local_tracker_ = new LocalArrayBufferTracker(heap());
......
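RegisterObjectWithInvalidatedSlots keeps the largest size ever registered for an object, so slots recorded before any of its layout changes stay covered. A small standalone check of that merge rule (plain C++ with toy values, not the V8 API):
#include <algorithm>
#include <cassert>
#include <map>
// Mirror of the merge rule above: if the same object is registered twice
// (e.g. it changes layout, then is trimmed again), keep the largest extent
// so every slot that was ever recorded inside it stays covered.
void Register(std::map<int, int>* slots, int object, int size) {
  int old_size = (*slots)[object];  // value-initialized to 0 on first use
  (*slots)[object] = std::max(old_size, size);
}
int main() {
  std::map<int, int> slots;
  Register(&slots, /*object=*/1000, /*size=*/64);
  Register(&slots, /*object=*/1000, /*size=*/32);  // smaller, later change
  assert(slots[1000] == 64);  // the larger extent wins
  return 0;
}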
@@ -6,6 +6,7 @@
#define V8_HEAP_SPACES_H_
#include <list>
#include <map>
#include <memory>
#include <unordered_set>
@@ -19,6 +20,7 @@
#include "src/flags.h"
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots.h"
#include "src/heap/marking.h"
#include "src/list.h"
#include "src/objects.h"
@@ -354,7 +356,8 @@ class MemoryChunk {
+ kIntptrSize // intptr_t live_byte_count_
+ kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // SlotSet* array
+ kPointerSize * NUMBER_OF_REMEMBERED_SET_TYPES // TypedSlotSet* array
+ kPointerSize // SkipList* skip_list_
+ kPointerSize // InvalidatedSlots* invalidated_slots_
+ kPointerSize // SkipList* skip_list_
+ kPointerSize // AtomicValue high_water_mark_
+ kPointerSize // base::RecursiveMutex* mutex_
+ kPointerSize // base::AtomicWord concurrent_sweeping_
@@ -472,6 +475,11 @@ class MemoryChunk {
template <RememberedSetType type>
void ReleaseTypedSlotSet();
InvalidatedSlots* AllocateInvalidatedSlots();
void ReleaseInvalidatedSlots();
void RegisterObjectWithInvalidatedSlots(HeapObject* object, int size);
InvalidatedSlots* invalidated_slots() { return invalidated_slots_; }
void AllocateLocalTracker();
void ReleaseLocalTracker();
inline LocalArrayBufferTracker* local_tracker() { return local_tracker_; }
@@ -631,6 +639,7 @@ class MemoryChunk {
// is ceil(size() / kPageSize).
SlotSet* slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
TypedSlotSet* typed_slot_set_[NUMBER_OF_REMEMBERED_SET_TYPES];
InvalidatedSlots* invalidated_slots_;
SkipList* skip_list_;
......
@@ -2598,7 +2598,7 @@ bool String::MakeExternal(v8::String::ExternalStringResource* resource) {
bool is_internalized = this->IsInternalizedString();
bool has_pointers = StringShape(this).IsIndirect();
if (has_pointers) {
heap->NotifyObjectLayoutChange(this, no_allocation);
heap->NotifyObjectLayoutChange(this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
// reinitializing the fields. This won't work if the space the existing
@@ -2674,7 +2674,7 @@ bool String::MakeExternal(v8::String::ExternalOneByteStringResource* resource) {
bool has_pointers = StringShape(this).IsIndirect();
if (has_pointers) {
heap->NotifyObjectLayoutChange(this, no_allocation);
heap->NotifyObjectLayoutChange(this, size, no_allocation);
}
// Morph the string to an external string by replacing the map and
@@ -3980,7 +3980,9 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
Heap* heap = isolate->heap();
heap->NotifyObjectLayoutChange(*object, no_allocation);
int old_instance_size = old_map->instance_size();
heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
// Copy (real) inobject properties. If necessary, stop at number_of_fields to
// avoid overwriting |one_pointer_filler_map|.
@@ -4014,7 +4016,7 @@ void MigrateFastToFast(Handle<JSObject> object, Handle<Map> new_map) {
// Create filler object past the new instance size.
int new_instance_size = new_map->instance_size();
int instance_size_delta = old_map->instance_size() - new_instance_size;
int instance_size_delta = old_instance_size - new_instance_size;
DCHECK(instance_size_delta >= 0);
if (instance_size_delta > 0) {
@@ -4096,11 +4098,12 @@ void MigrateFastToSlow(Handle<JSObject> object, Handle<Map> new_map,
DisallowHeapAllocation no_allocation;
Heap* heap = isolate->heap();
heap->NotifyObjectLayoutChange(*object, no_allocation);
int old_instance_size = map->instance_size();
heap->NotifyObjectLayoutChange(*object, old_instance_size, no_allocation);
// Resize the object in the heap if necessary.
int new_instance_size = new_map->instance_size();
int instance_size_delta = map->instance_size() - new_instance_size;
int instance_size_delta = old_instance_size - new_instance_size;
DCHECK(instance_size_delta >= 0);
if (instance_size_delta > 0) {
@@ -17083,11 +17086,11 @@ void MakeStringThin(String* string, String* internalized, Isolate* isolate) {
if (!string->IsInternalizedString()) {
DisallowHeapAllocation no_gc;
isolate->heap()->NotifyObjectLayoutChange(string, no_gc);
int old_size = string->Size();
isolate->heap()->NotifyObjectLayoutChange(string, old_size, no_gc);
bool one_byte = internalized->IsOneByteRepresentation();
Handle<Map> map = one_byte ? isolate->factory()->thin_one_byte_string_map()
: isolate->factory()->thin_string_map();
int old_size = string->Size();
DCHECK(old_size >= ThinString::kSize);
string->synchronized_set_map(*map);
ThinString* thin = ThinString::cast(string);
......
@@ -160,7 +160,8 @@ bool DeleteObjectPropertyFast(Isolate* isolate, Handle<JSReceiver> receiver,
// Zap the property to avoid keeping objects alive. Zapping is not necessary
// for properties stored in the descriptor array.
if (details.location() == kField) {
isolate->heap()->NotifyObjectLayoutChange(*receiver, no_allocation);
isolate->heap()->NotifyObjectLayoutChange(*receiver, map->instance_size(),
no_allocation);
Object* filler = isolate->heap()->one_pointer_filler_map();
FieldIndex index = FieldIndex::ForPropertyIndex(map, details.field_index());
JSObject::cast(*receiver)->RawFastPropertyAtPut(index, filler);
......
@@ -1027,6 +1027,9 @@
'heap/incremental-marking-job.h',
'heap/incremental-marking.cc',
'heap/incremental-marking.h',
'heap/invalidated-slots-inl.h',
'heap/invalidated-slots.cc',
'heap/invalidated-slots.h',
'heap/item-parallel-job.h',
'heap/local-allocator.h',
'heap/mark-compact-inl.h',
......
@@ -78,6 +78,7 @@ v8_executable("cctest") {
"heap/test-concurrent-marking.cc",
"heap/test-heap.cc",
"heap/test-incremental-marking.cc",
"heap/test-invalidated-slots.cc",
"heap/test-lab.cc",
"heap/test-mark-compact.cc",
"heap/test-page-promotion.cc",
......
@@ -96,6 +96,7 @@
'heap/test-concurrent-marking.cc',
'heap/test-heap.cc',
'heap/test-incremental-marking.cc',
'heap/test-invalidated-slots.cc',
'heap/test-lab.cc',
'heap/test-mark-compact.cc',
'heap/test-page-promotion.cc',
......
@@ -16,6 +16,10 @@
V(CompactionPartiallyAbortedPageWithStoreBufferEntries) \
V(CompactionSpaceDivideMultiplePages) \
V(CompactionSpaceDivideSinglePage) \
V(InvalidatedSlotsNoInvalidatedRanges) \
V(InvalidatedSlotsSomeInvalidatedRanges) \
V(InvalidatedSlotsAllInvalidatedRanges) \
V(InvalidatedSlotsAfterTrimming) \
V(TestNewSpaceRefsInCopiedCode) \
V(GCFlags) \
V(MarkCompactCollector) \
@@ -66,11 +70,15 @@ class HeapTester {
HEAP_TEST_METHODS(DECLARE_STATIC)
#undef HEAP_TEST_METHODS
/* test-alloc.cc */
// test-alloc.cc
static AllocationResult AllocateAfterFailures();
static Handle<Object> TestAllocateAfterFailures();
/* test-api.cc */
// test-invalidated-slots.cc
static Page* AllocateByteArraysOnPage(Heap* heap,
std::vector<ByteArray*>* byte_arrays);
// test-api.cc
static void ResetWeakHandle(bool global_gc);
};
......
// Copyright 2017 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include <stdlib.h>
#include "src/v8.h"
#include "src/heap/heap-inl.h"
#include "src/heap/heap.h"
#include "src/heap/invalidated-slots-inl.h"
#include "src/heap/invalidated-slots.h"
#include "test/cctest/cctest.h"
#include "test/cctest/heap/heap-tester.h"
#include "test/cctest/heap/heap-utils.h"
namespace v8 {
namespace internal {
Page* v8::internal::HeapTester::AllocateByteArraysOnPage(
Heap* heap, std::vector<ByteArray*>* byte_arrays) {
const int kLength = 256 - ByteArray::kHeaderSize;
const int kSize = ByteArray::SizeFor(kLength);
CHECK_EQ(kSize, 256);
Isolate* isolate = heap->isolate();
PagedSpace* old_space = heap->old_space();
Page* page;
// Fill a page with byte arrays.
{
AlwaysAllocateScope always_allocate(isolate);
heap::SimulateFullSpace(old_space);
ByteArray* byte_array;
CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
byte_arrays->push_back(byte_array);
page = Page::FromAddress(byte_array->address());
CHECK_EQ(page->area_size() % kSize, 0u);
size_t n = page->area_size() / kSize;
for (size_t i = 1; i < n; i++) {
CHECK(heap->AllocateByteArray(kLength, TENURED).To(&byte_array));
byte_arrays->push_back(byte_array);
CHECK_EQ(page, Page::FromAddress(byte_array->address()));
}
}
CHECK_NULL(page->invalidated_slots());
return page;
}
HEAP_TEST(InvalidatedSlotsNoInvalidatedRanges) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
std::vector<ByteArray*> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
InvalidatedSlotsFilter filter(page);
for (auto byte_array : byte_arrays) {
Address start = byte_array->address() + ByteArray::kHeaderSize;
Address end = byte_array->address() + byte_array->Size();
for (Address addr = start; addr < end; addr += kPointerSize) {
CHECK(filter.IsValid(addr));
}
}
}
HEAP_TEST(InvalidatedSlotsSomeInvalidatedRanges) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
std::vector<ByteArray*> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register every second byte array as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i += 2) {
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
byte_arrays[i]->Size());
}
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray* byte_array = byte_arrays[i];
Address start = byte_array->address() + ByteArray::kHeaderSize;
Address end = byte_array->address() + byte_array->Size();
for (Address addr = start; addr < end; addr += kPointerSize) {
if (i % 2 == 0) {
CHECK(!filter.IsValid(addr));
} else {
CHECK(filter.IsValid(addr));
}
}
}
}
HEAP_TEST(InvalidatedSlotsAllInvalidatedRanges) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
std::vector<ByteArray*> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
byte_arrays[i]->Size());
}
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray* byte_array = byte_arrays[i];
Address start = byte_array->address() + ByteArray::kHeaderSize;
Address end = byte_array->address() + byte_array->Size();
for (Address addr = start; addr < end; addr += kPointerSize) {
CHECK(!filter.IsValid(addr));
}
}
}
HEAP_TEST(InvalidatedSlotsAfterTrimming) {
CcTest::InitializeVM();
Heap* heap = CcTest::heap();
std::vector<ByteArray*> byte_arrays;
Page* page = AllocateByteArraysOnPage(heap, &byte_arrays);
// Register all byte arrays as invalidated.
for (size_t i = 0; i < byte_arrays.size(); i++) {
page->RegisterObjectWithInvalidatedSlots(byte_arrays[i],
byte_arrays[i]->Size());
}
// Trim the byte arrays and check that the slots outside the byte arrays are
// considered valid. Free space outside an invalidated object can be reused
// during evacuation for allocating evacuated objects, which can add new
// valid slots to evacuation candidates.
InvalidatedSlotsFilter filter(page);
for (size_t i = 0; i < byte_arrays.size(); i++) {
ByteArray* byte_array = byte_arrays[i];
Address start = byte_array->address() + ByteArray::kHeaderSize;
Address end = byte_array->address() + byte_array->Size();
heap->RightTrimFixedArray(byte_array, byte_array->length());
for (Address addr = start; addr < end; addr += kPointerSize) {
CHECK(filter.IsValid(addr));
}
}
}
} // namespace internal
} // namespace v8