Commit 6529971a authored by bmeurer, committed by Commit bot

Revert of [heap] Remove retry space from AllocateRaw. (patchset #3 id:40001 of https://codereview.chromium.org/1370123002/ )

Reason for revert:
Breaks arm64 nosnap simulator (bogus cctest?): https://chromegw.corp.google.com/i/client.v8/builders/V8%20Linux%20-%20arm64%20-%20sim%20-%20nosnap%20-%20debug%20-%201/builds/2934/steps/Bisect%202786ceec.Retry/logs/IncrementalWriteBarri..

Original issue's description:
> [heap] Remove retry space from AllocateRaw.
>
> BUG=
>
> Committed: https://crrev.com/2786ceec1eb491494d24d29a59eefbe3b6704be2
> Cr-Commit-Position: refs/heads/master@{#30984}

TBR=mstarzinger@chromium.org,hpayer@chromium.org
NOPRESUBMIT=true
NOTREECHECKS=true
NOTRY=true
BUG=

Review URL: https://codereview.chromium.org/1379493003

Cr-Commit-Position: refs/heads/master@{#30991}
parent d5a9dd65
......@@ -126,7 +126,7 @@ AllocationResult Heap::AllocateOneByteInternalizedString(
// Allocate string.
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
......@@ -157,7 +157,7 @@ AllocationResult Heap::AllocateTwoByteInternalizedString(Vector<const uc16> str,
// Allocate string.
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
......@@ -189,6 +189,7 @@ AllocationResult Heap::CopyFixedDoubleArray(FixedDoubleArray* src) {
AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
AllocationSpace retry_space,
AllocationAlignment alignment) {
DCHECK(AllowHandleAllocation::IsAllowed());
DCHECK(AllowHeapAllocation::IsAllowed());
......@@ -206,14 +207,19 @@ AllocationResult Heap::AllocateRaw(int size_in_bytes, AllocationSpace space,
HeapObject* object = nullptr;
AllocationResult allocation;
if (NEW_SPACE == space) {
- if (large_object) {
-   space = LO_SPACE;
- } else {
+ if (!large_object) {
allocation = new_space_.AllocateRaw(size_in_bytes, alignment);
- if (allocation.To(&object)) {
-   OnAllocationEvent(object, size_in_bytes);
+ if (always_allocate() && allocation.IsRetry() &&
+     retry_space != NEW_SPACE) {
+   space = retry_space;
+ } else {
+   if (allocation.To(&object)) {
+     OnAllocationEvent(object, size_in_bytes);
+   }
+   return allocation;
}
- return allocation;
+ } else {
+   space = LO_SPACE;
}
}
......
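The hunk above is the heart of the revert: when a new-space allocation fails under an active AlwaysAllocateScope, AllocateRaw falls through and retries in the caller-supplied retry_space instead of returning the retry failure. A minimal, self-contained sketch of that policy, with illustrative stand-in names rather than the real V8 types (the real code retries by reassigning space and continuing, not by a second call):

#include <cstdio>

// Illustrative stand-ins only; not the real V8 types or functions.
enum class Space { kNew, kOld, kLarge };

struct Result {
  bool ok;
  Space where;
};

bool always_allocate = true;  // models an active AlwaysAllocateScope
bool new_space_full = true;   // pretend the young generation is exhausted

Result TryAllocate(Space space) {
  if (space == Space::kNew && new_space_full) return {false, space};
  return {true, space};
}

// Restored policy: a failed new-space allocation under always_allocate
// falls back to the caller-supplied retry space instead of reporting retry.
Result AllocateRaw(Space space, Space retry_space) {
  Result r = TryAllocate(space);
  if (!r.ok && always_allocate && retry_space != space) {
    r = TryAllocate(retry_space);
  }
  return r;
}

int main() {
  Result r = AllocateRaw(Space::kNew, Space::kOld);
  std::printf("ok=%d, fell back to old space=%d\n", r.ok,
              r.where == Space::kOld);
  return r.ok ? 0 : 1;
}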
......@@ -1988,7 +1988,7 @@ void Heap::ConfigureInitialOldGenerationSize() {
AllocationResult Heap::AllocatePartialMap(InstanceType instance_type,
int instance_size) {
Object* result = nullptr;
- AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
// Map::cast cannot be used due to uninitialized map field.
......@@ -2022,7 +2022,7 @@ AllocationResult Heap::AllocateMap(InstanceType instance_type,
int instance_size,
ElementsKind elements_kind) {
HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE);
+ AllocationResult allocation = AllocateRaw(Map::kSize, MAP_SPACE, MAP_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_no_write_barrier(meta_map());
......@@ -2063,7 +2063,7 @@ AllocationResult Heap::AllocateFillerObject(int size, bool double_align,
HeapObject* obj = nullptr;
{
AllocationAlignment align = double_align ? kDoubleAligned : kWordAligned;
- AllocationResult allocation = AllocateRaw(size, space, align);
+ AllocationResult allocation = AllocateRaw(size, space, space, align);
if (!allocation.To(&obj)) return allocation;
}
#ifdef DEBUG
......@@ -2376,7 +2376,8 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space, kDoubleUnaligned);
+ AllocationResult allocation =
+     AllocateRaw(size, space, OLD_SPACE, kDoubleUnaligned);
if (!allocation.To(&result)) return allocation;
}
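A note on the kDoubleUnaligned request above: assuming the usual V8 object layout on 32-bit targets (a 4-byte map word followed by the 8-byte double payload), it is the payload, not the object start, that must land on an 8-byte boundary, so heap numbers ask for a deliberately misaligned start. A sketch of the arithmetic, with assumed constants rather than the real V8 globals:

#include <cassert>
#include <cstdint>

// Assumed constants; the real values come from V8's headers, not shown here.
const uintptr_t kDoubleAlignment = 8;
const uintptr_t kMapWordSize = 4;  // 32-bit tagged pointer

uintptr_t AlignUp(uintptr_t addr) {
  return (addr + kDoubleAlignment - 1) & ~(kDoubleAlignment - 1);
}

int main() {
  uintptr_t top = 0x1004;  // current bump pointer, not 8-byte aligned
  // kDoubleAligned: the object itself starts on an 8-byte boundary.
  uintptr_t aligned_start = AlignUp(top);
  // kDoubleUnaligned: the start is chosen so that the payload *after* the
  // map word is 8-byte aligned, i.e. start % 8 == 4 on 32-bit.
  uintptr_t unaligned_start = AlignUp(top + kMapWordSize) - kMapWordSize;
  assert(aligned_start % kDoubleAlignment == 0);
  assert((unaligned_start + kMapWordSize) % kDoubleAlignment == 0);
  return 0;
}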
......@@ -2397,7 +2398,7 @@ AllocationResult Heap::AllocateHeapNumber(double value, MutableMode mode,
HeapObject* result = nullptr; \
{ \
AllocationResult allocation = \
- AllocateRaw(size, space, kSimd128Unaligned); \
+ AllocateRaw(size, space, OLD_SPACE, kSimd128Unaligned); \
if (!allocation.To(&result)) return allocation; \
} \
\
......@@ -2418,7 +2419,7 @@ AllocationResult Heap::AllocateCell(Object* value) {
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
result->set_map_no_write_barrier(cell_map());
......@@ -2432,7 +2433,7 @@ AllocationResult Heap::AllocatePropertyCell() {
STATIC_ASSERT(PropertyCell::kSize <= Page::kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_no_write_barrier(global_property_cell_map());
......@@ -2450,7 +2451,7 @@ AllocationResult Heap::AllocateWeakCell(HeapObject* value) {
STATIC_ASSERT(WeakCell::kSize <= Page::kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
result->set_map_no_write_barrier(weak_cell_map());
......@@ -2935,7 +2936,7 @@ AllocationResult Heap::AllocateByteArray(int length, PretenureFlag pretenure) {
AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space);
+ AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
......@@ -2959,7 +2960,7 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
int size = BytecodeArray::SizeFor(length);
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
......@@ -3146,7 +3147,7 @@ AllocationResult Heap::AllocateFixedTypedArrayWithExternalPointer(
AllocationSpace space = SelectSpace(pretenure);
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space);
+ AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
......@@ -3191,7 +3192,7 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
HeapObject* object = nullptr;
AllocationResult allocation = AllocateRaw(
- size, space,
+ size, space, OLD_SPACE,
array_type == kExternalFloat64Array ? kDoubleAligned : kWordAligned);
if (!allocation.To(&object)) return allocation;
......@@ -3209,7 +3210,8 @@ AllocationResult Heap::AllocateFixedTypedArray(int length,
AllocationResult Heap::AllocateCode(int object_size, bool immovable) {
DCHECK(IsAligned(static_cast<intptr_t>(object_size), kCodeAlignment));
- AllocationResult allocation = AllocateRaw(object_size, CODE_SPACE);
+ AllocationResult allocation =
+     AllocateRaw(object_size, CODE_SPACE, CODE_SPACE);
HeapObject* result = nullptr;
if (!allocation.To(&result)) return allocation;
......@@ -3248,7 +3250,7 @@ AllocationResult Heap::CopyCode(Code* code) {
HeapObject* result = nullptr;
// Allocate an object the same size as the code object.
int obj_size = code->Size();
- allocation = AllocateRaw(obj_size, CODE_SPACE);
+ allocation = AllocateRaw(obj_size, CODE_SPACE, CODE_SPACE);
if (!allocation.To(&result)) return allocation;
// Copy code object.
......@@ -3287,7 +3289,8 @@ AllocationResult Heap::CopyCode(Code* code, Vector<byte> reloc_info) {
static_cast<size_t>(code->instruction_end() - old_addr);
HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(new_obj_size, CODE_SPACE);
+ AllocationResult allocation =
+     AllocateRaw(new_obj_size, CODE_SPACE, CODE_SPACE);
if (!allocation.To(&result)) return allocation;
// Copy code object.
......@@ -3333,12 +3336,15 @@ AllocationResult Heap::Allocate(Map* map, AllocationSpace space,
AllocationSite* allocation_site) {
DCHECK(gc_state_ == NOT_IN_GC);
DCHECK(map->instance_type() != MAP_TYPE);
+ // If allocation failures are disallowed, we may allocate in a different
+ // space when new space is full and the object is not a large object.
+ AllocationSpace retry_space = (space != NEW_SPACE) ? space : OLD_SPACE;
int size = map->instance_size();
if (allocation_site != NULL) {
size += AllocationMemento::kSize;
}
HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(size, space);
+ AllocationResult allocation = AllocateRaw(size, space, retry_space);
if (!allocation.To(&result)) return allocation;
// No need for write barrier since object is white and map is in old space.
result->set_map_no_write_barrier(map);
......@@ -3440,20 +3446,65 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
DCHECK(site == NULL || AllocationSite::CanTrack(map->instance_type()));
- int adjusted_object_size =
-     site != NULL ? object_size + AllocationMemento::kSize : object_size;
- AllocationResult allocation = AllocateRaw(adjusted_object_size, NEW_SPACE);
- if (!allocation.To(&clone)) return allocation;
+ WriteBarrierMode wb_mode = UPDATE_WRITE_BARRIER;
+ // If we're forced to always allocate, we use the general allocation
+ // functions which may leave us with an object in old space.
+ if (always_allocate()) {
+   {
+     AllocationResult allocation =
+         AllocateRaw(object_size, NEW_SPACE, OLD_SPACE);
+     if (!allocation.To(&clone)) return allocation;
+   }
+   Address clone_address = clone->address();
+   CopyBlock(clone_address, source->address(), object_size);
- SLOW_DCHECK(InNewSpace(clone));
- // Since we know the clone is allocated in new space, we can copy
- // the contents without worrying about updating the write barrier.
- CopyBlock(clone->address(), source->address(), object_size);
+   // Update write barrier for all tagged fields that lie beyond the header.
+   const int start_offset = JSObject::kHeaderSize;
+   const int end_offset = object_size;
- if (site != NULL) {
-   AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
-       reinterpret_cast<Address>(clone) + object_size);
-   InitializeAllocationMemento(alloc_memento, site);
+ #if V8_DOUBLE_FIELDS_UNBOXING
+   LayoutDescriptorHelper helper(map);
+   bool has_only_tagged_fields = helper.all_fields_tagged();
+   if (!has_only_tagged_fields) {
+     for (int offset = start_offset; offset < end_offset;) {
+       int end_of_region_offset;
+       if (helper.IsTagged(offset, end_offset, &end_of_region_offset)) {
+         RecordWrites(clone_address, offset,
+                      (end_of_region_offset - offset) / kPointerSize);
+       }
+       offset = end_of_region_offset;
+     }
+   } else {
+ #endif
+     // Object has only tagged fields.
+     RecordWrites(clone_address, start_offset,
+                  (end_offset - start_offset) / kPointerSize);
+ #if V8_DOUBLE_FIELDS_UNBOXING
+   }
+ #endif
+ } else {
+   wb_mode = SKIP_WRITE_BARRIER;
+   {
+     int adjusted_object_size =
+         site != NULL ? object_size + AllocationMemento::kSize : object_size;
+     AllocationResult allocation =
+         AllocateRaw(adjusted_object_size, NEW_SPACE, NEW_SPACE);
+     if (!allocation.To(&clone)) return allocation;
+   }
+   SLOW_DCHECK(InNewSpace(clone));
+   // Since we know the clone is allocated in new space, we can copy
+   // the contents without worrying about updating the write barrier.
+   CopyBlock(clone->address(), source->address(), object_size);
+   if (site != NULL) {
+     AllocationMemento* alloc_memento = reinterpret_cast<AllocationMemento*>(
+         reinterpret_cast<Address>(clone) + object_size);
+     InitializeAllocationMemento(alloc_memento, site);
+   }
}
SLOW_DCHECK(JSObject::cast(clone)->GetElementsKind() ==
......@@ -3474,7 +3525,7 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
}
if (!allocation.To(&elem)) return allocation;
}
- JSObject::cast(clone)->set_elements(elem, SKIP_WRITE_BARRIER);
+ JSObject::cast(clone)->set_elements(elem, wb_mode);
}
// Update properties if necessary.
if (properties->length() > 0) {
......@@ -3483,7 +3534,7 @@ AllocationResult Heap::CopyJSObject(JSObject* source, AllocationSite* site) {
AllocationResult allocation = CopyFixedArray(properties);
if (!allocation.To(&prop)) return allocation;
}
- JSObject::cast(clone)->set_properties(prop, SKIP_WRITE_BARRIER);
+ JSObject::cast(clone)->set_properties(prop, wb_mode);
}
// Return the new clone.
return clone;
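The wb_mode plumbing restored above matters because a clone forced into old space can hold pointers into new space, and those old-to-new slots must be recorded for the scavenger; a clone known to live in new space can skip that work. A toy model of the distinction, with hypothetical names (this is not V8's write-barrier implementation):

#include <cstdint>
#include <cstdio>
#include <set>

// Hypothetical names; a stand-in for a remembered set of old-to-new slots.
enum WriteBarrierMode { UPDATE_WRITE_BARRIER, SKIP_WRITE_BARRIER };

std::set<uintptr_t> remembered_set;  // slots the scavenger must rescan

// Toy store helper: records the written slot only when the barrier is on.
void StoreField(uintptr_t slot, WriteBarrierMode mode) {
  if (mode == UPDATE_WRITE_BARRIER) remembered_set.insert(slot);
}

int main() {
  // Clone forced into old space: fields may point at new space, so every
  // tagged store must be recorded.
  StoreField(0x1000, UPDATE_WRITE_BARRIER);
  // Clone known to be in new space: the scavenger scans it anyway.
  StoreField(0x2000, SKIP_WRITE_BARRIER);
  std::printf("remembered slots: %zu\n", remembered_set.size());  // prints 1
  return 0;
}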
......@@ -3557,7 +3608,7 @@ AllocationResult Heap::AllocateInternalizedStringImpl(T t, int chars,
// Allocate string.
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
......@@ -3599,7 +3650,7 @@ AllocationResult Heap::AllocateRawOneByteString(int length,
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space);
+ AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
......@@ -3623,7 +3674,7 @@ AllocationResult Heap::AllocateRawTwoByteString(int length,
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space);
+ AllocationResult allocation = AllocateRaw(size, space, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
......@@ -3640,7 +3691,7 @@ AllocationResult Heap::AllocateEmptyFixedArray() {
int size = FixedArray::SizeFor(0);
HeapObject* result = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, OLD_SPACE);
+ AllocationResult allocation = AllocateRaw(size, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
}
// Initialize the object.
......@@ -3756,7 +3807,7 @@ AllocationResult Heap::AllocateRawFixedArray(int length,
int size = FixedArray::SizeFor(length);
AllocationSpace space = SelectSpace(pretenure);
- return AllocateRaw(size, space);
+ return AllocateRaw(size, space, OLD_SPACE);
}
......@@ -3827,7 +3878,8 @@ AllocationResult Heap::AllocateRawFixedDoubleArray(int length,
HeapObject* object = nullptr;
{
- AllocationResult allocation = AllocateRaw(size, space, kDoubleAligned);
+ AllocationResult allocation =
+     AllocateRaw(size, space, OLD_SPACE, kDoubleAligned);
if (!allocation.To(&object)) return allocation;
}
......@@ -3840,7 +3892,8 @@ AllocationResult Heap::AllocateSymbol() {
STATIC_ASSERT(Symbol::kSize <= Page::kMaxRegularHeapObjectSize);
HeapObject* result = nullptr;
- AllocationResult allocation = AllocateRaw(Symbol::kSize, OLD_SPACE);
+ AllocationResult allocation =
+     AllocateRaw(Symbol::kSize, OLD_SPACE, OLD_SPACE);
if (!allocation.To(&result)) return allocation;
result->set_map_no_write_barrier(symbol_map());
......
......@@ -1967,7 +1967,7 @@ class Heap {
// performed by the runtime and should not be bypassed (to extend this to
// inlined allocations, use the Heap::DisableInlineAllocation() support).
MUST_USE_RESULT inline AllocationResult AllocateRaw(
- int size_in_bytes, AllocationSpace space,
+ int size_in_bytes, AllocationSpace space, AllocationSpace retry_space,
AllocationAlignment aligment = kWordAligned);
// Allocates a heap object based on the map.
......
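This declaration change explains the mechanical churn across heap.cc: because retry_space is inserted before the defaulted alignment parameter, every existing two-argument call gains a third argument, and callers that pass an alignment must spell out all four. A compilable sketch of the signature shape, with stand-in enums rather than the real V8 definitions:

#include <cassert>

// Stand-in enums; not the real V8 definitions.
enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE, MAP_SPACE, LO_SPACE };
enum AllocationAlignment { kWordAligned, kDoubleAligned, kDoubleUnaligned };

struct AllocationResult {
  AllocationSpace retry_space;
  AllocationAlignment alignment;
};

// Same parameter order as the restored declaration above.
AllocationResult AllocateRaw(int size_in_bytes, AllocationSpace space,
                             AllocationSpace retry_space,
                             AllocationAlignment alignment = kWordAligned) {
  (void)size_in_bytes;
  (void)space;
  return {retry_space, alignment};
}

int main() {
  // Default alignment still requires naming a retry space: C++ defaults can
  // only be omitted from the right.
  AllocationResult a = AllocateRaw(16, OLD_SPACE, OLD_SPACE);
  // A non-default alignment forces all four arguments to be spelled out.
  AllocationResult b = AllocateRaw(16, NEW_SPACE, OLD_SPACE, kDoubleUnaligned);
  assert(a.alignment == kWordAligned && b.alignment == kDoubleUnaligned);
  return 0;
}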
......@@ -40,6 +40,7 @@ AllocationResult v8::internal::HeapTester::AllocateAfterFailures() {
Heap* heap = CcTest::heap();
// New space.
SimulateFullSpace(heap->new_space());
heap->AllocateByteArray(100).ToObjectChecked();
heap->AllocateFixedArray(100, NOT_TENURED).ToObjectChecked();
......
......@@ -1181,6 +1181,85 @@ TEST(Iteration) {
}
static int LenFromSize(int size) {
return (size - FixedArray::kHeaderSize) / kPointerSize;
}
HEAP_TEST(Regression39128) {
// Test case for crbug.com/39128.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = CcTest::heap();
// Increase the chance of 'bump-the-pointer' allocation in old space.
heap->CollectAllGarbage();
v8::HandleScope scope(CcTest::isolate());
// The plan: create JSObject which references objects in new space.
// Then clone this object (forcing it to go into old space) and check
// that region dirty marks are updated correctly.
// Step 1: prepare a map for the object. We add 1 inobject property to it.
// Create a map with single inobject property.
Handle<Map> my_map = Map::Create(CcTest::i_isolate(), 1);
int n_properties = my_map->GetInObjectProperties();
CHECK_GT(n_properties, 0);
int object_size = my_map->instance_size();
// Step 2: allocate a lot of objects so to almost fill new space: we need
// just enough room to allocate JSObject and thus fill the newspace.
int allocation_amount = Min(FixedArray::kMaxSize,
Page::kMaxRegularHeapObjectSize + kPointerSize);
int allocation_len = LenFromSize(allocation_amount);
NewSpace* new_space = heap->new_space();
DisableInlineAllocationSteps(new_space);
Address* top_addr = new_space->allocation_top_address();
Address* limit_addr = new_space->allocation_limit_address();
while ((*limit_addr - *top_addr) > allocation_amount) {
CHECK(!heap->always_allocate());
Object* array = heap->AllocateFixedArray(allocation_len).ToObjectChecked();
CHECK(new_space->Contains(array));
}
// Step 3: now allocate fixed array and JSObject to fill the whole new space.
int to_fill = static_cast<int>(*limit_addr - *top_addr - object_size);
int fixed_array_len = LenFromSize(to_fill);
CHECK(fixed_array_len < FixedArray::kMaxLength);
CHECK(!heap->always_allocate());
Object* array = heap->AllocateFixedArray(fixed_array_len).ToObjectChecked();
CHECK(new_space->Contains(array));
Object* object = heap->AllocateJSObjectFromMap(*my_map).ToObjectChecked();
CHECK(new_space->Contains(object));
JSObject* jsobject = JSObject::cast(object);
CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
CHECK_EQ(0, jsobject->properties()->length());
// Create a reference to object in new space in jsobject.
FieldIndex index = FieldIndex::ForInObjectOffset(
JSObject::kHeaderSize - kPointerSize);
jsobject->FastPropertyAtPut(index, array);
CHECK_EQ(0, static_cast<int>(*limit_addr - *top_addr));
// Step 4: clone jsobject, but force always allocate first to create a clone
// in old pointer space.
Address old_space_top = heap->old_space()->top();
AlwaysAllocateScope aa_scope(isolate);
Object* clone_obj = heap->CopyJSObject(jsobject).ToObjectChecked();
JSObject* clone = JSObject::cast(clone_obj);
if (clone->address() != old_space_top) {
// Alas, got allocated from free list, we cannot do checks.
return;
}
CHECK(heap->old_space()->Contains(clone->address()));
}
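The filling loop in this test leans on LenFromSize being the exact inverse of FixedArray::SizeFor, so the final array consumes every byte between the bump pointer and the limit except the room reserved for the JSObject. A small self-checking sketch of that arithmetic, with assumed 64-bit constants rather than the real header values:

#include <cassert>

// Assumed 64-bit constants; the real values come from V8 headers.
const int kPointerSize = 8;
const int kFixedArrayHeaderSize = 2 * kPointerSize;  // map word + length

int SizeFor(int length) { return kFixedArrayHeaderSize + length * kPointerSize; }
int LenFromSize(int size) { return (size - kFixedArrayHeaderSize) / kPointerSize; }

int main() {
  // Convert the bytes remaining between top and limit (minus the room the
  // JSObject needs) into a FixedArray length that consumes them exactly.
  int remaining = 4096;    // pretend limit - top
  int object_size = 64;    // space reserved for the JSObject
  int to_fill = remaining - object_size;
  int len = LenFromSize(to_fill);
  assert(SizeFor(len) == to_fill);  // round-trips for pointer-aligned sizes
  return 0;
}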
UNINITIALIZED_TEST(TestCodeFlushing) {
// If we do not flush code this test is invalid.
if (!FLAG_flush_code) return;
......@@ -3612,6 +3691,38 @@ TEST(CountForcedGC) {
}
TEST(Regress2237) {
i::FLAG_stress_compaction = false;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Factory* factory = isolate->factory();
v8::HandleScope scope(CcTest::isolate());
Handle<String> slice(CcTest::heap()->empty_string());
{
// Generate a parent that lives in new-space.
v8::HandleScope inner_scope(CcTest::isolate());
const char* c = "This text is long enough to trigger sliced strings.";
Handle<String> s = factory->NewStringFromAsciiChecked(c);
CHECK(s->IsSeqOneByteString());
CHECK(CcTest::heap()->InNewSpace(*s));
// Generate a sliced string that is based on the above parent and
// lives in old-space.
SimulateFullSpace(CcTest::heap()->new_space());
AlwaysAllocateScope always_allocate(isolate);
Handle<String> t = factory->NewProperSubString(s, 5, 35);
CHECK(t->IsSlicedString());
CHECK(!CcTest::heap()->InNewSpace(*t));
*slice.location() = *t.location();
}
CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
CcTest::heap()->CollectAllGarbage();
CHECK(SlicedString::cast(*slice)->parent()->IsSeqOneByteString());
}
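Both this test and Regression39128 above rely on AlwaysAllocateScope to force allocation into old space. A guess at the shape of such a guard, simplified to a depth counter on a local struct (the real scope lives on the Isolate's heap):

#include <cassert>

// Simplified guard in the spirit of AlwaysAllocateScope; the real class bumps
// a depth counter on the Isolate's heap rather than on a local struct.
struct Heap {
  int always_allocate_depth = 0;
  bool always_allocate() const { return always_allocate_depth > 0; }
};

class AlwaysAllocateScope {
 public:
  explicit AlwaysAllocateScope(Heap* heap) : heap_(heap) {
    ++heap_->always_allocate_depth;
  }
  ~AlwaysAllocateScope() { --heap_->always_allocate_depth; }

 private:
  Heap* heap_;
};

int main() {
  Heap heap;
  assert(!heap.always_allocate());
  {
    AlwaysAllocateScope scope(&heap);  // allocations must not fail in here
    assert(heap.always_allocate());
  }
  assert(!heap.always_allocate());  // back to normal once the scope closes
  return 0;
}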
#ifdef OBJECT_PRINT
TEST(PrintSharedFunctionInfo) {
CcTest::InitializeVM();
......
......@@ -642,7 +642,6 @@ static inline void PrintStats(const ConsStringGenerationData& data) {
template<typename BuildString>
void TestStringCharacterStream(BuildString build, int test_cases) {
- FLAG_gc_global = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
HandleScope outer_scope(isolate);
......
......@@ -1404,6 +1404,89 @@ TEST(StoreBufferScanOnScavenge) {
}
static int LenFromSize(int size) {
return (size - FixedArray::kHeaderSize) / kPointerSize;
}
HEAP_TEST(WriteBarriersInCopyJSObject) {
FLAG_max_semi_space_size = 1; // Ensure new space is not growing.
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
Heap* heap = CcTest::heap();
v8::HandleScope scope(CcTest::isolate());
// The plan: create JSObject which contains unboxed double value that looks
// like a reference to an object in new space.
// Then clone this object (forcing it to go into old space) and check
// that the value of the unboxed double property of the cloned object
// was not corrupted by GC.
// Step 1: prepare a map for the object. We add unboxed double property to it.
// Create a map with single inobject property.
Handle<Map> my_map = Map::Create(isolate, 1);
Handle<String> name = isolate->factory()->InternalizeUtf8String("foo");
my_map = Map::CopyWithField(my_map, name, HeapType::Any(isolate), NONE,
Representation::Double(),
INSERT_TRANSITION).ToHandleChecked();
int object_size = my_map->instance_size();
// Step 2: allocate a lot of objects so to almost fill new space: we need
// just enough room to allocate JSObject and thus fill the newspace.
int allocation_amount =
Min(FixedArray::kMaxSize, Page::kMaxRegularHeapObjectSize + kPointerSize);
int allocation_len = LenFromSize(allocation_amount);
NewSpace* new_space = heap->new_space();
DisableInlineAllocationSteps(new_space);
Address* top_addr = new_space->allocation_top_address();
Address* limit_addr = new_space->allocation_limit_address();
while ((*limit_addr - *top_addr) > allocation_amount) {
CHECK(!heap->always_allocate());
Object* array = heap->AllocateFixedArray(allocation_len).ToObjectChecked();
CHECK(new_space->Contains(array));
}
// Step 3: now allocate fixed array and JSObject to fill the whole new space.
int to_fill = static_cast<int>(*limit_addr - *top_addr - object_size);
int fixed_array_len = LenFromSize(to_fill);
CHECK(fixed_array_len < FixedArray::kMaxLength);
CHECK(!heap->always_allocate());
Object* array = heap->AllocateFixedArray(fixed_array_len).ToObjectChecked();
CHECK(new_space->Contains(array));
Object* object = heap->AllocateJSObjectFromMap(*my_map).ToObjectChecked();
CHECK(new_space->Contains(object));
JSObject* jsobject = JSObject::cast(object);
CHECK_EQ(0, FixedArray::cast(jsobject->elements())->length());
CHECK_EQ(0, jsobject->properties()->length());
// Construct a double value that looks like a pointer to the new space object
// and store it into the obj.
Address fake_object = reinterpret_cast<Address>(array) + kPointerSize;
double boom_value = bit_cast<double>(fake_object);
FieldIndex index = FieldIndex::ForDescriptor(*my_map, 0);
jsobject->RawFastDoublePropertyAtPut(index, boom_value);
CHECK_EQ(0, static_cast<int>(*limit_addr - *top_addr));
// Step 4: clone jsobject, but force always allocate first to create a clone
// in old pointer space.
AlwaysAllocateScope aa_scope(isolate);
Object* clone_obj = heap->CopyJSObject(jsobject).ToObjectChecked();
Handle<JSObject> clone(JSObject::cast(clone_obj));
CHECK(heap->old_space()->Contains(clone->address()));
CcTest::heap()->CollectGarbage(NEW_SPACE, "boom");
// The value in cloned object should not be corrupted by GC.
CHECK_EQ(boom_value, clone->RawFastDoublePropertyAt(index));
}
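The boom_value trick above deserves a note: the test forges a double whose bit pattern equals a plausible new-space address, so if CopyJSObject wrongly treated the unboxed field as tagged, the scavenger would rewrite it and the final CHECK_EQ would fail. A sketch of the bit reinterpretation, assuming 64-bit pointers (BitCastToDouble here is a local helper standing in for bit_cast, not V8's):

#include <cstdint>
#include <cstdio>
#include <cstring>

// Local helper mirroring what the test's bit_cast does; assumes 64-bit
// pointers so the address and the double have the same width.
double BitCastToDouble(uintptr_t addr) {
  static_assert(sizeof(double) == sizeof(uintptr_t), "assumes 64-bit build");
  double d;
  std::memcpy(&d, &addr, sizeof(d));
  return d;
}

int main() {
  uintptr_t fake_object = 0xdeadbeef0;  // bit pattern resembling a heap address
  double boom_value = BitCastToDouble(fake_object);
  // If a GC wrongly treated the raw field as a tagged pointer it would
  // rewrite these bits; the test checks they survive a scavenge untouched.
  std::printf("%a\n", boom_value);
  return 0;
}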
static void TestWriteBarrier(Handle<Map> map, Handle<Map> new_map,
int tagged_descriptor, int double_descriptor,
bool check_tagged_value = true) {
......