Commit 8a1a2867 authored by Igor Sheludko, committed by Commit Bot

[csa] Remove ParameterMode from CSA::BuildFastLoop

Bug: v8:9708
Change-Id: I305cc007a4e7302c8587b999cbb11f23ced4cfd3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1800579
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63735}
parent 6cf125a9
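This CL drops the runtime-dispatched ParameterMode argument from CSA::BuildFastLoop and replaces it with a template index parameter, so the Smi-vs-IntPtrT choice is checked at compile time and the loop body receives a typed TNode<TIndex> instead of a raw Node*. As a reading aid, here is a minimal before/after sketch of the call-site pattern (identifiers follow the diff below; the bounds and loop body are illustrative only, not code from this CL):

    // Before: index width chosen at runtime, untyped loop variable.
    BuildFastLoop(
        IntPtrConstant(0), length,
        [&](Node* index) { /* use index */ },
        1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);

    // After: index width is a template argument, typed loop variable.
    BuildFastLoop<IntPtrT>(
        IntPtrConstant(0), length,
        [&](TNode<IntPtrT> index) { /* use index */ },
        1, IndexAdvanceMode::kPost);

Smi-indexed loops migrate the same way, via BuildFastLoop<Smi> and TNode<Smi>.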
@@ -262,16 +262,17 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
     TVARIABLE(IntPtrT, current_argument,
               Signed(arguments.AtIndexPtr(info.argument_count, mode)));
     VariableList var_list1({&current_argument}, zone());
-    mapped_offset = UncheckedCast<IntPtrT>(BuildFastLoop(
+    mapped_offset = BuildFastLoop<IntPtrT>(
         var_list1, argument_offset, mapped_offset,
-        [this, elements, &current_argument](Node* offset) {
+        [&](TNode<IntPtrT> offset) {
          Increment(&current_argument, kSystemPointerSize);
          TNode<Object> arg = LoadBufferObject(
              ReinterpretCast<RawPtrT>(current_argument.value()), 0);
          StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
                              arg);
-          return;
        },
-        -kTaggedSize, INTPTR_PARAMETERS));
+        -kTaggedSize);
     // Copy the parameter slots and the holes in the arguments.
     // We need to fill in mapped_count slots. They index the context,
@@ -295,9 +296,9 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
         IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
     TNode<IntPtrT> zero_offset = ElementOffsetFromIndex(
         zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
-    BuildFastLoop(
+    BuildFastLoop<IntPtrT>(
         var_list2, mapped_offset, zero_offset,
-        [=, &context_index](Node* offset) {
+        [&](TNode<IntPtrT> offset) {
          StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
                              the_hole);
          StoreNoWriteBarrier(MachineRepresentation::kTagged,
@@ -305,7 +306,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
                              BIntToSmi(context_index.value()));
          Increment(&context_index);
        },
-        -kTaggedSize, INTPTR_PARAMETERS);
+        -kTaggedSize);
     result.Bind(argument_object);
     Goto(&done);
...
@@ -224,16 +224,8 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
       ForEachDirection direction, TNode<JSTypedArray> typed_array) {
     VariableList list({&a_, &k_, &to_}, zone());
-    FastLoopBody body = [&](Node* index) {
-      GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached);
-      TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
-      auto value = LoadFixedTypedArrayElementAsTagged(
-          data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
-      k_.Bind(index);
-      a_.Bind(processor(this, value, index));
-    };
-    Node* start = SmiConstant(0);
-    Node* end = len_;
+    TNode<Smi> start = SmiConstant(0);
+    TNode<Smi> end = CAST(len_);
     IndexAdvanceMode advance_mode = IndexAdvanceMode::kPost;
     int incr = 1;
     if (direction == ForEachDirection::kReverse) {
@@ -241,8 +233,17 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
       advance_mode = IndexAdvanceMode::kPre;
       incr = -1;
     }
-    BuildFastLoop(list, start, end, body, incr, ParameterMode::SMI_PARAMETERS,
-                  advance_mode);
+    BuildFastLoop<Smi>(
+        list, start, end,
+        [&](TNode<Smi> index) {
+          GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached);
+          TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
+          TNode<Object> value = LoadFixedTypedArrayElementAsTagged(
+              data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
+          k_.Bind(index);
+          a_.Bind(processor(this, value, index));
+        },
+        incr, advance_mode);
   }
   // Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
...
@@ -259,7 +259,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
                &if_doubles);
   BIND(&if_smiorobjects);
   {
-    auto set_entry = [&](Node* index) {
+    auto set_entry = [&](TNode<IntPtrT> index) {
       TNode<Object> element = LoadAndNormalizeFixedArrayElement(
           CAST(elements), UncheckedCast<IntPtrT>(index));
       AddConstructorEntry(variant, context, collection, add_func, element,
@@ -270,8 +270,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
     // elements, a fast loop is used. This assumes that adding an element
     // to the collection does not call user code that could mutate the elements
     // or collection.
-    BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
-                  ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+    BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1,
+                           IndexAdvanceMode::kPost);
     Goto(&exit);
   }
   BIND(&if_doubles);
@@ -286,13 +286,13 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
                           element);
     } else {
       DCHECK(variant == kSet || variant == kWeakSet);
-      auto set_entry = [&](Node* index) {
+      auto set_entry = [&](TNode<IntPtrT> index) {
        TNode<Object> entry = LoadAndNormalizeFixedDoubleArrayElement(
            elements, UncheckedCast<IntPtrT>(index));
        AddConstructorEntry(variant, context, collection, add_func, entry);
      };
-      BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
-                    ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+      BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1,
+                             IndexAdvanceMode::kPost);
       Goto(&exit);
     }
   }
...
@@ -263,13 +263,12 @@ TNode<Context> ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
   TNode<Oddball> undefined = UndefinedConstant();
   TNode<IntPtrT> start_offset = IntPtrConstant(Context::kTodoHeaderSize);
   CodeStubAssembler::VariableList vars(0, zone());
-  BuildFastLoop(
+  BuildFastLoop<IntPtrT>(
       vars, start_offset, size,
-      [=](SloppyTNode<IntPtrT> offset) {
-        StoreObjectFieldNoWriteBarrier(
-            function_context, UncheckedCast<IntPtrT>(offset), undefined);
+      [=](TNode<IntPtrT> offset) {
+        StoreObjectFieldNoWriteBarrier(function_context, offset, undefined);
       },
-      kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+      kTaggedSize, IndexAdvanceMode::kPost);
   return function_context;
 }
@@ -571,18 +570,18 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
   BIND(&continue_with_write_barrier);
   {
     Comment("Copy in-object properties slow");
-    BuildFastLoop(
+    BuildFastLoop<IntPtrT>(
         offset.value(), instance_size,
-        [=](SloppyTNode<IntPtrT> offset) {
+        [=](TNode<IntPtrT> offset) {
          // TODO(ishell): value decompression is not necessary here.
          TNode<Object> field = LoadObjectField(boilerplate, offset);
          StoreObjectFieldNoWriteBarrier(copy, offset, field);
        },
-        kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+        kTaggedSize, IndexAdvanceMode::kPost);
     Comment("Copy mutable HeapNumber values");
-    BuildFastLoop(
+    BuildFastLoop<IntPtrT>(
         offset.value(), instance_size,
-        [=](SloppyTNode<IntPtrT> offset) {
+        [=](TNode<IntPtrT> offset) {
          TNode<Object> field = LoadObjectField(copy, offset);
          Label copy_heap_number(this, Label::kDeferred), continue_loop(this);
          // We only have to clone complex field values.
@@ -601,7 +600,7 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
          }
          BIND(&continue_loop);
        },
-        kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+        kTaggedSize, IndexAdvanceMode::kPost);
     Goto(&done_init);
   }
   BIND(&done_init);
...
@@ -61,7 +61,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
   Node* const dst_ptr = PointerToSeqStringData(dst);
   TVARIABLE(IntPtrT, var_cursor, IntPtrConstant(0));
-  TNode<RawPtrT> const start_address = to_direct.PointerToData(&call_c);
+  TNode<IntPtrT> const start_address =
+      ReinterpretCast<IntPtrT>(to_direct.PointerToData(&call_c));
   TNode<IntPtrT> const end_address =
       Signed(IntPtrAdd(start_address, ChangeUint32ToWord(length)));
@@ -71,9 +72,9 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
   VARIABLE(var_did_change, MachineRepresentation::kWord32, Int32Constant(0));
   VariableList push_vars({&var_cursor, &var_did_change}, zone());
-  BuildFastLoop(
+  BuildFastLoop<IntPtrT>(
       push_vars, start_address, end_address,
-      [=, &var_cursor, &var_did_change](Node* current) {
+      [&](TNode<IntPtrT> current) {
        TNode<Uint8T> c = Load<Uint8T>(current);
        TNode<Uint8T> lower =
            Load<Uint8T>(to_lower_table_addr, ChangeInt32ToIntPtr(c));
@@ -85,7 +86,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
        Increment(&var_cursor);
      },
-      kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+      kCharSize, IndexAdvanceMode::kPost);
   // Return the original string if it remained unchanged in order to preserve
   // e.g. internalization and private symbols (such as the preserved object
...
@@ -679,9 +679,9 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
     TVARIABLE(IntPtrT, var_to_offset, to_offset);
     VariableList vars({&var_to_offset}, zone());
-    BuildFastLoop(
+    BuildFastLoop<IntPtrT>(
         vars, IntPtrZero(), limit_offset,
-        [=, &var_to_offset](Node* offset) {
+        [&](TNode<IntPtrT> offset) {
          TNode<Int32T> value = UncheckedCast<Int32T>(Load(
              MachineType::Int32(), static_offsets_vector_address, offset));
          TNode<Smi> smi_value = SmiFromInt32(value);
@@ -689,7 +689,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
                              var_to_offset.value(), smi_value);
          Increment(&var_to_offset, kTaggedSize);
        },
-        kInt32Size, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+        kInt32Size, IndexAdvanceMode::kPost);
   }
   var_result = match_info;
...
@@ -1366,9 +1366,9 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
     TNode<IntPtrT> string_data_offset = to_direct.offset();
     TNode<FixedArray> cache = SingleCharacterStringCacheConstant();
-    BuildFastLoop(
+    BuildFastLoop<IntPtrT>(
         IntPtrConstant(0), length,
-        [&](Node* index) {
+        [&](TNode<IntPtrT> index) {
          // TODO(jkummerow): Implement a CSA version of DisallowHeapAllocation
          // and use that to guard ToDirectStringAssembler.PointerToData().
          CSA_ASSERT(this, WordEqual(to_direct.PointerToData(&call_runtime),
@@ -1385,7 +1385,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
          StoreFixedArrayElement(elements, index, entry);
        },
-        1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+        1, IndexAdvanceMode::kPost);
     TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, context);
     result_array = AllocateJSArray(array_map, elements, length_smi);
...
@@ -735,9 +735,9 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
   DispatchTypedArrayByElementsKind(
       elements_kind,
       [&](ElementsKind kind, int size, int typed_array_fun_index) {
-        BuildFastLoop(
+        BuildFastLoop<IntPtrT>(
             IntPtrConstant(0), length,
-            [&](Node* index) {
+            [&](TNode<IntPtrT> index) {
              TNode<Object> item = args.AtIndex(index, INTPTR_PARAMETERS);
              Node* value =
                  PrepareValueForWriteToTypedArray(item, kind, context);
@@ -755,7 +755,7 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
              StoreElement(backing_store, kind, index, value,
                           INTPTR_PARAMETERS);
            },
-            1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+            1, IndexAdvanceMode::kPost);
       });
   // 8. Return newObj.
@@ -948,9 +948,9 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
   TNode<Int32T> elements_kind = LoadElementsKind(target_obj.value());
   // 7e/13 : Copy the elements
-  BuildFastLoop(
+  BuildFastLoop<Smi>(
       SmiConstant(0), final_length.value(),
-      [&](Node* index) {
+      [&](TNode<Smi> index) {
        TNode<Object> const k_value =
            GetProperty(context, final_source.value(), index);
@@ -978,7 +978,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
                    SMI_PARAMETERS);
        });
      },
-      1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
+      1, IndexAdvanceMode::kPost);
   args.PopAndReturn(target_obj.value());
...
@@ -3947,21 +3947,20 @@ Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) {
   TNode<IntPtrT> size = TimesTaggedSize(LoadMapInstanceSizeInWords(map));
   TNode<HeapObject> object = Allocate(size, flags);
   StoreMapNoWriteBarrier(object, map);
-  InitializeStructBody(object, map, size, Struct::kHeaderSize);
+  InitializeStructBody(object, size, Struct::kHeaderSize);
   return object;
 }

-void CodeStubAssembler::InitializeStructBody(Node* object, Node* map,
-                                             Node* size, int start_offset) {
-  CSA_SLOW_ASSERT(this, IsMap(map));
+void CodeStubAssembler::InitializeStructBody(TNode<HeapObject> object,
+                                             TNode<IntPtrT> size,
+                                             int start_offset) {
   Comment("InitializeStructBody");
   TNode<Oddball> filler = UndefinedConstant();
   // Calculate the untagged field addresses.
-  object = BitcastTaggedToWord(object);
-  TNode<WordT> start_address =
-      IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
-  TNode<WordT> end_address =
-      IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
+  TNode<IntPtrT> start_address =
+      IntPtrAdd(BitcastTaggedToWord(object),
+                IntPtrConstant(start_offset - kHeapObjectTag));
+  TNode<IntPtrT> end_address = IntPtrAdd(start_address, size);
   StoreFieldsNoWriteBarrier(start_address, end_address, filler);
 }
@@ -3983,8 +3982,9 @@ TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
 }

 void CodeStubAssembler::InitializeJSObjectFromMap(
-    Node* object, Node* map, Node* instance_size, Node* properties,
-    Node* elements, SlackTrackingMode slack_tracking_mode) {
+    SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+    SloppyTNode<IntPtrT> instance_size, Node* properties, Node* elements,
+    SlackTrackingMode slack_tracking_mode) {
   CSA_SLOW_ASSERT(this, IsMap(map));
   // This helper assumes that the object is in new-space, as guarded by the
   // check in AllocatedJSObjectFromMap.
@@ -4015,7 +4015,8 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
 }

 void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
-    Node* object, Node* map, Node* instance_size, int start_offset) {
+    SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+    SloppyTNode<IntPtrT> instance_size, int start_offset) {
   STATIC_ASSERT(Map::kNoSlackTracking == 0);
   CSA_ASSERT(
       this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map)));
@@ -4024,8 +4025,8 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
 }

 void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
-    Node* object, Node* map, Node* instance_size) {
-  CSA_SLOW_ASSERT(this, IsMap(map));
+    SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+    SloppyTNode<IntPtrT> instance_size) {
   Comment("InitializeJSObjectBodyNoSlackTracking");
   // Perform in-object slack tracking if requested.
@@ -4053,9 +4054,9 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
     // The object still has in-object slack therefore the |unsed_or_unused|
     // field contain the "used" value.
-    TNode<UintPtrT> used_size = TimesTaggedSize(ChangeUint32ToWord(
+    TNode<IntPtrT> used_size = Signed(TimesTaggedSize(ChangeUint32ToWord(
         LoadObjectField(map, Map::kUsedOrUnusedInstanceSizeInWordsOffset,
-                        MachineType::Uint8())));
+                        MachineType::Uint8()))));
     Comment("iInitialize filler fields");
     InitializeFieldsWithRoot(object, used_size, instance_size,
@@ -4084,19 +4085,19 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
   BIND(&end);
 }

-void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
-                                                  Node* end_address,
-                                                  Node* value) {
+void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
+                                                  TNode<IntPtrT> end_address,
+                                                  TNode<Object> value) {
   Comment("StoreFieldsNoWriteBarrier");
   CSA_ASSERT(this, WordIsAligned(start_address, kTaggedSize));
   CSA_ASSERT(this, WordIsAligned(end_address, kTaggedSize));
-  BuildFastLoop(
+  BuildFastLoop<IntPtrT>(
       start_address, end_address,
-      [this, value](Node* current) {
+      [=](TNode<IntPtrT> current) {
        UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, current,
                                  value);
      },
-      kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+      kTaggedSize, IndexAdvanceMode::kPost);
 }

 TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
@@ -5409,7 +5410,7 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
       ElementOffsetFromIndex(to_index, to_kind, header_size);
   TNode<IntPtrT> byte_count =
       ElementOffsetFromIndex(character_count, from_kind);
-  TNode<WordT> limit_offset = IntPtrAdd(from_offset, byte_count);
+  TNode<IntPtrT> limit_offset = IntPtrAdd(from_offset, byte_count);
   // Prepare the fast loop
   MachineType type =
@@ -5427,9 +5428,9 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
              (ToInt32Constant(from_index, &from_index_constant) &&
               ToInt32Constant(to_index, &to_index_constant) &&
               from_index_constant == to_index_constant));
-  BuildFastLoop(
+  BuildFastLoop<IntPtrT>(
       vars, from_offset, limit_offset,
-      [=, &current_to_offset](Node* offset) {
+      [&](TNode<IntPtrT> offset) {
        Node* value = Load(type, from_string, offset);
        StoreNoWriteBarrier(rep, to_string,
                            index_same ? offset : current_to_offset.value(),
@@ -5438,7 +5439,7 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
          Increment(&current_to_offset, to_increment);
        }
      },
-      from_increment, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+      from_increment, IndexAdvanceMode::kPost);
 }

 Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
@@ -8381,6 +8382,8 @@ void CodeStubAssembler::Increment(TVariable<TIndex>* variable, int value) {
   *variable =
       IntPtrOrSmiAdd(variable->value(), IntPtrOrSmiConstant<TIndex>(value));
 }

+// Instantiate Increment for Smi and IntPtrT.
 template void CodeStubAssembler::Increment<Smi>(TVariable<Smi>* variable,
                                                 int value);
 template void CodeStubAssembler::Increment<IntPtrT>(
@@ -8945,16 +8948,16 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
       first_inclusive,
       IntPtrMul(ChangeInt32ToIntPtr(number_of_valid_entries), factor));
-  BuildFastLoop(
+  BuildFastLoop<IntPtrT>(
       last_exclusive, first_inclusive,
-      [=](SloppyTNode<IntPtrT> name_index) {
+      [=](TNode<IntPtrT> name_index) {
        TNode<MaybeObject> element =
            LoadArrayElement(array, Array::kHeaderSize, name_index);
        TNode<Name> candidate_name = CAST(element);
        *var_name_index = name_index;
        GotoIf(TaggedEqual(candidate_name, unique_name), if_found);
      },
-      -Array::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPre);
+      -Array::kEntrySize, IndexAdvanceMode::kPre);
   Goto(if_not_found);
 }
@@ -9142,12 +9145,9 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
   Goto(&descriptor_array_loop);
   BIND(&descriptor_array_loop);
-  BuildFastLoop(
+  BuildFastLoop<IntPtrT>(
       list, var_start_key_index.value(), var_end_key_index.value(),
-      [=, &var_stable, &var_has_symbol, &var_is_symbol_processing_loop,
-       &var_start_key_index, &var_end_key_index](Node* index) {
-        TNode<IntPtrT> descriptor_key_index =
-            TNode<IntPtrT>::UncheckedCast(index);
+      [&](TNode<IntPtrT> descriptor_key_index) {
        TNode<Name> next_key =
            LoadKeyByKeyIndex(descriptors, descriptor_key_index);
@@ -9282,7 +9282,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
        }
        BIND(&next_iteration);
      },
-      DescriptorArray::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+      DescriptorArray::kEntrySize, IndexAdvanceMode::kPost);
   if (mode == kEnumerationOrder) {
     Label done(this);
@@ -10245,6 +10245,7 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(
   return IntPtrAdd(IntPtrConstant(base_size), Signed(shifted_index));
 }

+// Instantiate ElementOffsetFromIndex for Smi and IntPtrT.
 template V8_EXPORT_PRIVATE TNode<IntPtrT>
 CodeStubAssembler::ElementOffsetFromIndex<Smi>(TNode<Smi> index_node,
                                                ElementsKind kind,
@@ -11250,30 +11251,11 @@ TNode<Int32T> CodeStubAssembler::LoadElementsKind(
   return elements_kind;
 }

-Node* CodeStubAssembler::BuildFastLoop(
-    const CodeStubAssembler::VariableList& vars, Node* start_index,
-    Node* end_index, const FastLoopBody& body, int increment,
-    ParameterMode parameter_mode, IndexAdvanceMode advance_mode) {
-  CSA_SLOW_ASSERT(this, MatchesParameterMode(start_index, parameter_mode));
-  CSA_SLOW_ASSERT(this, MatchesParameterMode(end_index, parameter_mode));
-  if (parameter_mode == SMI_PARAMETERS) {
-    return BuildFastLoop(vars, ReinterpretCast<Smi>(start_index),
-                         ReinterpretCast<Smi>(end_index), body, increment,
-                         advance_mode);
-  } else {
-    DCHECK_EQ(INTPTR_PARAMETERS, parameter_mode);
-    return BuildFastLoop(vars, ReinterpretCast<IntPtrT>(start_index),
-                         ReinterpretCast<IntPtrT>(end_index), body, increment,
-                         advance_mode);
-  }
-}
-
 template <typename TIndex>
 TNode<TIndex> CodeStubAssembler::BuildFastLoop(const VariableList& vars,
                                                TNode<TIndex> start_index,
                                                TNode<TIndex> end_index,
-                                               const FastLoopBody& body,
+                                               const FastLoopBody<TIndex>& body,
                                                int increment,
                                                IndexAdvanceMode advance_mode) {
   TVARIABLE(TIndex, var, start_index);
@@ -11312,6 +11294,16 @@ TNode<TIndex> CodeStubAssembler::BuildFastLoop(const VariableList& vars,
   return var.value();
 }

+// Instantiate BuildFastLoop for Smi and IntPtrT.
+template TNode<Smi> CodeStubAssembler::BuildFastLoop<Smi>(
+    const VariableList& vars, TNode<Smi> start_index, TNode<Smi> end_index,
+    const FastLoopBody<Smi>& body, int increment,
+    IndexAdvanceMode advance_mode);
+template TNode<IntPtrT> CodeStubAssembler::BuildFastLoop<IntPtrT>(
+    const VariableList& vars, TNode<IntPtrT> start_index,
+    TNode<IntPtrT> end_index, const FastLoopBody<IntPtrT>& body, int increment,
+    IndexAdvanceMode advance_mode);
+
 void CodeStubAssembler::BuildFastFixedArrayForEach(
     const CodeStubAssembler::VariableList& vars, Node* fixed_array,
     ElementsKind kind, Node* first_element_inclusive,
@@ -11333,17 +11325,15 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
       if (direction == ForEachDirection::kForward) {
        for (int i = first_val; i < last_val; ++i) {
          TNode<IntPtrT> index = IntPtrConstant(i);
-          TNode<IntPtrT> offset =
-              ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
-                                     FixedArray::kHeaderSize - kHeapObjectTag);
+          TNode<IntPtrT> offset = ElementOffsetFromIndex(
+              index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
          body(fixed_array, offset);
        }
      } else {
        for (int i = last_val - 1; i >= first_val; --i) {
          TNode<IntPtrT> index = IntPtrConstant(i);
-          TNode<IntPtrT> offset =
-              ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
-                                     FixedArray::kHeaderSize - kHeapObjectTag);
+          TNode<IntPtrT> offset = ElementOffsetFromIndex(
+              index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
          body(fixed_array, offset);
        }
      }
@@ -11360,11 +11350,10 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
   if (direction == ForEachDirection::kReverse) std::swap(start, limit);
   int increment = IsDoubleElementsKind(kind) ? kDoubleSize : kTaggedSize;
-  BuildFastLoop(
+  BuildFastLoop<IntPtrT>(
       vars, start, limit,
-      [fixed_array, &body](Node* offset) { body(fixed_array, offset); },
+      [&](TNode<IntPtrT> offset) { body(fixed_array, offset); },
       direction == ForEachDirection::kReverse ? -increment : increment,
-      INTPTR_PARAMETERS,
       direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
                                               : IndexAdvanceMode::kPost);
 }
@@ -11375,22 +11364,21 @@ void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace(
                                          doesnt_fit);
 }

-void CodeStubAssembler::InitializeFieldsWithRoot(Node* object,
-                                                 Node* start_offset,
-                                                 Node* end_offset,
+void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
+                                                 TNode<IntPtrT> start_offset,
+                                                 TNode<IntPtrT> end_offset,
                                                  RootIndex root_index) {
   CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
   start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
   end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
   TNode<Object> root_value = LoadRoot(root_index);
-  BuildFastLoop(
+  BuildFastLoop<IntPtrT>(
       end_offset, start_offset,
-      [this, object, root_value](Node* current) {
+      [=](TNode<IntPtrT> current) {
        StoreNoWriteBarrier(MachineRepresentation::kTagged, object, current,
                            root_value);
      },
-      -kTaggedSize, INTPTR_PARAMETERS,
-      CodeStubAssembler::IndexAdvanceMode::kPre);
+      -kTaggedSize, CodeStubAssembler::IndexAdvanceMode::kPre);
 }

 void CodeStubAssembler::BranchIfNumberRelationalComparison(
@@ -13638,14 +13626,13 @@ void CodeStubArguments::ForEach(
   TNode<IntPtrT> end = assembler_->IntPtrSub(
       assembler_->UncheckedCast<IntPtrT>(base_),
       assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS, mode));
-  assembler_->BuildFastLoop(
+  assembler_->BuildFastLoop<IntPtrT>(
       vars, start, end,
-      [this, &body](Node* current) {
+      [&](TNode<IntPtrT> current) {
        Node* arg = assembler_->Load(MachineType::AnyTagged(), current);
        body(arg);
      },
-      -kSystemPointerSize, CodeStubAssembler::INTPTR_PARAMETERS,
-      CodeStubAssembler::IndexAdvanceMode::kPost);
+      -kSystemPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
 }

 void CodeStubArguments::PopAndReturn(Node* value) {
...
@@ -1663,8 +1663,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value,
                           Label* bailout);
-  void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address,
-                                 Node* value);
+  void StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
+                                 TNode<IntPtrT> end_address,
+                                 TNode<Object> value);
   Node* AllocateCellWithValue(Node* value,
                               WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -1763,7 +1764,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   TNode<CollectionType> AllocateSmallOrderedHashTable(TNode<IntPtrT> capacity);
   Node* AllocateStruct(Node* map, AllocationFlags flags = kNone);
-  void InitializeStructBody(Node* object, Node* map, Node* size,
+  void InitializeStructBody(TNode<HeapObject> object, TNode<IntPtrT> size,
                             int start_offset = Struct::kHeaderSize);
   TNode<JSObject> AllocateJSObjectFromMap(
@@ -1772,14 +1773,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
       SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
   void InitializeJSObjectFromMap(
-      Node* object, Node* map, Node* instance_size, Node* properties = nullptr,
+      SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+      SloppyTNode<IntPtrT> instance_size, Node* properties = nullptr,
       Node* elements = nullptr,
       SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
-  void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map,
-                                               Node* instance_size);
+  void InitializeJSObjectBodyWithSlackTracking(
+      SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+      SloppyTNode<IntPtrT> instance_size);
   void InitializeJSObjectBodyNoSlackTracking(
-      Node* object, Node* map, Node* instance_size,
+      SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
+      SloppyTNode<IntPtrT> instance_size,
       int start_offset = JSObject::kHeaderSize);
   TNode<BoolT> IsValidFastJSArrayCapacity(Node* capacity,
@@ -3310,39 +3314,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
   enum class IndexAdvanceMode { kPre, kPost };
-  // TODO(v8:9708): typify index parameter.
-  using FastLoopBody = std::function<void(Node* index)>;
+  template <typename TIndex>
+  using FastLoopBody = std::function<void(TNode<TIndex> index)>;
   template <typename TIndex>
   TNode<TIndex> BuildFastLoop(
       const VariableList& var_list, TNode<TIndex> start_index,
-      TNode<TIndex> end_index, const FastLoopBody& body, int increment,
+      TNode<TIndex> end_index, const FastLoopBody<TIndex>& body, int increment,
       IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
   template <typename TIndex>
   TNode<TIndex> BuildFastLoop(
       TNode<TIndex> start_index, TNode<TIndex> end_index,
-      const FastLoopBody& body, int increment,
+      const FastLoopBody<TIndex>& body, int increment,
       IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
     return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
                          increment, advance_mode);
   }
-  // TODO(v8:9708): remove once all uses are ported.
-  Node* BuildFastLoop(const VariableList& var_list, Node* start_index,
-                      Node* end_index, const FastLoopBody& body, int increment,
-                      ParameterMode parameter_mode,
-                      IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
-  // TODO(v8:9708): remove once all uses are ported.
-  Node* BuildFastLoop(Node* start_index, Node* end_index,
-                      const FastLoopBody& body, int increment,
-                      ParameterMode parameter_mode,
-                      IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
-    return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
-                         increment, parameter_mode, advance_mode);
-  }
   enum class ForEachDirection { kForward, kReverse };
   using FastFixedArrayForEachBody =
@@ -3387,8 +3376,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
                                             Label* doesnt_fit, int base_size,
                                             ParameterMode mode);
-  void InitializeFieldsWithRoot(Node* object, Node* start_offset,
-                                Node* end_offset, RootIndex root);
+  void InitializeFieldsWithRoot(TNode<HeapObject> object,
+                                TNode<IntPtrT> start_offset,
+                                TNode<IntPtrT> end_offset, RootIndex root);
   Node* RelationalComparison(Operation op, SloppyTNode<Object> left,
                              SloppyTNode<Object> right,
...
@@ -3882,30 +3882,28 @@ void AccessorAssembler::GenerateCloneObjectIC() {
     // Just copy the fields as raw data (pretending that there are no mutable
     // HeapNumbers). This doesn't need write barriers.
-    BuildFastLoop(
+    BuildFastLoop<IntPtrT>(
         source_start, source_size,
-        [=](Node* field_index) {
-          TNode<IntPtrT> field_offset =
-              TimesTaggedSize(UncheckedCast<IntPtrT>(field_index));
+        [=](TNode<IntPtrT> field_index) {
+          TNode<IntPtrT> field_offset = TimesTaggedSize(field_index);
          TNode<TaggedT> field =
              LoadObjectField<TaggedT>(CAST(source), field_offset);
          TNode<IntPtrT> result_offset =
              IntPtrAdd(field_offset, field_offset_difference);
          StoreObjectFieldNoWriteBarrier(object, result_offset, field);
        },
-        1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+        1, IndexAdvanceMode::kPost);
     // If mutable HeapNumbers can occur, we need to go through the {object}
     // again here and properly clone them. We use a second loop here to
     // ensure that the GC (and heap verifier) always sees properly initialized
     // objects, i.e. never hits undefined values in double fields.
     if (!FLAG_unbox_double_fields) {
-      BuildFastLoop(
+      BuildFastLoop<IntPtrT>(
           source_start, source_size,
-          [=](Node* field_index) {
-            TNode<IntPtrT> result_offset =
-                IntPtrAdd(TimesTaggedSize(UncheckedCast<IntPtrT>(field_index)),
-                          field_offset_difference);
+          [=](TNode<IntPtrT> field_index) {
+            TNode<IntPtrT> result_offset = IntPtrAdd(
+                TimesTaggedSize(field_index), field_offset_difference);
            TNode<Object> field = LoadObjectField(object, result_offset);
            Label if_done(this), if_mutableheapnumber(this, Label::kDeferred);
            GotoIf(TaggedIsSmi(field), &if_done);
@@ -3919,7 +3917,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
            }
            BIND(&if_done);
          },
-          1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
+          1, IndexAdvanceMode::kPost);
     }
     Return(object);
...