Commit 8a1a2867 authored by Igor Sheludko, committed by Commit Bot

[csa] Remove ParameterMode from CSA::BuildFastLoop

Bug: v8:9708
Change-Id: I305cc007a4e7302c8587b999cbb11f23ced4cfd3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1800579
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63735}
parent 6cf125a9
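
The change in a nutshell: BuildFastLoop previously took the index as an untyped Node* plus a runtime ParameterMode flag (SMI_PARAMETERS vs. INTPTR_PARAMETERS) and dispatched on it; after this commit the index type is a template parameter, the loop body receives a typed TNode<TIndex>, and the final index is returned typed as well. A minimal standalone sketch of the resulting shape, using simplified stand-ins for the V8 types (Smi, IntPtrT, and the loop builder below are illustrative, not the real classes):

// --- illustrative sketch, not part of the commit ---
#include <cstdint>
#include <functional>
#include <iostream>

struct Smi     { int32_t value; };   // stand-in for a tagged small-integer index
struct IntPtrT { intptr_t value; };  // stand-in for a raw word-sized index

// One callback type per index type, mirroring the new FastLoopBody alias.
template <typename TIndex>
using FastLoopBody = std::function<void(TIndex)>;

// The index type is a template parameter; no runtime ParameterMode is needed.
template <typename TIndex>
TIndex BuildFastLoop(TIndex start, TIndex end, const FastLoopBody<TIndex>& body,
                     int increment) {
  auto i = start.value;
  while (i != end.value) {
    body(TIndex{i});
    i += increment;
  }
  return TIndex{i};  // the final index is returned, typed
}

int main() {
  // Call sites state the index type once and get a typed lambda parameter.
  BuildFastLoop<IntPtrT>(IntPtrT{0}, IntPtrT{4},
                         [](IntPtrT i) { std::cout << i.value << ' '; }, 1);
  std::cout << '\n';  // prints: 0 1 2 3
}
// --- end sketch ---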
......@@ -262,16 +262,17 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
TVARIABLE(IntPtrT, current_argument,
Signed(arguments.AtIndexPtr(info.argument_count, mode)));
VariableList var_list1({&current_argument}, zone());
mapped_offset = UncheckedCast<IntPtrT>(BuildFastLoop(
mapped_offset = BuildFastLoop<IntPtrT>(
var_list1, argument_offset, mapped_offset,
[this, elements, &current_argument](Node* offset) {
[&](TNode<IntPtrT> offset) {
Increment(&current_argument, kSystemPointerSize);
TNode<Object> arg = LoadBufferObject(
ReinterpretCast<RawPtrT>(current_argument.value()), 0);
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
arg);
return;
},
-kTaggedSize, INTPTR_PARAMETERS));
-kTaggedSize);
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_count slots. They index the context,
......@@ -295,9 +296,9 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
TNode<IntPtrT> zero_offset = ElementOffsetFromIndex(
zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
BuildFastLoop(
BuildFastLoop<IntPtrT>(
var_list2, mapped_offset, zero_offset,
[=, &context_index](Node* offset) {
[&](TNode<IntPtrT> offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
the_hole);
StoreNoWriteBarrier(MachineRepresentation::kTagged,
......@@ -305,7 +306,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
BIntToSmi(context_index.value()));
Increment(&context_index);
},
-kTaggedSize, INTPTR_PARAMETERS);
-kTaggedSize);
result.Bind(argument_object);
Goto(&done);
......
......@@ -224,16 +224,8 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
ForEachDirection direction, TNode<JSTypedArray> typed_array) {
VariableList list({&a_, &k_, &to_}, zone());
FastLoopBody body = [&](Node* index) {
GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached);
TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
auto value = LoadFixedTypedArrayElementAsTagged(
data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
k_.Bind(index);
a_.Bind(processor(this, value, index));
};
Node* start = SmiConstant(0);
Node* end = len_;
TNode<Smi> start = SmiConstant(0);
TNode<Smi> end = CAST(len_);
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPost;
int incr = 1;
if (direction == ForEachDirection::kReverse) {
......@@ -241,8 +233,17 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
advance_mode = IndexAdvanceMode::kPre;
incr = -1;
}
BuildFastLoop(list, start, end, body, incr, ParameterMode::SMI_PARAMETERS,
advance_mode);
BuildFastLoop<Smi>(
list, start, end,
[&](TNode<Smi> index) {
GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached);
TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
TNode<Object> value = LoadFixedTypedArrayElementAsTagged(
data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
k_.Bind(index);
a_.Bind(processor(this, value, index));
},
incr, advance_mode);
}
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
......
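
In the typed-array visitor above, the reverse branch (with start and end swapped in the elided context) flips the increment to -1 and switches the advance mode to kPre, so the index is moved before each body invocation rather than after. A plain C++ model of the two advance modes (the enum name matches the diff; the loop itself is a simplification, not V8 code):

// --- illustrative sketch, not part of the commit ---
#include <iostream>

enum class IndexAdvanceMode { kPre, kPost };

template <typename Body>
void FastLoop(long start, long end, Body body, int increment,
              IndexAdvanceMode mode) {
  long i = start;
  while (i != end) {
    if (mode == IndexAdvanceMode::kPre) i += increment;   // advance, then visit
    body(i);
    if (mode == IndexAdvanceMode::kPost) i += increment;  // visit, then advance
  }
}

int main() {
  // Forward walk, kPost: visits 0 1 2.
  FastLoop(0, 3, [](long i) { std::cout << i << ' '; }, 1,
           IndexAdvanceMode::kPost);
  std::cout << '\n';
  // Reverse walk, kPre with a negative increment (as in the kReverse branch
  // once start/end are swapped to len..0): visits 2 1 0.
  FastLoop(3, 0, [](long i) { std::cout << i << ' '; }, -1,
           IndexAdvanceMode::kPre);
  std::cout << '\n';
}
// --- end sketch ---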
......@@ -259,7 +259,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
&if_doubles);
BIND(&if_smiorobjects);
{
auto set_entry = [&](Node* index) {
auto set_entry = [&](TNode<IntPtrT> index) {
TNode<Object> element = LoadAndNormalizeFixedArrayElement(
CAST(elements), UncheckedCast<IntPtrT>(index));
AddConstructorEntry(variant, context, collection, add_func, element,
......@@ -270,8 +270,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
// elements, a fast loop is used. This assumes that adding an element
// to the collection does not call user code that could mutate the elements
// or collection.
BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1,
IndexAdvanceMode::kPost);
Goto(&exit);
}
BIND(&if_doubles);
......@@ -286,13 +286,13 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
element);
} else {
DCHECK(variant == kSet || variant == kWeakSet);
auto set_entry = [&](Node* index) {
auto set_entry = [&](TNode<IntPtrT> index) {
TNode<Object> entry = LoadAndNormalizeFixedDoubleArrayElement(
elements, UncheckedCast<IntPtrT>(index));
AddConstructorEntry(variant, context, collection, add_func, entry);
};
BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1,
IndexAdvanceMode::kPost);
Goto(&exit);
}
}
......
......@@ -263,13 +263,12 @@ TNode<Context> ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
TNode<Oddball> undefined = UndefinedConstant();
TNode<IntPtrT> start_offset = IntPtrConstant(Context::kTodoHeaderSize);
CodeStubAssembler::VariableList vars(0, zone());
BuildFastLoop(
BuildFastLoop<IntPtrT>(
vars, start_offset, size,
[=](SloppyTNode<IntPtrT> offset) {
StoreObjectFieldNoWriteBarrier(
function_context, UncheckedCast<IntPtrT>(offset), undefined);
[=](TNode<IntPtrT> offset) {
StoreObjectFieldNoWriteBarrier(function_context, offset, undefined);
},
kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
kTaggedSize, IndexAdvanceMode::kPost);
return function_context;
}
......@@ -571,18 +570,18 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
BIND(&continue_with_write_barrier);
{
Comment("Copy in-object properties slow");
BuildFastLoop(
BuildFastLoop<IntPtrT>(
offset.value(), instance_size,
[=](SloppyTNode<IntPtrT> offset) {
[=](TNode<IntPtrT> offset) {
// TODO(ishell): value decompression is not necessary here.
TNode<Object> field = LoadObjectField(boilerplate, offset);
StoreObjectFieldNoWriteBarrier(copy, offset, field);
},
kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
kTaggedSize, IndexAdvanceMode::kPost);
Comment("Copy mutable HeapNumber values");
BuildFastLoop(
BuildFastLoop<IntPtrT>(
offset.value(), instance_size,
[=](SloppyTNode<IntPtrT> offset) {
[=](TNode<IntPtrT> offset) {
TNode<Object> field = LoadObjectField(copy, offset);
Label copy_heap_number(this, Label::kDeferred), continue_loop(this);
// We only have to clone complex field values.
......@@ -601,7 +600,7 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
}
BIND(&continue_loop);
},
kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
kTaggedSize, IndexAdvanceMode::kPost);
Goto(&done_init);
}
BIND(&done_init);
......
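
The constructor-builtins lambdas above also drop SloppyTNode<IntPtrT> in favor of TNode<IntPtrT>, while some CSA function parameters further down move from Node* to SloppyTNode. To a first approximation (the real V8 wrappers carry more machinery than this), SloppyTNode<T> still accepts an untyped Node* implicitly, whereas TNode<T> forces the caller to state the type; a hypothetical, minimal illustration of that difference:

// --- illustrative sketch, not part of the commit ---
struct Node {};  // stand-in for an untyped compiler node

template <typename T>
struct TNode {
  explicit TNode(Node* n) : node(n) {}  // strict: needs an explicit cast
  Node* node;
};

template <typename T>
struct SloppyTNode : TNode<T> {
  SloppyTNode(Node* n) : TNode<T>(n) {}  // implicit conversion from Node*
};

struct IntPtrT {};

void TakesSloppy(SloppyTNode<IntPtrT>) {}
void TakesStrict(TNode<IntPtrT>) {}

int main() {
  Node raw;
  TakesSloppy(&raw);                  // compiles: an untyped pointer slips through
  // TakesStrict(&raw);               // would not compile
  TakesStrict(TNode<IntPtrT>(&raw));  // caller must name the type explicitly
}
// --- end sketch ---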
......@@ -61,7 +61,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
Node* const dst_ptr = PointerToSeqStringData(dst);
TVARIABLE(IntPtrT, var_cursor, IntPtrConstant(0));
TNode<RawPtrT> const start_address = to_direct.PointerToData(&call_c);
TNode<IntPtrT> const start_address =
ReinterpretCast<IntPtrT>(to_direct.PointerToData(&call_c));
TNode<IntPtrT> const end_address =
Signed(IntPtrAdd(start_address, ChangeUint32ToWord(length)));
......@@ -71,9 +72,9 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
VARIABLE(var_did_change, MachineRepresentation::kWord32, Int32Constant(0));
VariableList push_vars({&var_cursor, &var_did_change}, zone());
BuildFastLoop(
BuildFastLoop<IntPtrT>(
push_vars, start_address, end_address,
[=, &var_cursor, &var_did_change](Node* current) {
[&](TNode<IntPtrT> current) {
TNode<Uint8T> c = Load<Uint8T>(current);
TNode<Uint8T> lower =
Load<Uint8T>(to_lower_table_addr, ChangeInt32ToIntPtr(c));
......@@ -85,7 +86,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
Increment(&var_cursor);
},
kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
kCharSize, IndexAdvanceMode::kPost);
// Return the original string if it remained unchanged in order to preserve
// e.g. internalization and private symbols (such as the preserved object
......
......@@ -679,9 +679,9 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
TVARIABLE(IntPtrT, var_to_offset, to_offset);
VariableList vars({&var_to_offset}, zone());
BuildFastLoop(
BuildFastLoop<IntPtrT>(
vars, IntPtrZero(), limit_offset,
[=, &var_to_offset](Node* offset) {
[&](TNode<IntPtrT> offset) {
TNode<Int32T> value = UncheckedCast<Int32T>(Load(
MachineType::Int32(), static_offsets_vector_address, offset));
TNode<Smi> smi_value = SmiFromInt32(value);
......@@ -689,7 +689,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
var_to_offset.value(), smi_value);
Increment(&var_to_offset, kTaggedSize);
},
kInt32Size, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
kInt32Size, IndexAdvanceMode::kPost);
}
var_result = match_info;
......
......@@ -1366,9 +1366,9 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
TNode<IntPtrT> string_data_offset = to_direct.offset();
TNode<FixedArray> cache = SingleCharacterStringCacheConstant();
BuildFastLoop(
BuildFastLoop<IntPtrT>(
IntPtrConstant(0), length,
[&](Node* index) {
[&](TNode<IntPtrT> index) {
// TODO(jkummerow): Implement a CSA version of DisallowHeapAllocation
// and use that to guard ToDirectStringAssembler.PointerToData().
CSA_ASSERT(this, WordEqual(to_direct.PointerToData(&call_runtime),
......@@ -1385,7 +1385,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
StoreFixedArrayElement(elements, index, entry);
},
1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
1, IndexAdvanceMode::kPost);
TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, context);
result_array = AllocateJSArray(array_map, elements, length_smi);
......
......@@ -735,9 +735,9 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
DispatchTypedArrayByElementsKind(
elements_kind,
[&](ElementsKind kind, int size, int typed_array_fun_index) {
BuildFastLoop(
BuildFastLoop<IntPtrT>(
IntPtrConstant(0), length,
[&](Node* index) {
[&](TNode<IntPtrT> index) {
TNode<Object> item = args.AtIndex(index, INTPTR_PARAMETERS);
Node* value =
PrepareValueForWriteToTypedArray(item, kind, context);
......@@ -755,7 +755,7 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
StoreElement(backing_store, kind, index, value,
INTPTR_PARAMETERS);
},
1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
1, IndexAdvanceMode::kPost);
});
// 8. Return newObj.
......@@ -948,9 +948,9 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
TNode<Int32T> elements_kind = LoadElementsKind(target_obj.value());
// 7e/13 : Copy the elements
BuildFastLoop(
BuildFastLoop<Smi>(
SmiConstant(0), final_length.value(),
[&](Node* index) {
[&](TNode<Smi> index) {
TNode<Object> const k_value =
GetProperty(context, final_source.value(), index);
......@@ -978,7 +978,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
SMI_PARAMETERS);
});
},
1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
1, IndexAdvanceMode::kPost);
args.PopAndReturn(target_obj.value());
......
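
The TypedArrayFrom loop keeps Smi indices, presumably because the index doubles as a property key for GetProperty, whereas most other call sites move to raw IntPtrT indices. The distinction matters because the same logical index has two encodings; a toy model of that (the shift-by-one tagging shown here is a simplification, not V8's full Smi layout on every configuration):

// --- illustrative sketch, not part of the commit ---
#include <cstdint>
#include <iostream>

struct Smi {
  intptr_t tagged;
  static Smi FromInt(intptr_t v) { return {v << 1}; }  // tag: shift left, low bit 0
  intptr_t ToInt() const { return tagged >> 1; }
};
struct IntPtrT { intptr_t value; };

int main() {
  Smi index = Smi::FromInt(3);
  IntPtrT raw{3};
  std::cout << index.tagged << ' ' << index.ToInt() << ' ' << raw.value << '\n';
  // prints: 6 3 3 -- the same logical index, two encodings; Increment() in the
  // diff therefore builds its "+1" constant per index type via
  // IntPtrOrSmiConstant<TIndex>.
}
// --- end sketch ---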
......@@ -3947,21 +3947,20 @@ Node* CodeStubAssembler::AllocateStruct(Node* map, AllocationFlags flags) {
TNode<IntPtrT> size = TimesTaggedSize(LoadMapInstanceSizeInWords(map));
TNode<HeapObject> object = Allocate(size, flags);
StoreMapNoWriteBarrier(object, map);
InitializeStructBody(object, map, size, Struct::kHeaderSize);
InitializeStructBody(object, size, Struct::kHeaderSize);
return object;
}
void CodeStubAssembler::InitializeStructBody(Node* object, Node* map,
Node* size, int start_offset) {
CSA_SLOW_ASSERT(this, IsMap(map));
void CodeStubAssembler::InitializeStructBody(TNode<HeapObject> object,
TNode<IntPtrT> size,
int start_offset) {
Comment("InitializeStructBody");
TNode<Oddball> filler = UndefinedConstant();
// Calculate the untagged field addresses.
object = BitcastTaggedToWord(object);
TNode<WordT> start_address =
IntPtrAdd(object, IntPtrConstant(start_offset - kHeapObjectTag));
TNode<WordT> end_address =
IntPtrSub(IntPtrAdd(object, size), IntPtrConstant(kHeapObjectTag));
TNode<IntPtrT> start_address =
IntPtrAdd(BitcastTaggedToWord(object),
IntPtrConstant(start_offset - kHeapObjectTag));
TNode<IntPtrT> end_address = IntPtrAdd(start_address, size);
StoreFieldsNoWriteBarrier(start_address, end_address, filler);
}
......@@ -3983,8 +3982,9 @@ TNode<JSObject> CodeStubAssembler::AllocateJSObjectFromMap(
}
void CodeStubAssembler::InitializeJSObjectFromMap(
Node* object, Node* map, Node* instance_size, Node* properties,
Node* elements, SlackTrackingMode slack_tracking_mode) {
SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
SloppyTNode<IntPtrT> instance_size, Node* properties, Node* elements,
SlackTrackingMode slack_tracking_mode) {
CSA_SLOW_ASSERT(this, IsMap(map));
// This helper assumes that the object is in new-space, as guarded by the
// check in AllocatedJSObjectFromMap.
......@@ -4015,7 +4015,8 @@ void CodeStubAssembler::InitializeJSObjectFromMap(
}
void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
Node* object, Node* map, Node* instance_size, int start_offset) {
SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
SloppyTNode<IntPtrT> instance_size, int start_offset) {
STATIC_ASSERT(Map::kNoSlackTracking == 0);
CSA_ASSERT(
this, IsClearWord32<Map::ConstructionCounterBits>(LoadMapBitField3(map)));
......@@ -4024,8 +4025,8 @@ void CodeStubAssembler::InitializeJSObjectBodyNoSlackTracking(
}
void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
Node* object, Node* map, Node* instance_size) {
CSA_SLOW_ASSERT(this, IsMap(map));
SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
SloppyTNode<IntPtrT> instance_size) {
Comment("InitializeJSObjectBodyNoSlackTracking");
// Perform in-object slack tracking if requested.
......@@ -4053,9 +4054,9 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
// The object still has in-object slack, therefore the |used_or_unused|
// field contains the "used" value.
TNode<UintPtrT> used_size = TimesTaggedSize(ChangeUint32ToWord(
TNode<IntPtrT> used_size = Signed(TimesTaggedSize(ChangeUint32ToWord(
LoadObjectField(map, Map::kUsedOrUnusedInstanceSizeInWordsOffset,
MachineType::Uint8())));
MachineType::Uint8()))));
Comment("iInitialize filler fields");
InitializeFieldsWithRoot(object, used_size, instance_size,
......@@ -4084,19 +4085,19 @@ void CodeStubAssembler::InitializeJSObjectBodyWithSlackTracking(
BIND(&end);
}
void CodeStubAssembler::StoreFieldsNoWriteBarrier(Node* start_address,
Node* end_address,
Node* value) {
void CodeStubAssembler::StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
TNode<IntPtrT> end_address,
TNode<Object> value) {
Comment("StoreFieldsNoWriteBarrier");
CSA_ASSERT(this, WordIsAligned(start_address, kTaggedSize));
CSA_ASSERT(this, WordIsAligned(end_address, kTaggedSize));
BuildFastLoop(
BuildFastLoop<IntPtrT>(
start_address, end_address,
[this, value](Node* current) {
[=](TNode<IntPtrT> current) {
UnsafeStoreNoWriteBarrier(MachineRepresentation::kTagged, current,
value);
},
kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
kTaggedSize, IndexAdvanceMode::kPost);
}
TNode<BoolT> CodeStubAssembler::IsValidFastJSArrayCapacity(
......@@ -5409,7 +5410,7 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
ElementOffsetFromIndex(to_index, to_kind, header_size);
TNode<IntPtrT> byte_count =
ElementOffsetFromIndex(character_count, from_kind);
TNode<WordT> limit_offset = IntPtrAdd(from_offset, byte_count);
TNode<IntPtrT> limit_offset = IntPtrAdd(from_offset, byte_count);
// Prepare the fast loop
MachineType type =
......@@ -5427,9 +5428,9 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
(ToInt32Constant(from_index, &from_index_constant) &&
ToInt32Constant(to_index, &to_index_constant) &&
from_index_constant == to_index_constant));
BuildFastLoop(
BuildFastLoop<IntPtrT>(
vars, from_offset, limit_offset,
[=, &current_to_offset](Node* offset) {
[&](TNode<IntPtrT> offset) {
Node* value = Load(type, from_string, offset);
StoreNoWriteBarrier(rep, to_string,
index_same ? offset : current_to_offset.value(),
......@@ -5438,7 +5439,7 @@ void CodeStubAssembler::CopyStringCharacters(Node* from_string, Node* to_string,
Increment(&current_to_offset, to_increment);
}
},
from_increment, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
from_increment, IndexAdvanceMode::kPost);
}
Node* CodeStubAssembler::LoadElementAndPrepareForStore(Node* array,
......@@ -8381,6 +8382,8 @@ void CodeStubAssembler::Increment(TVariable<TIndex>* variable, int value) {
*variable =
IntPtrOrSmiAdd(variable->value(), IntPtrOrSmiConstant<TIndex>(value));
}
// Instantiate Increment for Smi and IntPtrT.
template void CodeStubAssembler::Increment<Smi>(TVariable<Smi>* variable,
int value);
template void CodeStubAssembler::Increment<IntPtrT>(
......@@ -8945,16 +8948,16 @@ void CodeStubAssembler::LookupLinear(TNode<Name> unique_name,
first_inclusive,
IntPtrMul(ChangeInt32ToIntPtr(number_of_valid_entries), factor));
BuildFastLoop(
BuildFastLoop<IntPtrT>(
last_exclusive, first_inclusive,
[=](SloppyTNode<IntPtrT> name_index) {
[=](TNode<IntPtrT> name_index) {
TNode<MaybeObject> element =
LoadArrayElement(array, Array::kHeaderSize, name_index);
TNode<Name> candidate_name = CAST(element);
*var_name_index = name_index;
GotoIf(TaggedEqual(candidate_name, unique_name), if_found);
},
-Array::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPre);
-Array::kEntrySize, IndexAdvanceMode::kPre);
Goto(if_not_found);
}
......@@ -9142,12 +9145,9 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
Goto(&descriptor_array_loop);
BIND(&descriptor_array_loop);
BuildFastLoop(
BuildFastLoop<IntPtrT>(
list, var_start_key_index.value(), var_end_key_index.value(),
[=, &var_stable, &var_has_symbol, &var_is_symbol_processing_loop,
&var_start_key_index, &var_end_key_index](Node* index) {
TNode<IntPtrT> descriptor_key_index =
TNode<IntPtrT>::UncheckedCast(index);
[&](TNode<IntPtrT> descriptor_key_index) {
TNode<Name> next_key =
LoadKeyByKeyIndex(descriptors, descriptor_key_index);
......@@ -9282,7 +9282,7 @@ void CodeStubAssembler::ForEachEnumerableOwnProperty(
}
BIND(&next_iteration);
},
DescriptorArray::kEntrySize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
DescriptorArray::kEntrySize, IndexAdvanceMode::kPost);
if (mode == kEnumerationOrder) {
Label done(this);
......@@ -10245,6 +10245,7 @@ TNode<IntPtrT> CodeStubAssembler::ElementOffsetFromIndex(
return IntPtrAdd(IntPtrConstant(base_size), Signed(shifted_index));
}
// Instantiate ElementOffsetFromIndex for Smi and IntPtrT.
template V8_EXPORT_PRIVATE TNode<IntPtrT>
CodeStubAssembler::ElementOffsetFromIndex<Smi>(TNode<Smi> index_node,
ElementsKind kind,
......@@ -11250,30 +11251,11 @@ TNode<Int32T> CodeStubAssembler::LoadElementsKind(
return elements_kind;
}
Node* CodeStubAssembler::BuildFastLoop(
const CodeStubAssembler::VariableList& vars, Node* start_index,
Node* end_index, const FastLoopBody& body, int increment,
ParameterMode parameter_mode, IndexAdvanceMode advance_mode) {
CSA_SLOW_ASSERT(this, MatchesParameterMode(start_index, parameter_mode));
CSA_SLOW_ASSERT(this, MatchesParameterMode(end_index, parameter_mode));
if (parameter_mode == SMI_PARAMETERS) {
return BuildFastLoop(vars, ReinterpretCast<Smi>(start_index),
ReinterpretCast<Smi>(end_index), body, increment,
advance_mode);
} else {
DCHECK_EQ(INTPTR_PARAMETERS, parameter_mode);
return BuildFastLoop(vars, ReinterpretCast<IntPtrT>(start_index),
ReinterpretCast<IntPtrT>(end_index), body, increment,
advance_mode);
}
}
template <typename TIndex>
TNode<TIndex> CodeStubAssembler::BuildFastLoop(const VariableList& vars,
TNode<TIndex> start_index,
TNode<TIndex> end_index,
const FastLoopBody& body,
const FastLoopBody<TIndex>& body,
int increment,
IndexAdvanceMode advance_mode) {
TVARIABLE(TIndex, var, start_index);
......@@ -11312,6 +11294,16 @@ TNode<TIndex> CodeStubAssembler::BuildFastLoop(const VariableList& vars,
return var.value();
}
// Instantiate BuildFastLoop for Smi and IntPtrT.
template TNode<Smi> CodeStubAssembler::BuildFastLoop<Smi>(
const VariableList& vars, TNode<Smi> start_index, TNode<Smi> end_index,
const FastLoopBody<Smi>& body, int increment,
IndexAdvanceMode advance_mode);
template TNode<IntPtrT> CodeStubAssembler::BuildFastLoop<IntPtrT>(
const VariableList& vars, TNode<IntPtrT> start_index,
TNode<IntPtrT> end_index, const FastLoopBody<IntPtrT>& body, int increment,
IndexAdvanceMode advance_mode);
void CodeStubAssembler::BuildFastFixedArrayForEach(
const CodeStubAssembler::VariableList& vars, Node* fixed_array,
ElementsKind kind, Node* first_element_inclusive,
......@@ -11333,17 +11325,15 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
if (direction == ForEachDirection::kForward) {
for (int i = first_val; i < last_val; ++i) {
TNode<IntPtrT> index = IntPtrConstant(i);
TNode<IntPtrT> offset =
ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
FixedArray::kHeaderSize - kHeapObjectTag);
TNode<IntPtrT> offset = ElementOffsetFromIndex(
index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
body(fixed_array, offset);
}
} else {
for (int i = last_val - 1; i >= first_val; --i) {
TNode<IntPtrT> index = IntPtrConstant(i);
TNode<IntPtrT> offset =
ElementOffsetFromIndex(index, kind, INTPTR_PARAMETERS,
FixedArray::kHeaderSize - kHeapObjectTag);
TNode<IntPtrT> offset = ElementOffsetFromIndex(
index, kind, FixedArray::kHeaderSize - kHeapObjectTag);
body(fixed_array, offset);
}
}
......@@ -11360,11 +11350,10 @@ void CodeStubAssembler::BuildFastFixedArrayForEach(
if (direction == ForEachDirection::kReverse) std::swap(start, limit);
int increment = IsDoubleElementsKind(kind) ? kDoubleSize : kTaggedSize;
BuildFastLoop(
BuildFastLoop<IntPtrT>(
vars, start, limit,
[fixed_array, &body](Node* offset) { body(fixed_array, offset); },
[&](TNode<IntPtrT> offset) { body(fixed_array, offset); },
direction == ForEachDirection::kReverse ? -increment : increment,
INTPTR_PARAMETERS,
direction == ForEachDirection::kReverse ? IndexAdvanceMode::kPre
: IndexAdvanceMode::kPost);
}
......@@ -11375,22 +11364,21 @@ void CodeStubAssembler::GotoIfFixedArraySizeDoesntFitInNewSpace(
doesnt_fit);
}
void CodeStubAssembler::InitializeFieldsWithRoot(Node* object,
Node* start_offset,
Node* end_offset,
void CodeStubAssembler::InitializeFieldsWithRoot(TNode<HeapObject> object,
TNode<IntPtrT> start_offset,
TNode<IntPtrT> end_offset,
RootIndex root_index) {
CSA_SLOW_ASSERT(this, TaggedIsNotSmi(object));
start_offset = IntPtrAdd(start_offset, IntPtrConstant(-kHeapObjectTag));
end_offset = IntPtrAdd(end_offset, IntPtrConstant(-kHeapObjectTag));
TNode<Object> root_value = LoadRoot(root_index);
BuildFastLoop(
BuildFastLoop<IntPtrT>(
end_offset, start_offset,
[this, object, root_value](Node* current) {
[=](TNode<IntPtrT> current) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, object, current,
root_value);
},
-kTaggedSize, INTPTR_PARAMETERS,
CodeStubAssembler::IndexAdvanceMode::kPre);
-kTaggedSize, CodeStubAssembler::IndexAdvanceMode::kPre);
}
void CodeStubAssembler::BranchIfNumberRelationalComparison(
......@@ -13638,14 +13626,13 @@ void CodeStubArguments::ForEach(
TNode<IntPtrT> end = assembler_->IntPtrSub(
assembler_->UncheckedCast<IntPtrT>(base_),
assembler_->ElementOffsetFromIndex(last, SYSTEM_POINTER_ELEMENTS, mode));
assembler_->BuildFastLoop(
assembler_->BuildFastLoop<IntPtrT>(
vars, start, end,
[this, &body](Node* current) {
[&](TNode<IntPtrT> current) {
Node* arg = assembler_->Load(MachineType::AnyTagged(), current);
body(arg);
},
-kSystemPointerSize, CodeStubAssembler::INTPTR_PARAMETERS,
CodeStubAssembler::IndexAdvanceMode::kPost);
-kSystemPointerSize, CodeStubAssembler::IndexAdvanceMode::kPost);
}
void CodeStubArguments::PopAndReturn(Node* value) {
......
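
Because the templated BuildFastLoop (like Increment and ElementOffsetFromIndex) is defined in code-stub-assembler.cc rather than in the header, the .cc file has to explicitly instantiate it for every index type callers may use, which is what the new `template TNode<Smi> ... / template TNode<IntPtrT> ...` lines above do. A generic, hypothetical illustration of that pattern (RunLoop and the file names are placeholders):

// --- illustrative sketch, not part of the commit ---
// widget.h would only declare the template:
//   template <typename TIndex> TIndex RunLoop(TIndex start, TIndex end);

// widget.cc: the definition lives out of line...
#include <iostream>

template <typename TIndex>
TIndex RunLoop(TIndex start, TIndex end) {
  TIndex i = start;
  while (i != end) ++i;
  return i;
}

// ...so every supported index type must be instantiated explicitly here, or
// callers in other translation units fail to link.
template int RunLoop<int>(int, int);
template long RunLoop<long>(long, long);

int main() { std::cout << RunLoop<int>(0, 5) << '\n'; }  // prints 5
// --- end sketch ---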
......@@ -1663,8 +1663,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value,
Label* bailout);
void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address,
Node* value);
void StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
TNode<IntPtrT> end_address,
TNode<Object> value);
Node* AllocateCellWithValue(Node* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
......@@ -1763,7 +1764,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<CollectionType> AllocateSmallOrderedHashTable(TNode<IntPtrT> capacity);
Node* AllocateStruct(Node* map, AllocationFlags flags = kNone);
void InitializeStructBody(Node* object, Node* map, Node* size,
void InitializeStructBody(TNode<HeapObject> object, TNode<IntPtrT> size,
int start_offset = Struct::kHeaderSize);
TNode<JSObject> AllocateJSObjectFromMap(
......@@ -1772,14 +1773,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectFromMap(
Node* object, Node* map, Node* instance_size, Node* properties = nullptr,
SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
SloppyTNode<IntPtrT> instance_size, Node* properties = nullptr,
Node* elements = nullptr,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map,
Node* instance_size);
void InitializeJSObjectBodyWithSlackTracking(
SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
SloppyTNode<IntPtrT> instance_size);
void InitializeJSObjectBodyNoSlackTracking(
Node* object, Node* map, Node* instance_size,
SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
SloppyTNode<IntPtrT> instance_size,
int start_offset = JSObject::kHeaderSize);
TNode<BoolT> IsValidFastJSArrayCapacity(Node* capacity,
......@@ -3310,39 +3314,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class IndexAdvanceMode { kPre, kPost };
// TODO(v8:9708): typify index parameter.
using FastLoopBody = std::function<void(Node* index)>;
template <typename TIndex>
using FastLoopBody = std::function<void(TNode<TIndex> index)>;
template <typename TIndex>
TNode<TIndex> BuildFastLoop(
const VariableList& var_list, TNode<TIndex> start_index,
TNode<TIndex> end_index, const FastLoopBody& body, int increment,
TNode<TIndex> end_index, const FastLoopBody<TIndex>& body, int increment,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
template <typename TIndex>
TNode<TIndex> BuildFastLoop(
TNode<TIndex> start_index, TNode<TIndex> end_index,
const FastLoopBody& body, int increment,
const FastLoopBody<TIndex>& body, int increment,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
increment, advance_mode);
}
// TODO(v8:9708): remove once all uses are ported.
Node* BuildFastLoop(const VariableList& var_list, Node* start_index,
Node* end_index, const FastLoopBody& body, int increment,
ParameterMode parameter_mode,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
// TODO(v8:9708): remove once all uses are ported.
Node* BuildFastLoop(Node* start_index, Node* end_index,
const FastLoopBody& body, int increment,
ParameterMode parameter_mode,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
increment, parameter_mode, advance_mode);
}
enum class ForEachDirection { kForward, kReverse };
using FastFixedArrayForEachBody =
......@@ -3387,8 +3376,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* doesnt_fit, int base_size,
ParameterMode mode);
void InitializeFieldsWithRoot(Node* object, Node* start_offset,
Node* end_offset, RootIndex root);
void InitializeFieldsWithRoot(TNode<HeapObject> object,
TNode<IntPtrT> start_offset,
TNode<IntPtrT> end_offset, RootIndex root);
Node* RelationalComparison(Operation op, SloppyTNode<Object> left,
SloppyTNode<Object> right,
......
......@@ -3882,30 +3882,28 @@ void AccessorAssembler::GenerateCloneObjectIC() {
// Just copy the fields as raw data (pretending that there are no mutable
// HeapNumbers). This doesn't need write barriers.
BuildFastLoop(
BuildFastLoop<IntPtrT>(
source_start, source_size,
[=](Node* field_index) {
TNode<IntPtrT> field_offset =
TimesTaggedSize(UncheckedCast<IntPtrT>(field_index));
[=](TNode<IntPtrT> field_index) {
TNode<IntPtrT> field_offset = TimesTaggedSize(field_index);
TNode<TaggedT> field =
LoadObjectField<TaggedT>(CAST(source), field_offset);
TNode<IntPtrT> result_offset =
IntPtrAdd(field_offset, field_offset_difference);
StoreObjectFieldNoWriteBarrier(object, result_offset, field);
},
1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
1, IndexAdvanceMode::kPost);
// If mutable HeapNumbers can occur, we need to go through the {object}
// again here and properly clone them. We use a second loop here to
// ensure that the GC (and heap verifier) always sees properly initialized
// objects, i.e. never hits undefined values in double fields.
if (!FLAG_unbox_double_fields) {
BuildFastLoop(
BuildFastLoop<IntPtrT>(
source_start, source_size,
[=](Node* field_index) {
TNode<IntPtrT> result_offset =
IntPtrAdd(TimesTaggedSize(UncheckedCast<IntPtrT>(field_index)),
field_offset_difference);
[=](TNode<IntPtrT> field_index) {
TNode<IntPtrT> result_offset = IntPtrAdd(
TimesTaggedSize(field_index), field_offset_difference);
TNode<Object> field = LoadObjectField(object, result_offset);
Label if_done(this), if_mutableheapnumber(this, Label::kDeferred);
GotoIf(TaggedIsSmi(field), &if_done);
......@@ -3919,7 +3917,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
}
BIND(&if_done);
},
1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
1, IndexAdvanceMode::kPost);
}
Return(object);
......