Commit 8a1a2867 authored by Igor Sheludko, committed by Commit Bot

[csa] Remove ParameterMode from CSA::BuildFastLoop
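
For example, a typical call site changes as follows (length here is a
placeholder TNode<IntPtrT>; the shapes are taken from the hunks below):

  // Before: untyped Node* index plus an explicit ParameterMode argument.
  BuildFastLoop(IntPtrConstant(0), length,
                [&](Node* index) { /* loop body */ },
                1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);

  // After: the index representation is the template argument and the body
  // receives a typed TNode<IntPtrT>, so no ParameterMode is needed.
  BuildFastLoop<IntPtrT>(IntPtrConstant(0), length,
                         [&](TNode<IntPtrT> index) { /* loop body */ },
                         1, IndexAdvanceMode::kPost);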

Bug: v8:9708
Change-Id: I305cc007a4e7302c8587b999cbb11f23ced4cfd3
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1800579
Commit-Queue: Igor Sheludko <ishell@chromium.org>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Cr-Commit-Position: refs/heads/master@{#63735}
parent 6cf125a9
@@ -262,16 +262,17 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
TVARIABLE(IntPtrT, current_argument,
Signed(arguments.AtIndexPtr(info.argument_count, mode)));
VariableList var_list1({&current_argument}, zone());
mapped_offset = UncheckedCast<IntPtrT>(BuildFastLoop(
mapped_offset = BuildFastLoop<IntPtrT>(
var_list1, argument_offset, mapped_offset,
[this, elements, &current_argument](Node* offset) {
[&](TNode<IntPtrT> offset) {
Increment(&current_argument, kSystemPointerSize);
TNode<Object> arg = LoadBufferObject(
ReinterpretCast<RawPtrT>(current_argument.value()), 0);
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
arg);
return;
},
-kTaggedSize, INTPTR_PARAMETERS));
-kTaggedSize);
// Copy the parameter slots and the holes in the arguments.
// We need to fill in mapped_count slots. They index the context,
@@ -295,9 +296,9 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
IntPtrConstant(kParameterMapHeaderSize - FixedArray::kHeaderSize));
TNode<IntPtrT> zero_offset = ElementOffsetFromIndex(
zero, PACKED_ELEMENTS, mode, FixedArray::kHeaderSize - kHeapObjectTag);
BuildFastLoop(
BuildFastLoop<IntPtrT>(
var_list2, mapped_offset, zero_offset,
[=, &context_index](Node* offset) {
[&](TNode<IntPtrT> offset) {
StoreNoWriteBarrier(MachineRepresentation::kTagged, elements, offset,
the_hole);
StoreNoWriteBarrier(MachineRepresentation::kTagged,
@@ -305,7 +306,7 @@ Node* ArgumentsBuiltinsAssembler::EmitFastNewSloppyArguments(Node* context,
BIntToSmi(context_index.value()));
Increment(&context_index);
},
-kTaggedSize, INTPTR_PARAMETERS);
-kTaggedSize);
result.Bind(argument_object);
Goto(&done);
@@ -224,16 +224,8 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
ForEachDirection direction, TNode<JSTypedArray> typed_array) {
VariableList list({&a_, &k_, &to_}, zone());
FastLoopBody body = [&](Node* index) {
GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached);
TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
auto value = LoadFixedTypedArrayElementAsTagged(
data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
k_.Bind(index);
a_.Bind(processor(this, value, index));
};
Node* start = SmiConstant(0);
Node* end = len_;
TNode<Smi> start = SmiConstant(0);
TNode<Smi> end = CAST(len_);
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPost;
int incr = 1;
if (direction == ForEachDirection::kReverse) {
@@ -241,8 +233,17 @@ void ArrayBuiltinsAssembler::VisitAllTypedArrayElements(
advance_mode = IndexAdvanceMode::kPre;
incr = -1;
}
BuildFastLoop(list, start, end, body, incr, ParameterMode::SMI_PARAMETERS,
advance_mode);
BuildFastLoop<Smi>(
list, start, end,
[&](TNode<Smi> index) {
GotoIf(IsDetachedBuffer(CAST(array_buffer)), detached);
TNode<RawPtrT> data_ptr = LoadJSTypedArrayBackingStore(typed_array);
TNode<Object> value = LoadFixedTypedArrayElementAsTagged(
data_ptr, index, source_elements_kind_, SMI_PARAMETERS);
k_.Bind(index);
a_.Bind(processor(this, value, index));
},
incr, advance_mode);
}
// Perform ArraySpeciesCreate (ES6 #sec-arrayspeciescreate).
@@ -259,7 +259,7 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
&if_doubles);
BIND(&if_smiorobjects);
{
auto set_entry = [&](Node* index) {
auto set_entry = [&](TNode<IntPtrT> index) {
TNode<Object> element = LoadAndNormalizeFixedArrayElement(
CAST(elements), UncheckedCast<IntPtrT>(index));
AddConstructorEntry(variant, context, collection, add_func, element,
@@ -270,8 +270,8 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
// elements, a fast loop is used. This assumes that adding an element
// to the collection does not call user code that could mutate the elements
// or collection.
BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1,
IndexAdvanceMode::kPost);
Goto(&exit);
}
BIND(&if_doubles);
@@ -286,13 +286,13 @@ void BaseCollectionsAssembler::AddConstructorEntriesFromFastJSArray(
element);
} else {
DCHECK(variant == kSet || variant == kWeakSet);
auto set_entry = [&](Node* index) {
auto set_entry = [&](TNode<IntPtrT> index) {
TNode<Object> entry = LoadAndNormalizeFixedDoubleArrayElement(
elements, UncheckedCast<IntPtrT>(index));
AddConstructorEntry(variant, context, collection, add_func, entry);
};
BuildFastLoop(IntPtrConstant(0), length, set_entry, 1,
ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
BuildFastLoop<IntPtrT>(IntPtrConstant(0), length, set_entry, 1,
IndexAdvanceMode::kPost);
Goto(&exit);
}
}
@@ -263,13 +263,12 @@ TNode<Context> ConstructorBuiltinsAssembler::EmitFastNewFunctionContext(
TNode<Oddball> undefined = UndefinedConstant();
TNode<IntPtrT> start_offset = IntPtrConstant(Context::kTodoHeaderSize);
CodeStubAssembler::VariableList vars(0, zone());
BuildFastLoop(
BuildFastLoop<IntPtrT>(
vars, start_offset, size,
[=](SloppyTNode<IntPtrT> offset) {
StoreObjectFieldNoWriteBarrier(
function_context, UncheckedCast<IntPtrT>(offset), undefined);
[=](TNode<IntPtrT> offset) {
StoreObjectFieldNoWriteBarrier(function_context, offset, undefined);
},
kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
kTaggedSize, IndexAdvanceMode::kPost);
return function_context;
}
@@ -571,18 +570,18 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
BIND(&continue_with_write_barrier);
{
Comment("Copy in-object properties slow");
BuildFastLoop(
BuildFastLoop<IntPtrT>(
offset.value(), instance_size,
[=](SloppyTNode<IntPtrT> offset) {
[=](TNode<IntPtrT> offset) {
// TODO(ishell): value decompression is not necessary here.
TNode<Object> field = LoadObjectField(boilerplate, offset);
StoreObjectFieldNoWriteBarrier(copy, offset, field);
},
kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
kTaggedSize, IndexAdvanceMode::kPost);
Comment("Copy mutable HeapNumber values");
BuildFastLoop(
BuildFastLoop<IntPtrT>(
offset.value(), instance_size,
[=](SloppyTNode<IntPtrT> offset) {
[=](TNode<IntPtrT> offset) {
TNode<Object> field = LoadObjectField(copy, offset);
Label copy_heap_number(this, Label::kDeferred), continue_loop(this);
// We only have to clone complex field values.
@@ -601,7 +600,7 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::EmitCreateShallowObjectLiteral(
}
BIND(&continue_loop);
},
kTaggedSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
kTaggedSize, IndexAdvanceMode::kPost);
Goto(&done_init);
}
BIND(&done_init);
@@ -61,7 +61,8 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
Node* const dst_ptr = PointerToSeqStringData(dst);
TVARIABLE(IntPtrT, var_cursor, IntPtrConstant(0));
TNode<RawPtrT> const start_address = to_direct.PointerToData(&call_c);
TNode<IntPtrT> const start_address =
ReinterpretCast<IntPtrT>(to_direct.PointerToData(&call_c));
TNode<IntPtrT> const end_address =
Signed(IntPtrAdd(start_address, ChangeUint32ToWord(length)));
@@ -71,9 +72,9 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
VARIABLE(var_did_change, MachineRepresentation::kWord32, Int32Constant(0));
VariableList push_vars({&var_cursor, &var_did_change}, zone());
BuildFastLoop(
BuildFastLoop<IntPtrT>(
push_vars, start_address, end_address,
[=, &var_cursor, &var_did_change](Node* current) {
[&](TNode<IntPtrT> current) {
TNode<Uint8T> c = Load<Uint8T>(current);
TNode<Uint8T> lower =
Load<Uint8T>(to_lower_table_addr, ChangeInt32ToIntPtr(c));
@@ -85,7 +86,7 @@ TF_BUILTIN(StringToLowerCaseIntl, IntlBuiltinsAssembler) {
Increment(&var_cursor);
},
kCharSize, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
kCharSize, IndexAdvanceMode::kPost);
// Return the original string if it remained unchanged in order to preserve
// e.g. internalization and private symbols (such as the preserved object
@@ -679,9 +679,9 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
TVARIABLE(IntPtrT, var_to_offset, to_offset);
VariableList vars({&var_to_offset}, zone());
BuildFastLoop(
BuildFastLoop<IntPtrT>(
vars, IntPtrZero(), limit_offset,
[=, &var_to_offset](Node* offset) {
[&](TNode<IntPtrT> offset) {
TNode<Int32T> value = UncheckedCast<Int32T>(Load(
MachineType::Int32(), static_offsets_vector_address, offset));
TNode<Smi> smi_value = SmiFromInt32(value);
@@ -689,7 +689,7 @@ TNode<HeapObject> RegExpBuiltinsAssembler::RegExpExecInternal(
var_to_offset.value(), smi_value);
Increment(&var_to_offset, kTaggedSize);
},
kInt32Size, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
kInt32Size, IndexAdvanceMode::kPost);
}
var_result = match_info;
@@ -1366,9 +1366,9 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
TNode<IntPtrT> string_data_offset = to_direct.offset();
TNode<FixedArray> cache = SingleCharacterStringCacheConstant();
BuildFastLoop(
BuildFastLoop<IntPtrT>(
IntPtrConstant(0), length,
[&](Node* index) {
[&](TNode<IntPtrT> index) {
// TODO(jkummerow): Implement a CSA version of DisallowHeapAllocation
// and use that to guard ToDirectStringAssembler.PointerToData().
CSA_ASSERT(this, WordEqual(to_direct.PointerToData(&call_runtime),
@@ -1385,7 +1385,7 @@ TNode<JSArray> StringBuiltinsAssembler::StringToArray(
StoreFixedArrayElement(elements, index, entry);
},
1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
1, IndexAdvanceMode::kPost);
TNode<Map> array_map = LoadJSArrayElementsMap(PACKED_ELEMENTS, context);
result_array = AllocateJSArray(array_map, elements, length_smi);
@@ -735,9 +735,9 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
DispatchTypedArrayByElementsKind(
elements_kind,
[&](ElementsKind kind, int size, int typed_array_fun_index) {
BuildFastLoop(
BuildFastLoop<IntPtrT>(
IntPtrConstant(0), length,
[&](Node* index) {
[&](TNode<IntPtrT> index) {
TNode<Object> item = args.AtIndex(index, INTPTR_PARAMETERS);
Node* value =
PrepareValueForWriteToTypedArray(item, kind, context);
@@ -755,7 +755,7 @@ TF_BUILTIN(TypedArrayOf, TypedArrayBuiltinsAssembler) {
StoreElement(backing_store, kind, index, value,
INTPTR_PARAMETERS);
},
1, ParameterMode::INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
1, IndexAdvanceMode::kPost);
});
// 8. Return newObj.
@@ -948,9 +948,9 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
TNode<Int32T> elements_kind = LoadElementsKind(target_obj.value());
// 7e/13 : Copy the elements
BuildFastLoop(
BuildFastLoop<Smi>(
SmiConstant(0), final_length.value(),
[&](Node* index) {
[&](TNode<Smi> index) {
TNode<Object> const k_value =
GetProperty(context, final_source.value(), index);
@@ -978,7 +978,7 @@ TF_BUILTIN(TypedArrayFrom, TypedArrayBuiltinsAssembler) {
SMI_PARAMETERS);
});
},
1, ParameterMode::SMI_PARAMETERS, IndexAdvanceMode::kPost);
1, IndexAdvanceMode::kPost);
args.PopAndReturn(target_obj.value());
This diff is collapsed.
@@ -1663,8 +1663,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
void BuildAppendJSArray(ElementsKind kind, Node* array, Node* value,
Label* bailout);
void StoreFieldsNoWriteBarrier(Node* start_address, Node* end_address,
Node* value);
void StoreFieldsNoWriteBarrier(TNode<IntPtrT> start_address,
TNode<IntPtrT> end_address,
TNode<Object> value);
Node* AllocateCellWithValue(Node* value,
WriteBarrierMode mode = UPDATE_WRITE_BARRIER);
@@ -1763,7 +1764,7 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
TNode<CollectionType> AllocateSmallOrderedHashTable(TNode<IntPtrT> capacity);
Node* AllocateStruct(Node* map, AllocationFlags flags = kNone);
void InitializeStructBody(Node* object, Node* map, Node* size,
void InitializeStructBody(TNode<HeapObject> object, TNode<IntPtrT> size,
int start_offset = Struct::kHeaderSize);
TNode<JSObject> AllocateJSObjectFromMap(
@@ -1772,14 +1773,17 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectFromMap(
Node* object, Node* map, Node* instance_size, Node* properties = nullptr,
SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
SloppyTNode<IntPtrT> instance_size, Node* properties = nullptr,
Node* elements = nullptr,
SlackTrackingMode slack_tracking_mode = kNoSlackTracking);
void InitializeJSObjectBodyWithSlackTracking(Node* object, Node* map,
Node* instance_size);
void InitializeJSObjectBodyWithSlackTracking(
SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
SloppyTNode<IntPtrT> instance_size);
void InitializeJSObjectBodyNoSlackTracking(
Node* object, Node* map, Node* instance_size,
SloppyTNode<HeapObject> object, SloppyTNode<Map> map,
SloppyTNode<IntPtrT> instance_size,
int start_offset = JSObject::kHeaderSize);
TNode<BoolT> IsValidFastJSArrayCapacity(Node* capacity,
@@ -3310,39 +3314,24 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
enum class IndexAdvanceMode { kPre, kPost };
// TODO(v8:9708): typify index parameter.
using FastLoopBody = std::function<void(Node* index)>;
template <typename TIndex>
using FastLoopBody = std::function<void(TNode<TIndex> index)>;
template <typename TIndex>
TNode<TIndex> BuildFastLoop(
const VariableList& var_list, TNode<TIndex> start_index,
TNode<TIndex> end_index, const FastLoopBody& body, int increment,
TNode<TIndex> end_index, const FastLoopBody<TIndex>& body, int increment,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
template <typename TIndex>
TNode<TIndex> BuildFastLoop(
TNode<TIndex> start_index, TNode<TIndex> end_index,
const FastLoopBody& body, int increment,
const FastLoopBody<TIndex>& body, int increment,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
increment, advance_mode);
}
// TODO(v8:9708): remove once all uses are ported.
Node* BuildFastLoop(const VariableList& var_list, Node* start_index,
Node* end_index, const FastLoopBody& body, int increment,
ParameterMode parameter_mode,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre);
// TODO(v8:9708): remove once all uses are ported.
Node* BuildFastLoop(Node* start_index, Node* end_index,
const FastLoopBody& body, int increment,
ParameterMode parameter_mode,
IndexAdvanceMode advance_mode = IndexAdvanceMode::kPre) {
return BuildFastLoop(VariableList(0, zone()), start_index, end_index, body,
increment, parameter_mode, advance_mode);
}
enum class ForEachDirection { kForward, kReverse };
using FastFixedArrayForEachBody =
@@ -3387,8 +3376,9 @@ class V8_EXPORT_PRIVATE CodeStubAssembler
Label* doesnt_fit, int base_size,
ParameterMode mode);
void InitializeFieldsWithRoot(Node* object, Node* start_offset,
Node* end_offset, RootIndex root);
void InitializeFieldsWithRoot(TNode<HeapObject> object,
TNode<IntPtrT> start_offset,
TNode<IntPtrT> end_offset, RootIndex root);
Node* RelationalComparison(Operation op, SloppyTNode<Object> left,
SloppyTNode<Object> right,
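For reference, a minimal usage sketch of the new templated interface with
loop variables (bounds and names here are made up for illustration):

  TVARIABLE(IntPtrT, var_cursor, IntPtrConstant(0));
  VariableList vars({&var_cursor}, zone());
  // BuildFastLoop now returns the final index, typed like the bounds.
  TNode<IntPtrT> final_offset = BuildFastLoop<IntPtrT>(
      vars, IntPtrConstant(0), IntPtrConstant(64),
      [&](TNode<IntPtrT> offset) {
        // Variables named in vars keep their values across iterations.
        Increment(&var_cursor, kTaggedSize);
      },
      kTaggedSize, IndexAdvanceMode::kPost);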
@@ -3882,30 +3882,28 @@ void AccessorAssembler::GenerateCloneObjectIC() {
// Just copy the fields as raw data (pretending that there are no mutable
// HeapNumbers). This doesn't need write barriers.
BuildFastLoop(
BuildFastLoop<IntPtrT>(
source_start, source_size,
[=](Node* field_index) {
TNode<IntPtrT> field_offset =
TimesTaggedSize(UncheckedCast<IntPtrT>(field_index));
[=](TNode<IntPtrT> field_index) {
TNode<IntPtrT> field_offset = TimesTaggedSize(field_index);
TNode<TaggedT> field =
LoadObjectField<TaggedT>(CAST(source), field_offset);
TNode<IntPtrT> result_offset =
IntPtrAdd(field_offset, field_offset_difference);
StoreObjectFieldNoWriteBarrier(object, result_offset, field);
},
1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
1, IndexAdvanceMode::kPost);
// If mutable HeapNumbers can occur, we need to go through the {object}
// again here and properly clone them. We use a second loop here to
// ensure that the GC (and heap verifier) always sees properly initialized
// objects, i.e. never hits undefined values in double fields.
if (!FLAG_unbox_double_fields) {
BuildFastLoop(
BuildFastLoop<IntPtrT>(
source_start, source_size,
[=](Node* field_index) {
TNode<IntPtrT> result_offset =
IntPtrAdd(TimesTaggedSize(UncheckedCast<IntPtrT>(field_index)),
field_offset_difference);
[=](TNode<IntPtrT> field_index) {
TNode<IntPtrT> result_offset = IntPtrAdd(
TimesTaggedSize(field_index), field_offset_difference);
TNode<Object> field = LoadObjectField(object, result_offset);
Label if_done(this), if_mutableheapnumber(this, Label::kDeferred);
GotoIf(TaggedIsSmi(field), &if_done);
@@ -3919,7 +3917,7 @@ void AccessorAssembler::GenerateCloneObjectIC() {
}
BIND(&if_done);
},
1, INTPTR_PARAMETERS, IndexAdvanceMode::kPost);
1, IndexAdvanceMode::kPost);
}
Return(object);