Commit cbdb3738 authored by danno, committed by Commit bot

[turbofan] Add FixedArray peephole optimizations to CodeStubAssembler

Previously, CodeStubAssembler macros that access FixedArray elements had to
compute element offsets explicitly, with a fair amount of duplicated code.
Furthermore, any peephole optimizations that could produce better code (such as
recognizing constant indices or combining the array index computation with Smi
untagging) were also duplicated at each call site.

This change factors the FixedArray offset computation into a common routine in
the CodeStubAssembler that applies the standard peephole optimizations to all
accesses. To do so, it also introduces limited introspection into the
previously opaque Node* type exported from code-assembler.h, allowing a Node to
be queried for whether it is a constant and, if so, to have its constant value
extracted.

Review-Url: https://codereview.chromium.org/1989363004
Cr-Commit-Position: refs/heads/master@{#36370}
parent ad7939e7
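A minimal, self-contained sketch of the idea (not the V8 implementation; this
Node struct, kSmiShiftBits, kHeaderSize, and the local ElementOffsetFromIndex
are simplified, hypothetical stand-ins): when the index node can be queried as
a compile-time constant, the entire element offset folds into a single
constant, and for Smi indices the untagging shift can be merged with the
element-size shift.

// Standalone model of the constant-folding peephole, assuming a 64-bit layout.
#include <cstdint>
#include <iostream>
#include <optional>

// Stand-in for compiler::Node*: the optional models the new
// ToIntPtrConstant-style query (is this node a constant, and what value?).
struct Node {
  std::optional<intptr_t> constant;
};

enum ParameterMode { INTEGER_PARAMETERS, SMI_PARAMETERS };

constexpr int kPointerSizeLog2 = 3;  // 8-byte elements
constexpr int kSmiShiftBits = 32;    // assumed 64-bit Smi: value in the upper 32 bits
constexpr int kHeaderSize = 16;      // illustrative FixedArray header size

// Computes header + index * element_size, folding everything into one
// constant when the index is known at compile time.
bool ElementOffsetFromIndex(const Node& index, ParameterMode mode,
                            intptr_t* folded_offset) {
  if (index.constant.has_value()) {
    intptr_t value = *index.constant;
    if (mode == SMI_PARAMETERS) value >>= kSmiShiftBits;  // untag at compile time
    *folded_offset = kHeaderSize + (value << kPointerSizeLog2);
    return true;  // no shifts or adds need to be emitted at runtime
  }
  // Non-constant case (not modeled here): a Smi index has zero low bits, so
  // the untagging right-shift and the scaling left-shift can be combined into
  // a single shift by (kSmiShiftBits - kPointerSizeLog2), plus the header add.
  return false;
}

int main() {
  Node index{intptr_t{4} << kSmiShiftBits};  // Smi-tagged constant 4
  intptr_t offset = 0;
  if (ElementOffsetFromIndex(index, SMI_PARAMETERS, &offset)) {
    std::cout << "folded offset: " << offset << "\n";  // 16 + 4 * 8 = 48
  }
  return 0;
}

The real routine additionally handles other element kinds and
INTEGER_PARAMETERS indices; the constant queries it relies on are the
ToInt32Constant/ToInt64Constant/ToIntPtrConstant helpers added to CodeAssembler
in the diff below.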
@@ -32,6 +32,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
   CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
                     Code::Flags flags, const char* name);
 
+  enum ParameterMode { INTEGER_PARAMETERS, SMI_PARAMETERS };
+
   compiler::Node* BooleanMapConstant();
   compiler::Node* EmptyStringConstant();
   compiler::Node* HeapNumberMapConstant();

@@ -134,14 +136,10 @@ class CodeStubAssembler : public compiler::CodeAssembler {
   compiler::Node* AllocateUninitializedFixedArray(compiler::Node* length);
 
   // Load an array element from a FixedArray.
-  compiler::Node* LoadFixedArrayElementInt32Index(compiler::Node* object,
-                                                  compiler::Node* int32_index,
-                                                  int additional_offset = 0);
-  compiler::Node* LoadFixedArrayElementSmiIndex(compiler::Node* object,
-                                                compiler::Node* smi_index,
-                                                int additional_offset = 0);
-  compiler::Node* LoadFixedArrayElementConstantIndex(compiler::Node* object,
-                                                     int index);
+  compiler::Node* LoadFixedArrayElement(
+      compiler::Node* object, compiler::Node* int32_index,
+      int additional_offset = 0,
+      ParameterMode parameter_mode = INTEGER_PARAMETERS);
 
   // Context manipulation
   compiler::Node* LoadNativeContext(compiler::Node* context);

@@ -162,24 +160,14 @@ class CodeStubAssembler : public compiler::CodeAssembler {
   compiler::Node* StoreMapNoWriteBarrier(compiler::Node* object,
                                          compiler::Node* map);
 
   // Store an array element to a FixedArray.
-  compiler::Node* StoreFixedArrayElementInt32Index(compiler::Node* object,
-                                                   compiler::Node* index,
-                                                   compiler::Node* value);
-  compiler::Node* StoreFixedArrayElementNoWriteBarrier(compiler::Node* object,
-                                                       compiler::Node* index,
-                                                       compiler::Node* value);
-  compiler::Node* StoreFixedDoubleArrayElementInt32Index(compiler::Node* object,
-                                                         compiler::Node* index,
-                                                         compiler::Node* value);
-  compiler::Node* StoreFixedArrayElementInt32Index(compiler::Node* object,
-                                                   int index,
-                                                   compiler::Node* value);
-  compiler::Node* StoreFixedArrayElementNoWriteBarrier(compiler::Node* object,
-                                                       int index,
-                                                       compiler::Node* value);
-  compiler::Node* StoreFixedDoubleArrayElementInt32Index(compiler::Node* object,
-                                                         int index,
-                                                         compiler::Node* value);
+  compiler::Node* StoreFixedArrayElement(
+      compiler::Node* object, compiler::Node* index, compiler::Node* value,
+      WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
+      ParameterMode parameter_mode = INTEGER_PARAMETERS);
+
+  compiler::Node* StoreFixedDoubleArrayElement(
+      compiler::Node* object, compiler::Node* index, compiler::Node* value,
+      ParameterMode parameter_mode = INTEGER_PARAMETERS);
 
   // Allocate a HeapNumber without initializing its value.
   compiler::Node* AllocateHeapNumber();

@@ -191,8 +179,10 @@ class CodeStubAssembler : public compiler::CodeAssembler {
   compiler::Node* AllocateSeqTwoByteString(int length);
 
   // Allocated an JSArray
   compiler::Node* AllocateJSArray(ElementsKind kind, compiler::Node* array_map,
-                                  int capacity, int length,
-                                  compiler::Node* allocation_site = nullptr);
+                                  compiler::Node* capacity,
+                                  compiler::Node* length,
+                                  compiler::Node* allocation_site = nullptr,
+                                  ParameterMode mode = INTEGER_PARAMETERS);
 
   // Allocation site manipulation
   void InitializeAllocationMemento(compiler::Node* base_allocation,

@@ -254,6 +244,10 @@ class CodeStubAssembler : public compiler::CodeAssembler {
                                          compiler::Node* object);
 
  private:
+  compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
+                                         ElementsKind kind, ParameterMode mode,
+                                         int base_size = 0);
+
   compiler::Node* AllocateRawAligned(compiler::Node* size_in_bytes,
                                      AllocationFlags flags,
                                      compiler::Node* top_address,
...
@@ -3496,8 +3496,7 @@ void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
   Node* descriptors = assembler->LoadMapDescriptors(map);
   Node* offset =
       assembler->Int32Constant(DescriptorArray::ToValueIndex(index()));
-  Node* callback =
-      assembler->LoadFixedArrayElementInt32Index(descriptors, offset);
+  Node* callback = assembler->LoadFixedArrayElement(descriptors, offset);
   assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
                           holder, callback);
 }

@@ -3888,9 +3887,10 @@ compiler::Node* FastCloneShallowObjectStub::GenerateFastPath(
   Node* undefined = assembler->UndefinedConstant();
   Node* literals_array =
       assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
-  Node* allocation_site = assembler->LoadFixedArrayElementSmiIndex(
-      literals_array, literals_index,
-      LiteralsArray::kFirstLiteralIndex * kPointerSize);
+  Node* allocation_site = assembler->LoadFixedArrayElement(
+      literals_array, literals_index,
+      LiteralsArray::kFirstLiteralIndex * kPointerSize,
+      CodeStubAssembler::SMI_PARAMETERS);
   assembler->GotoIf(assembler->WordEqual(allocation_site, undefined),
                     call_runtime);

@@ -4488,9 +4488,10 @@ void ArrayNoArgumentConstructorStub::GenerateAssembly(
           : nullptr;
   Node* array_map =
       assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
-  Node* array = assembler->AllocateJSArray(elements_kind(), array_map,
-                                           JSArray::kPreallocatedArrayElements,
-                                           0, allocation_site);
+  Node* array = assembler->AllocateJSArray(
+      elements_kind(), array_map,
+      assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
+      assembler->IntPtrConstant(0), allocation_site);
   assembler->Return(array);
 }

@@ -4501,9 +4502,10 @@ void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
       assembler->Parameter(
           ArrayNoArgumentConstructorDescriptor::kFunctionIndex),
       JSFunction::kPrototypeOrInitialMapOffset);
-  Node* array = assembler->AllocateJSArray(elements_kind(), array_map,
-                                           JSArray::kPreallocatedArrayElements,
-                                           0, nullptr);
+  Node* array = assembler->AllocateJSArray(
+      elements_kind(), array_map,
+      assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
+      assembler->IntPtrConstant(0), nullptr);
   assembler->Return(array);
 }
...
@@ -10,6 +10,7 @@
 #include "src/compiler/graph.h"
 #include "src/compiler/instruction-selector.h"
 #include "src/compiler/linkage.h"
+#include "src/compiler/node-matchers.h"
 #include "src/compiler/pipeline.h"
 #include "src/compiler/raw-machine-assembler.h"
 #include "src/compiler/schedule.h"

@@ -87,10 +88,14 @@ bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
   return raw_assembler_->machine()->Float64RoundTruncate().IsSupported();
 }
 
-Node* CodeAssembler::Int32Constant(int value) {
+Node* CodeAssembler::Int32Constant(int32_t value) {
   return raw_assembler_->Int32Constant(value);
 }
 
+Node* CodeAssembler::Int64Constant(int64_t value) {
+  return raw_assembler_->Int64Constant(value);
+}
+
 Node* CodeAssembler::IntPtrConstant(intptr_t value) {
   return raw_assembler_->IntPtrConstant(value);
 }

@@ -123,6 +128,30 @@ Node* CodeAssembler::NaNConstant() {
   return LoadRoot(Heap::kNanValueRootIndex);
 }
 
+bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) {
+  Int64Matcher m(node);
+  if (m.HasValue() &&
+      m.IsInRange(std::numeric_limits<int32_t>::min(),
+                  std::numeric_limits<int32_t>::max())) {
+    out_value = static_cast<int32_t>(m.Value());
+    return true;
+  }
+  return false;
+}
+
+bool CodeAssembler::ToInt64Constant(Node* node, int64_t& out_value) {
+  Int64Matcher m(node);
+  if (m.HasValue()) out_value = m.Value();
+  return m.HasValue();
+}
+
+bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
+  IntPtrMatcher m(node);
+  if (m.HasValue()) out_value = m.Value();
+  return m.HasValue();
+}
+
 Node* CodeAssembler::Parameter(int value) {
   return raw_assembler_->Parameter(value);
 }
...
@@ -190,7 +190,8 @@ class CodeAssembler {
   // ===========================================================================
   // Constants.
-  Node* Int32Constant(int value);
+  Node* Int32Constant(int32_t value);
+  Node* Int64Constant(int64_t value);
   Node* IntPtrConstant(intptr_t value);
   Node* NumberConstant(double value);
   Node* SmiConstant(Smi* value);

@@ -200,6 +201,10 @@ class CodeAssembler {
   Node* Float64Constant(double value);
   Node* NaNConstant();
 
+  bool ToInt32Constant(Node* node, int32_t& out_value);
+  bool ToInt64Constant(Node* node, int64_t& out_value);
+  bool ToIntPtrConstant(Node* node, intptr_t& out_value);
+
   Node* Parameter(int value);
   void Return(Node* value);
...
@@ -720,7 +720,7 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
         Int32Sub(Int32Constant(Register(0).ToOperand()), index);
     Node* value = LoadRegister(ChangeInt32ToIntPtr(reg_index));
-    StoreFixedArrayElementInt32Index(array, index, value);
+    StoreFixedArrayElement(array, index, value);
     var_index.Bind(Int32Add(index, Int32Constant(1)));
     Goto(&loop);

@@ -750,13 +750,13 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
     Node* condition = Int32LessThan(index, RegisterCount());
     GotoUnless(condition, &done_loop);
 
-    Node* value = LoadFixedArrayElementInt32Index(array, index);
+    Node* value = LoadFixedArrayElement(array, index);
     Node* reg_index =
         Int32Sub(Int32Constant(Register(0).ToOperand()), index);
     StoreRegister(value, ChangeInt32ToIntPtr(reg_index));
 
-    StoreFixedArrayElementInt32Index(array, index, StaleRegisterConstant());
+    StoreFixedArrayElement(array, index, StaleRegisterConstant());
 
     var_index.Bind(Int32Add(index, Int32Constant(1)));
     Goto(&loop);
...
...@@ -1645,7 +1645,8 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) { ...@@ -1645,7 +1645,8 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
Node* cache_array = __ LoadRegister(cache_array_reg); Node* cache_array = __ LoadRegister(cache_array_reg);
// Load the next key from the enumeration array. // Load the next key from the enumeration array.
Node* key = __ LoadFixedArrayElementSmiIndex(cache_array, index); Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
CodeStubAssembler::SMI_PARAMETERS);
// Check if we can use the for-in fast path potentially using the enum cache. // Check if we can use the for-in fast path potentially using the enum cache.
Label if_fast(assembler), if_slow(assembler, Label::kDeferred); Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
...@@ -1665,8 +1666,8 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) { ...@@ -1665,8 +1666,8 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
Node* type_feedback_vector = __ LoadTypeFeedbackVector(); Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* megamorphic_sentinel = Node* megamorphic_sentinel =
__ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_)); __ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
__ StoreFixedArrayElementNoWriteBarrier(type_feedback_vector, vector_index, __ StoreFixedArrayElement(type_feedback_vector, vector_index,
megamorphic_sentinel); megamorphic_sentinel, SKIP_WRITE_BARRIER);
// Need to filter the {key} for the {receiver}. // Need to filter the {key} for the {receiver}.
Node* context = __ GetContext(); Node* context = __ GetContext();
......
@@ -246,8 +246,9 @@ TEST(FixedArrayAccessSmiIndex) {
   CodeStubAssemblerTester m(isolate, descriptor);
   Handle<FixedArray> array = isolate->factory()->NewFixedArray(5);
   array->set(4, Smi::FromInt(733));
-  m.Return(m.LoadFixedArrayElementSmiIndex(m.HeapConstant(array),
-                                           m.SmiTag(m.Int32Constant(4))));
+  m.Return(m.LoadFixedArrayElement(m.HeapConstant(array),
+                                   m.SmiTag(m.Int32Constant(4)), 0,
+                                   CodeStubAssembler::SMI_PARAMETERS));
   Handle<Code> code = m.GenerateCode();
   FunctionTester ft(descriptor, code);
   MaybeHandle<Object> result = ft.Call();

@@ -361,6 +362,33 @@ TEST(SplitEdgeSwitchMerge) {
   USE(m.GenerateCode());
 }
 
+TEST(TestToConstant) {
+  Isolate* isolate(CcTest::InitIsolateOnce());
+  VoidDescriptor descriptor(isolate);
+  CodeStubAssemblerTester m(isolate, descriptor);
+
+  int32_t value32;
+  int64_t value64;
+  Node* a = m.Int32Constant(5);
+  CHECK(m.ToInt32Constant(a, value32));
+  CHECK(m.ToInt64Constant(a, value64));
+
+  a = m.Int64Constant(static_cast<int64_t>(1) << 32);
+  CHECK(!m.ToInt32Constant(a, value32));
+  CHECK(m.ToInt64Constant(a, value64));
+
+  a = m.Int64Constant(13);
+  CHECK(m.ToInt32Constant(a, value32));
+  CHECK(m.ToInt64Constant(a, value64));
+
+  a = m.UndefinedConstant();
+  CHECK(!m.ToInt32Constant(a, value32));
+  CHECK(!m.ToInt64Constant(a, value64));
+
+  a = m.UndefinedConstant();
+  CHECK(!m.ToInt32Constant(a, value32));
+  CHECK(!m.ToInt64Constant(a, value64));
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
@@ -520,9 +520,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
   TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
     InterpreterAssemblerForTest m(this, bytecode);
     Node* value = m.Int32Constant(44);
-    EXPECT_THAT(
-        m.SmiTag(value),
-        IsWordShl(value, IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
+    EXPECT_THAT(m.SmiTag(value),
+                IsIntPtrConstant(static_cast<intptr_t>(44)
+                                 << (kSmiShiftSize + kSmiTagSize)));
     EXPECT_THAT(
         m.SmiUntag(value),
         IsWordSar(value, IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
...