Commit cbdb3738 authored by danno, committed by Commit bot

[turbofan] Add FixedArray peephole optimizations to CodeStubAssembler

Previously, CodeStubAssembler macros performing FixedArray element accesses had
to compute offsets to elements explicitly with a fair amount of duplicated
code. Furthermore, any peephole optimizations that could produce better code--
like recognizing constant indices or combining array index computation with Smi
untagging--were also duplicated.

This change factors the code to compute FixedArray index offsets into a common
routine in the CodeStubAssembler that applies standard peephole optimizations to
all accesses. In order to do this, it also introduces limited introspection into
the up-until-now opaque Node* type exported from code-assembler.h, allowing
Nodes to be queried whether they are constant and extracting their constant
value in that case.

Review-Url: https://codereview.chromium.org/1989363004
Cr-Commit-Position: refs/heads/master@{#36370}
parent ad7939e7
This diff is collapsed.
......@@ -32,6 +32,8 @@ class CodeStubAssembler : public compiler::CodeAssembler {
CodeStubAssembler(Isolate* isolate, Zone* zone, int parameter_count,
Code::Flags flags, const char* name);
enum ParameterMode { INTEGER_PARAMETERS, SMI_PARAMETERS };
compiler::Node* BooleanMapConstant();
compiler::Node* EmptyStringConstant();
compiler::Node* HeapNumberMapConstant();
......@@ -134,14 +136,10 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* AllocateUninitializedFixedArray(compiler::Node* length);
// Load an array element from a FixedArray.
compiler::Node* LoadFixedArrayElementInt32Index(compiler::Node* object,
compiler::Node* int32_index,
int additional_offset = 0);
compiler::Node* LoadFixedArrayElementSmiIndex(compiler::Node* object,
compiler::Node* smi_index,
int additional_offset = 0);
compiler::Node* LoadFixedArrayElementConstantIndex(compiler::Node* object,
int index);
compiler::Node* LoadFixedArrayElement(
compiler::Node* object, compiler::Node* int32_index,
int additional_offset = 0,
ParameterMode parameter_mode = INTEGER_PARAMETERS);
// Context manipulation
compiler::Node* LoadNativeContext(compiler::Node* context);
......@@ -162,24 +160,14 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* StoreMapNoWriteBarrier(compiler::Node* object,
compiler::Node* map);
// Store an array element to a FixedArray.
compiler::Node* StoreFixedArrayElementInt32Index(compiler::Node* object,
compiler::Node* index,
compiler::Node* value);
compiler::Node* StoreFixedArrayElementNoWriteBarrier(compiler::Node* object,
compiler::Node* index,
compiler::Node* value);
compiler::Node* StoreFixedDoubleArrayElementInt32Index(compiler::Node* object,
compiler::Node* index,
compiler::Node* value);
compiler::Node* StoreFixedArrayElementInt32Index(compiler::Node* object,
int index,
compiler::Node* value);
compiler::Node* StoreFixedArrayElementNoWriteBarrier(compiler::Node* object,
int index,
compiler::Node* value);
compiler::Node* StoreFixedDoubleArrayElementInt32Index(compiler::Node* object,
int index,
compiler::Node* value);
compiler::Node* StoreFixedArrayElement(
compiler::Node* object, compiler::Node* index, compiler::Node* value,
WriteBarrierMode barrier_mode = UPDATE_WRITE_BARRIER,
ParameterMode parameter_mode = INTEGER_PARAMETERS);
compiler::Node* StoreFixedDoubleArrayElement(
compiler::Node* object, compiler::Node* index, compiler::Node* value,
ParameterMode parameter_mode = INTEGER_PARAMETERS);
// Allocate a HeapNumber without initializing its value.
compiler::Node* AllocateHeapNumber();
......@@ -191,8 +179,10 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* AllocateSeqTwoByteString(int length);
// Allocate a JSArray
compiler::Node* AllocateJSArray(ElementsKind kind, compiler::Node* array_map,
int capacity, int length,
compiler::Node* allocation_site = nullptr);
compiler::Node* capacity,
compiler::Node* length,
compiler::Node* allocation_site = nullptr,
ParameterMode mode = INTEGER_PARAMETERS);
// Allocation site manipulation
void InitializeAllocationMemento(compiler::Node* base_allocation,
......@@ -254,6 +244,10 @@ class CodeStubAssembler : public compiler::CodeAssembler {
compiler::Node* object);
private:
compiler::Node* ElementOffsetFromIndex(compiler::Node* index,
ElementsKind kind, ParameterMode mode,
int base_size = 0);
compiler::Node* AllocateRawAligned(compiler::Node* size_in_bytes,
AllocationFlags flags,
compiler::Node* top_address,
......
......@@ -3496,8 +3496,7 @@ void LoadApiGetterStub::GenerateAssembly(CodeStubAssembler* assembler) const {
Node* descriptors = assembler->LoadMapDescriptors(map);
Node* offset =
assembler->Int32Constant(DescriptorArray::ToValueIndex(index()));
Node* callback =
assembler->LoadFixedArrayElementInt32Index(descriptors, offset);
Node* callback = assembler->LoadFixedArrayElement(descriptors, offset);
assembler->TailCallStub(CodeFactory::ApiGetter(isolate()), context, receiver,
holder, callback);
}
......@@ -3888,9 +3887,10 @@ compiler::Node* FastCloneShallowObjectStub::GenerateFastPath(
Node* undefined = assembler->UndefinedConstant();
Node* literals_array =
assembler->LoadObjectField(closure, JSFunction::kLiteralsOffset);
Node* allocation_site = assembler->LoadFixedArrayElementSmiIndex(
Node* allocation_site = assembler->LoadFixedArrayElement(
literals_array, literals_index,
LiteralsArray::kFirstLiteralIndex * kPointerSize);
LiteralsArray::kFirstLiteralIndex * kPointerSize,
CodeStubAssembler::SMI_PARAMETERS);
assembler->GotoIf(assembler->WordEqual(allocation_site, undefined),
call_runtime);
......@@ -4488,9 +4488,10 @@ void ArrayNoArgumentConstructorStub::GenerateAssembly(
: nullptr;
Node* array_map =
assembler->LoadJSArrayElementsMap(elements_kind(), native_context);
Node* array = assembler->AllocateJSArray(elements_kind(), array_map,
JSArray::kPreallocatedArrayElements,
0, allocation_site);
Node* array = assembler->AllocateJSArray(
elements_kind(), array_map,
assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
assembler->IntPtrConstant(0), allocation_site);
assembler->Return(array);
}
......@@ -4501,9 +4502,10 @@ void InternalArrayNoArgumentConstructorStub::GenerateAssembly(
assembler->Parameter(
ArrayNoArgumentConstructorDescriptor::kFunctionIndex),
JSFunction::kPrototypeOrInitialMapOffset);
Node* array = assembler->AllocateJSArray(elements_kind(), array_map,
JSArray::kPreallocatedArrayElements,
0, nullptr);
Node* array = assembler->AllocateJSArray(
elements_kind(), array_map,
assembler->IntPtrConstant(JSArray::kPreallocatedArrayElements),
assembler->IntPtrConstant(0), nullptr);
assembler->Return(array);
}
......
......@@ -10,6 +10,7 @@
#include "src/compiler/graph.h"
#include "src/compiler/instruction-selector.h"
#include "src/compiler/linkage.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/pipeline.h"
#include "src/compiler/raw-machine-assembler.h"
#include "src/compiler/schedule.h"
......@@ -87,10 +88,14 @@ bool CodeAssembler::IsFloat64RoundTruncateSupported() const {
return raw_assembler_->machine()->Float64RoundTruncate().IsSupported();
}
Node* CodeAssembler::Int32Constant(int value) {
Node* CodeAssembler::Int32Constant(int32_t value) {
return raw_assembler_->Int32Constant(value);
}
Node* CodeAssembler::Int64Constant(int64_t value) {
return raw_assembler_->Int64Constant(value);
}
Node* CodeAssembler::IntPtrConstant(intptr_t value) {
return raw_assembler_->IntPtrConstant(value);
}
......@@ -123,6 +128,30 @@ Node* CodeAssembler::NaNConstant() {
return LoadRoot(Heap::kNanValueRootIndex);
}
bool CodeAssembler::ToInt32Constant(Node* node, int32_t& out_value) {
Int64Matcher m(node);
if (m.HasValue() &&
m.IsInRange(std::numeric_limits<int32_t>::min(),
std::numeric_limits<int32_t>::max())) {
out_value = static_cast<int32_t>(m.Value());
return true;
}
return false;
}
bool CodeAssembler::ToInt64Constant(Node* node, int64_t& out_value) {
Int64Matcher m(node);
if (m.HasValue()) out_value = m.Value();
return m.HasValue();
}
bool CodeAssembler::ToIntPtrConstant(Node* node, intptr_t& out_value) {
IntPtrMatcher m(node);
if (m.HasValue()) out_value = m.Value();
return m.HasValue();
}
Node* CodeAssembler::Parameter(int value) {
return raw_assembler_->Parameter(value);
}
......
......@@ -190,7 +190,8 @@ class CodeAssembler {
// ===========================================================================
// Constants.
Node* Int32Constant(int value);
Node* Int32Constant(int32_t value);
Node* Int64Constant(int64_t value);
Node* IntPtrConstant(intptr_t value);
Node* NumberConstant(double value);
Node* SmiConstant(Smi* value);
......@@ -200,6 +201,10 @@ class CodeAssembler {
Node* Float64Constant(double value);
Node* NaNConstant();
bool ToInt32Constant(Node* node, int32_t& out_value);
bool ToInt64Constant(Node* node, int64_t& out_value);
bool ToIntPtrConstant(Node* node, intptr_t& out_value);
Node* Parameter(int value);
void Return(Node* value);
......
......@@ -720,7 +720,7 @@ Node* InterpreterAssembler::ExportRegisterFile(Node* array) {
Int32Sub(Int32Constant(Register(0).ToOperand()), index);
Node* value = LoadRegister(ChangeInt32ToIntPtr(reg_index));
StoreFixedArrayElementInt32Index(array, index, value);
StoreFixedArrayElement(array, index, value);
var_index.Bind(Int32Add(index, Int32Constant(1)));
Goto(&loop);
......@@ -750,13 +750,13 @@ Node* InterpreterAssembler::ImportRegisterFile(Node* array) {
Node* condition = Int32LessThan(index, RegisterCount());
GotoUnless(condition, &done_loop);
Node* value = LoadFixedArrayElementInt32Index(array, index);
Node* value = LoadFixedArrayElement(array, index);
Node* reg_index =
Int32Sub(Int32Constant(Register(0).ToOperand()), index);
StoreRegister(value, ChangeInt32ToIntPtr(reg_index));
StoreFixedArrayElementInt32Index(array, index, StaleRegisterConstant());
StoreFixedArrayElement(array, index, StaleRegisterConstant());
var_index.Bind(Int32Add(index, Int32Constant(1)));
Goto(&loop);
......
......@@ -1645,7 +1645,8 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
Node* cache_array = __ LoadRegister(cache_array_reg);
// Load the next key from the enumeration array.
Node* key = __ LoadFixedArrayElementSmiIndex(cache_array, index);
Node* key = __ LoadFixedArrayElement(cache_array, index, 0,
CodeStubAssembler::SMI_PARAMETERS);
// Check if we can use the for-in fast path potentially using the enum cache.
Label if_fast(assembler), if_slow(assembler, Label::kDeferred);
......@@ -1665,8 +1666,8 @@ void Interpreter::DoForInNext(InterpreterAssembler* assembler) {
Node* type_feedback_vector = __ LoadTypeFeedbackVector();
Node* megamorphic_sentinel =
__ HeapConstant(TypeFeedbackVector::MegamorphicSentinel(isolate_));
__ StoreFixedArrayElementNoWriteBarrier(type_feedback_vector, vector_index,
megamorphic_sentinel);
__ StoreFixedArrayElement(type_feedback_vector, vector_index,
megamorphic_sentinel, SKIP_WRITE_BARRIER);
// Need to filter the {key} for the {receiver}.
Node* context = __ GetContext();
......
......@@ -246,8 +246,9 @@ TEST(FixedArrayAccessSmiIndex) {
CodeStubAssemblerTester m(isolate, descriptor);
Handle<FixedArray> array = isolate->factory()->NewFixedArray(5);
array->set(4, Smi::FromInt(733));
m.Return(m.LoadFixedArrayElementSmiIndex(m.HeapConstant(array),
m.SmiTag(m.Int32Constant(4))));
m.Return(m.LoadFixedArrayElement(m.HeapConstant(array),
m.SmiTag(m.Int32Constant(4)), 0,
CodeStubAssembler::SMI_PARAMETERS));
Handle<Code> code = m.GenerateCode();
FunctionTester ft(descriptor, code);
MaybeHandle<Object> result = ft.Call();
......@@ -361,6 +362,33 @@ TEST(SplitEdgeSwitchMerge) {
USE(m.GenerateCode());
}
TEST(TestToConstant) {
Isolate* isolate(CcTest::InitIsolateOnce());
VoidDescriptor descriptor(isolate);
CodeStubAssemblerTester m(isolate, descriptor);
int32_t value32;
int64_t value64;
Node* a = m.Int32Constant(5);
CHECK(m.ToInt32Constant(a, value32));
CHECK(m.ToInt64Constant(a, value64));
a = m.Int64Constant(static_cast<int64_t>(1) << 32);
CHECK(!m.ToInt32Constant(a, value32));
CHECK(m.ToInt64Constant(a, value64));
a = m.Int64Constant(13);
CHECK(m.ToInt32Constant(a, value32));
CHECK(m.ToInt64Constant(a, value64));
a = m.UndefinedConstant();
CHECK(!m.ToInt32Constant(a, value32));
CHECK(!m.ToInt64Constant(a, value64));
a = m.UndefinedConstant();
CHECK(!m.ToInt32Constant(a, value32));
CHECK(!m.ToInt64Constant(a, value64));
}
} // namespace compiler
} // namespace internal
} // namespace v8
......@@ -520,9 +520,9 @@ TARGET_TEST_F(InterpreterAssemblerTest, SmiTag) {
TRACED_FOREACH(interpreter::Bytecode, bytecode, kBytecodes) {
InterpreterAssemblerForTest m(this, bytecode);
Node* value = m.Int32Constant(44);
EXPECT_THAT(
m.SmiTag(value),
IsWordShl(value, IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
EXPECT_THAT(m.SmiTag(value),
IsIntPtrConstant(static_cast<intptr_t>(44)
<< (kSmiShiftSize + kSmiTagSize)));
EXPECT_THAT(
m.SmiUntag(value),
IsWordSar(value, IsIntPtrConstant(kSmiShiftSize + kSmiTagSize)));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment