Commit a66e3e57 authored by Georg Schmid, committed by Commit Bot

[csa] Tweak CSA pipeline to eliminate more redundant checks

- Lower LoadObjectField to LoadFromObject
- Mark LoadFromObject and StoreToObject as non-allocating
- Use optimizable BitcastTaggedSignedToWord in TaggedIsNotSmi check

R=jarin@chromium.org, tebbi@chromium.org

Change-Id: I42992d46597be795aee3702018f7efd93fcc6ebf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1657926
Commit-Queue: Georg Schmid <gsps@google.com>
Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62173}
parent eab992fd
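
Taken together, the first two items let load elimination in the CSA pipeline see through field accesses. A minimal sketch of the intended effect (our illustration, not code from this CL; the object and the repeated load are hypothetical):

  // Two loads of the same field with no intervening store. Previously,
  // LoadObjectField emitted a plain Load node, which CSA load elimination
  // does not track, so both loads survived. Lowered to LoadFromObject, the
  // second load is recognized as redundant and reuses `first`.
  TNode<Object> first = LoadObjectField(object, JSObject::kPropertiesOrHashOffset);
  TNode<Object> second = LoadObjectField(object, JSObject::kPropertiesOrHashOffset);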
@@ -3017,3 +3017,8 @@ transitioning macro ToStringImpl(context: Context, o: Object): String {
     }
     unreachable;
   }
+
+  macro VerifiedUnreachable(): never {
+    StaticAssert(false);
+    unreachable;
+  }
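
A note on the mechanism: StaticAssert is not a runtime check. It lowers to a TurboFan StaticAssert node that the optimizer must prove true and fold away; if the node survives to the end of the pipeline, building the builtin fails. A rough C++/CSA analogue of the macro above (our sketch, assuming CodeAssembler::StaticAssert; not part of this CL):

  void VerifiedUnreachable() {
    // Emits a StaticAssert node with a constant-false input. Compilation
    // succeeds only if optimization deletes this node, i.e. proves the
    // surrounding code path dead.
    StaticAssert(Int32FalseConstant());
    Unreachable();
  }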
@@ -975,9 +975,12 @@ TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) {
 }
 
 TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) {
-  return WordNotEqual(
-      WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
-      IntPtrConstant(0));
+  // Although BitcastTaggedSignedToWord is generally unsafe on HeapObjects, we
+  // can nonetheless use it to inspect the Smi tag. The assumption here is that
+  // the GC will not exchange Smis for HeapObjects or vice versa.
+  TNode<IntPtrT> a_bitcast = BitcastTaggedSignedToWord(UncheckedCast<Smi>(a));
+  return WordNotEqual(WordAnd(a_bitcast, IntPtrConstant(kSmiTagMask)),
+                      IntPtrConstant(0));
 }
 
 TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
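
Why the bitcast swap helps (our reading of the CL): BitcastTaggedToWord on a HeapObject pointer must be handled conservatively, since a moving GC may change the pointer's bits, whereas a Smi's bit pattern is stable, so BitcastTaggedSignedToWord is freely reorderable and value-numberable. A hypothetical fragment:

  // Two Smi checks on the same value, e.g. separated by non-clobbering code.
  TNode<BoolT> check1 = TaggedIsNotSmi(x);
  TNode<BoolT> check2 = TaggedIsNotSmi(x);
  // With the optimizable bitcast, check2's WordAnd/WordNotEqual nodes are
  // value-numbered into check1's, so the second tag test disappears.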
@@ -1394,14 +1397,15 @@ Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
 Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
                                          int offset, MachineType type) {
   CSA_ASSERT(this, IsStrong(object));
-  return Load(type, object, IntPtrConstant(offset - kHeapObjectTag));
+  return LoadFromObject(type, object, IntPtrConstant(offset - kHeapObjectTag));
 }
 
 Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
                                          SloppyTNode<IntPtrT> offset,
                                          MachineType type) {
   CSA_ASSERT(this, IsStrong(object));
-  return Load(type, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
+  return LoadFromObject(type, object,
+                        IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
 }
 
 TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
@@ -41,7 +41,8 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
          IrOpcode::kWord64AtomicLoad == op->opcode() ||
          IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
          IrOpcode::kPoisonedLoad == op->opcode() ||
-         IrOpcode::kUnalignedLoad == op->opcode());
+         IrOpcode::kUnalignedLoad == op->opcode() ||
+         IrOpcode::kLoadFromObject == op->opcode());
   return OpParameter<LoadRepresentation>(op);
 }
 
@@ -108,6 +108,7 @@ bool CanAllocate(const Node* node) {
     case IrOpcode::kLoad:
     case IrOpcode::kLoadElement:
    case IrOpcode::kLoadField:
+    case IrOpcode::kLoadFromObject:
     case IrOpcode::kPoisonedLoad:
     case IrOpcode::kProtectedLoad:
     case IrOpcode::kProtectedStore:
@@ -118,6 +119,7 @@ bool CanAllocate(const Node* node) {
     case IrOpcode::kStore:
     case IrOpcode::kStoreElement:
     case IrOpcode::kStoreField:
+    case IrOpcode::kStoreToObject:
     case IrOpcode::kTaggedPoisonOnSpeculation:
     case IrOpcode::kUnalignedLoad:
     case IrOpcode::kUnalignedStore:
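
CanAllocate() informs the memory optimizer: nodes classified as non-allocating cannot trigger a GC, so allocation folding and write-barrier elimination remain valid across them. A hedged sketch of a CSA sequence that benefits (sizes and offsets are illustrative only, not from this CL):

  TNode<HeapObject> a = Allocate(16);
  // StoreToObject is now known not to allocate, so no GC can occur here...
  StoreToObject(MachineRepresentation::kTagged, a,
                IntPtrConstant(kTaggedSize - kHeapObjectTag),
                UndefinedConstant(), StoreToObjectWriteBarrier::kNone);
  // ...which keeps `b` eligible for folding into `a`'s allocation group.
  TNode<HeapObject> b = Allocate(16);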
@@ -530,6 +530,36 @@ TEST(TestLoadEliminationVariableNoWrite) {
   asm_tester.GenerateCode();
 }
 
+TEST(TestRedundantArrayElementCheck) {
+  CcTest::InitializeVM();
+  Isolate* isolate(CcTest::i_isolate());
+  i::HandleScope scope(isolate);
+  Handle<Context> context =
+      Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+  CodeAssemblerTester asm_tester(isolate);
+  TestTorqueAssembler m(asm_tester.state());
+  {
+    m.Return(m.TestRedundantArrayElementCheck(
+        m.UncheckedCast<Context>(m.HeapConstant(context))));
+  }
+  asm_tester.GenerateCode();
+}
+
+TEST(TestRedundantSmiCheck) {
+  CcTest::InitializeVM();
+  Isolate* isolate(CcTest::i_isolate());
+  i::HandleScope scope(isolate);
+  Handle<Context> context =
+      Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
+  CodeAssemblerTester asm_tester(isolate);
+  TestTorqueAssembler m(asm_tester.state());
+  {
+    m.Return(m.TestRedundantSmiCheck(
+        m.UncheckedCast<Context>(m.HeapConstant(context))));
+  }
+  asm_tester.GenerateCode();
+}
+
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
@@ -937,4 +937,34 @@ namespace test {
     StaticAssert(WordEqual(u1, u2));
   }
+
+  @export
+  macro TestRedundantArrayElementCheck(implicit context: Context)(): Smi {
+    const a = kEmptyFixedArray;
+    for (let i: Smi = 0; i < a.length; i++) {
+      if (a.objects[i] == Hole) {
+        if (a.objects[i] == Hole) {
+          return -1;
+        } else {
+          StaticAssert(false);
+        }
+      }
+    }
+    return 1;
+  }
+
+  @export
+  macro TestRedundantSmiCheck(implicit context: Context)(): Smi {
+    const a = kEmptyFixedArray;
+    const x = a.objects[1];
+    typeswitch (x) {
+      case (Smi): {
+        Cast<Smi>(x) otherwise VerifiedUnreachable();
+        return -1;
+      }
+      case (Object): {
+      }
+    }
+    return 1;
+  }
 
 }
@@ -1119,6 +1119,7 @@ class IsStoreElementMatcher final : public TestNodeMatcher {
 LOAD_MATCHER(Load)
 LOAD_MATCHER(UnalignedLoad)
 LOAD_MATCHER(PoisonedLoad)
+LOAD_MATCHER(LoadFromObject)
 
 #define STORE_MATCHER(kStore)                                         \
   class Is##kStore##Matcher final : public TestNodeMatcher {          \
@@ -2037,6 +2038,16 @@ Matcher<Node*> IsUnalignedLoad(const Matcher<LoadRepresentation>& rep_matcher,
                                            control_matcher));
 }
 
+Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
+                                const Matcher<Node*>& base_matcher,
+                                const Matcher<Node*>& index_matcher,
+                                const Matcher<Node*>& effect_matcher,
+                                const Matcher<Node*>& control_matcher) {
+  return MakeMatcher(new IsLoadFromObjectMatcher(rep_matcher, base_matcher,
+                                                 index_matcher, effect_matcher,
+                                                 control_matcher));
+}
+
 Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
                        const Matcher<Node*>& base_matcher,
                        const Matcher<Node*>& index_matcher,
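
Hypothetical use of the new matcher in a graph unittest (`node`, `base`, and `offset` are placeholders, not from this CL):

  // Expect a LoadFromObject of a tagged field at a constant offset from
  // `base`, with arbitrary effect and control inputs.
  EXPECT_THAT(node,
              IsLoadFromObject(MachineType::AnyTagged(), base,
                               IsIntPtrConstant(offset - kHeapObjectTag), _, _));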
@@ -333,6 +333,11 @@ Matcher<Node*> IsUnalignedLoad(const Matcher<LoadRepresentation>& rep_matcher,
                                const Matcher<Node*>& index_matcher,
                                const Matcher<Node*>& effect_matcher,
                                const Matcher<Node*>& control_matcher);
+Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
+                                const Matcher<Node*>& base_matcher,
+                                const Matcher<Node*>& index_matcher,
+                                const Matcher<Node*>& effect_matcher,
+                                const Matcher<Node*>& control_matcher);
 Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
                        const Matcher<Node*>& base_matcher,
                        const Matcher<Node*>& index_matcher,
@@ -63,6 +63,15 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
   return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
 }
 
+Matcher<Node*>
+InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadFromObject(
+    const Matcher<c::LoadRepresentation>& rep_matcher,
+    const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
+  CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
+  return ::i::compiler::IsLoadFromObject(rep_matcher, base_matcher,
+                                         index_matcher, _, _);
+}
+
 Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
     const Matcher<c::StoreRepresentation>& rep_matcher,
     const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
@@ -436,7 +445,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
   Node* load_constant = m.LoadConstantPoolEntry(index);
 #ifdef V8_COMPRESS_POINTERS
   Matcher<Node*> constant_pool_matcher =
-      IsChangeCompressedToTagged(m.IsLoad(
+      IsChangeCompressedToTagged(m.IsLoadFromObject(
           MachineType::AnyCompressed(),
           c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
           c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -448,7 +457,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
                              kHeapObjectTag),
           LoadSensitivity::kCritical)));
 #else
-  Matcher<Node*> constant_pool_matcher = m.IsLoad(
+  Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
       MachineType::AnyTagged(),
       c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
       c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -466,7 +475,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
   Node* load_constant = m.LoadConstantPoolEntry(index);
 #if V8_COMPRESS_POINTERS
   Matcher<Node*> constant_pool_matcher =
-      IsChangeCompressedToTagged(m.IsLoad(
+      IsChangeCompressedToTagged(m.IsLoadFromObject(
           MachineType::AnyCompressed(),
           c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
           c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -480,7 +489,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
               c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
           LoadSensitivity::kCritical)));
 #else
-  Matcher<Node*> constant_pool_matcher = m.IsLoad(
+  Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
       MachineType::AnyTagged(),
       c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
       c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -506,13 +515,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
     int offset = 16;
     Node* load_field = m.LoadObjectField(object, offset);
 #ifdef V8_COMPRESS_POINTERS
-    EXPECT_THAT(load_field, IsChangeCompressedToTagged(m.IsLoad(
+    EXPECT_THAT(load_field, IsChangeCompressedToTagged(m.IsLoadFromObject(
                                 MachineType::AnyCompressed(), object,
                                 c::IsIntPtrConstant(offset - kHeapObjectTag))));
 #else
-    EXPECT_THAT(load_field,
-                m.IsLoad(MachineType::AnyTagged(), object,
-                         c::IsIntPtrConstant(offset - kHeapObjectTag)));
+    EXPECT_THAT(load_field, m.IsLoadFromObject(
+                                MachineType::AnyTagged(), object,
+                                c::IsIntPtrConstant(offset - kHeapObjectTag)));
 #endif
   }
 }
@@ -593,21 +602,21 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
                              kSystemPointerSize)));
 #ifdef V8_COMPRESS_POINTERS
     Matcher<Node*> load_vector_cell_matcher = IsChangeCompressedToTagged(
-        m.IsLoad(MachineType::AnyCompressed(), load_function_matcher,
-                 c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
-                                     kHeapObjectTag)));
+        m.IsLoadFromObject(MachineType::AnyCompressed(), load_function_matcher,
+                           c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
+                                               kHeapObjectTag)));
     EXPECT_THAT(load_feedback_vector,
-                IsChangeCompressedToTagged(m.IsLoad(
+                IsChangeCompressedToTagged(m.IsLoadFromObject(
                     MachineType::AnyCompressed(), load_vector_cell_matcher,
                     c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag))));
 #else
-    Matcher<Node*> load_vector_cell_matcher = m.IsLoad(
+    Matcher<Node*> load_vector_cell_matcher = m.IsLoadFromObject(
         MachineType::AnyTagged(), load_function_matcher,
         c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset - kHeapObjectTag));
-    EXPECT_THAT(
-        load_feedback_vector,
-        m.IsLoad(MachineType::AnyTagged(), load_vector_cell_matcher,
-                 c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
+    EXPECT_THAT(load_feedback_vector,
+                m.IsLoadFromObject(
+                    MachineType::AnyTagged(), load_vector_cell_matcher,
+                    c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
 #endif
   }
 }
@@ -44,6 +44,10 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
         const Matcher<compiler::Node*>& base_matcher,
         const Matcher<compiler::Node*>& index_matcher,
         LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
+    Matcher<compiler::Node*> IsLoadFromObject(
+        const Matcher<compiler::LoadRepresentation>& rep_matcher,
+        const Matcher<compiler::Node*>& base_matcher,
+        const Matcher<compiler::Node*>& index_matcher);
     Matcher<compiler::Node*> IsStore(
         const Matcher<compiler::StoreRepresentation>& rep_matcher,
         const Matcher<compiler::Node*>& base_matcher,