Commit 64daad8e authored by Yang Guo, committed by Commit Bot

Revert "[csa] Tweak CSA pipeline to eliminate more redundant checks"

This reverts commit a66e3e57.

Reason for revert: Likely to have caused UBSAN issues: https://ci.chromium.org/p/v8/builders/ci/V8%20Linux64%20UBSan/6671

Original change's description:
> [csa] Tweak CSA pipeline to eliminate more redundant checks
> 
> - Lower LoadObjectField to LoadFromObject
> - Mark LoadFromObject and StoreToObject as non-allocating
> - Use optimizable BitcastTaggedSignedToWord in TaggedIsNotSmi check
> 
> R=jarin@chromium.org, tebbi@chromium.org
> 
> Change-Id: I42992d46597be795aee3702018f7efd93fcc6ebf
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1657926
> Commit-Queue: Georg Schmid <gsps@google.com>
> Reviewed-by: Tobias Tebbi <tebbi@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#62173}

TBR=jarin@chromium.org,tebbi@chromium.org,gsps@google.com

Change-Id: I0a1c0515a8a61d32f77a392f1efc0751b6aae2a1
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1660485
Reviewed-by: Yang Guo <yangguo@chromium.org>
Commit-Queue: Yang Guo <yangguo@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62179}
parent ab99a74c
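A note on the third bullet of the reverted change: both bitcast variants exist only to let the compiler reason about the raw tagged word; the actual Smi test is a mask against the low tag bit. The self-contained C++ sketch below mirrors that check. The constants kSmiTagMask and kSmiTag mirror V8's common configuration (mask 1, tag 0), but everything here is an illustrative stand-in, not V8 API.

    #include <cassert>
    #include <cstdint>

    // Illustrative stand-ins for V8's tagging constants (in the common
    // configuration the Smi tag is the low bit, with kSmiTag == 0); these
    // are assumptions for the sketch, not actual V8 declarations.
    constexpr uintptr_t kSmiTagMask = 1;
    constexpr uintptr_t kSmiTag = 0;

    // The check that TaggedIsNotSmi lowers to: a tagged word is a Smi iff
    // its low tag bit equals kSmiTag. The reverted change only swapped
    // which bitcast feeds this mask so the optimizer could see through it.
    bool TaggedIsNotSmi(uintptr_t tagged_word) {
      return (tagged_word & kSmiTagMask) != kSmiTag;
    }

    int main() {
      uintptr_t smi = static_cast<uintptr_t>(21) << 1;  // 42 encoded as a Smi
      uintptr_t heap_ref = 0x1000 | 1;  // a pointer carries the heap-object tag
      assert(!TaggedIsNotSmi(smi));
      assert(TaggedIsNotSmi(heap_ref));
      return 0;
    }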
@@ -3017,8 +3017,3 @@ transitioning macro ToStringImpl(context: Context, o: Object): String {
   }
   unreachable;
 }
-
-macro VerifiedUnreachable(): never {
-  StaticAssert(false);
-  unreachable;
-}
@@ -975,12 +975,9 @@ TNode<BoolT> CodeStubAssembler::TaggedIsSmi(TNode<MaybeObject> a) {
 }
 
 TNode<BoolT> CodeStubAssembler::TaggedIsNotSmi(SloppyTNode<Object> a) {
-  // Although BitcastTaggedSignedToWord is generally unsafe on HeapObjects, we
-  // can nonetheless use it to inspect the Smi tag. The assumption here is that
-  // the GC will not exchange Smis for HeapObjects or vice-versa.
-  TNode<IntPtrT> a_bitcast = BitcastTaggedSignedToWord(UncheckedCast<Smi>(a));
-  return WordNotEqual(WordAnd(a_bitcast, IntPtrConstant(kSmiTagMask)),
-                      IntPtrConstant(0));
+  return WordNotEqual(
+      WordAnd(BitcastTaggedToWord(a), IntPtrConstant(kSmiTagMask)),
+      IntPtrConstant(0));
 }
 
 TNode<BoolT> CodeStubAssembler::TaggedIsPositiveSmi(SloppyTNode<Object> a) {
@@ -1397,15 +1394,14 @@ Node* CodeStubAssembler::LoadBufferObject(Node* buffer, int offset,
 Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
                                          int offset, MachineType type) {
   CSA_ASSERT(this, IsStrong(object));
-  return LoadFromObject(type, object, IntPtrConstant(offset - kHeapObjectTag));
+  return Load(type, object, IntPtrConstant(offset - kHeapObjectTag));
 }
 
 Node* CodeStubAssembler::LoadObjectField(SloppyTNode<HeapObject> object,
                                          SloppyTNode<IntPtrT> offset,
                                          MachineType type) {
   CSA_ASSERT(this, IsStrong(object));
-  return LoadFromObject(type, object,
-                        IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
+  return Load(type, object, IntPtrSub(offset, IntPtrConstant(kHeapObjectTag)));
 }
 
 TNode<IntPtrT> CodeStubAssembler::LoadAndUntagObjectField(
...
@@ -41,8 +41,7 @@ LoadRepresentation LoadRepresentationOf(Operator const* op) {
          IrOpcode::kWord64AtomicLoad == op->opcode() ||
          IrOpcode::kWord32AtomicPairLoad == op->opcode() ||
          IrOpcode::kPoisonedLoad == op->opcode() ||
-         IrOpcode::kUnalignedLoad == op->opcode() ||
-         IrOpcode::kLoadFromObject == op->opcode());
+         IrOpcode::kUnalignedLoad == op->opcode());
   return OpParameter<LoadRepresentation>(op);
 }
...
@@ -108,7 +108,6 @@ bool CanAllocate(const Node* node) {
     case IrOpcode::kLoad:
     case IrOpcode::kLoadElement:
     case IrOpcode::kLoadField:
-    case IrOpcode::kLoadFromObject:
     case IrOpcode::kPoisonedLoad:
     case IrOpcode::kProtectedLoad:
     case IrOpcode::kProtectedStore:
@@ -119,7 +118,6 @@ bool CanAllocate(const Node* node) {
     case IrOpcode::kStore:
     case IrOpcode::kStoreElement:
     case IrOpcode::kStoreField:
-    case IrOpcode::kStoreToObject:
    case IrOpcode::kTaggedPoisonOnSpeculation:
     case IrOpcode::kUnalignedLoad:
     case IrOpcode::kUnalignedStore:
...
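The memory-optimizer hunk above concerns allocation tracking rather than load elimination: opcodes listed in CanAllocate's switch are the ones the optimizer knows cannot trigger a GC, so allocation groups may stay open across them. Below is a minimal sketch of that contract, heavily simplified from the real CanAllocate and using a made-up opcode enum; it is an illustration of the semantics, not the V8 implementation.

    #include <cassert>

    // Hypothetical opcode enum standing in for IrOpcode (assumption for
    // this sketch only).
    enum class Op { kLoad, kLoadFromObject, kStoreToObject, kCall };

    // Explicitly listed opcodes return false ("cannot allocate"); anything
    // unlisted is conservatively assumed to allocate. The reverted commit
    // had added kLoadFromObject and kStoreToObject to the "false" list.
    bool CanAllocate(Op op) {
      switch (op) {
        case Op::kLoad:
          return false;  // raw loads never allocate
        default:
          return true;  // conservatively allocating; after the revert this
                        // again covers kLoadFromObject and kStoreToObject
      }
    }

    int main() {
      assert(!CanAllocate(Op::kLoad));
      assert(CanAllocate(Op::kLoadFromObject));  // conservative after revert
      return 0;
    }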
@@ -530,36 +530,6 @@ TEST(TestLoadEliminationVariableNoWrite) {
   asm_tester.GenerateCode();
 }
 
-TEST(TestRedundantArrayElementCheck) {
-  CcTest::InitializeVM();
-  Isolate* isolate(CcTest::i_isolate());
-  i::HandleScope scope(isolate);
-  Handle<Context> context =
-      Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
-  CodeAssemblerTester asm_tester(isolate);
-  TestTorqueAssembler m(asm_tester.state());
-  {
-    m.Return(m.TestRedundantArrayElementCheck(
-        m.UncheckedCast<Context>(m.HeapConstant(context))));
-  }
-  asm_tester.GenerateCode();
-}
-
-TEST(TestRedundantSmiCheck) {
-  CcTest::InitializeVM();
-  Isolate* isolate(CcTest::i_isolate());
-  i::HandleScope scope(isolate);
-  Handle<Context> context =
-      Utils::OpenHandle(*v8::Isolate::GetCurrent()->GetCurrentContext());
-  CodeAssemblerTester asm_tester(isolate);
-  TestTorqueAssembler m(asm_tester.state());
-  {
-    m.Return(m.TestRedundantSmiCheck(
-        m.UncheckedCast<Context>(m.HeapConstant(context))));
-  }
-  asm_tester.GenerateCode();
-}
-
 }  // namespace compiler
 }  // namespace internal
 }  // namespace v8
@@ -937,34 +937,4 @@ namespace test {
     StaticAssert(WordEqual(u1, u2));
   }
 
-  @export
-  macro TestRedundantArrayElementCheck(implicit context: Context)(): Smi {
-    const a = kEmptyFixedArray;
-    for (let i: Smi = 0; i < a.length; i++) {
-      if (a.objects[i] == Hole) {
-        if (a.objects[i] == Hole) {
-          return -1;
-        } else {
-          StaticAssert(false);
-        }
-      }
-    }
-    return 1;
-  }
-
-  @export
-  macro TestRedundantSmiCheck(implicit context: Context)(): Smi {
-    const a = kEmptyFixedArray;
-    const x = a.objects[1];
-    typeswitch (x) {
-      case (Smi): {
-        Cast<Smi>(x) otherwise VerifiedUnreachable();
-        return -1;
-      }
-      case (Object): {
-      }
-    }
-    return 1;
-  }
 }
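The two Torque macros deleted above are the interesting part of this revert: they encode optimizer expectations in the test itself, because StaticAssert(false) only compiles if the surrounding branch is proven dead. Below is a rough C++ analogue of the redundancy being tested; note that __builtin_unreachable merely promises the branch is dead, whereas StaticAssert verifies it, so this GCC/Clang-specific sketch only illustrates the shape of the check.

    // Rough analogue of the deleted TestRedundantArrayElementCheck: two
    // back-to-back loads of a[i] with no intervening write. If load
    // elimination merges them, the else branch is provably dead, which is
    // the property StaticAssert(false) asserted in the Torque test.
    int Classify(const int* a, int length, int hole) {
      for (int i = 0; i < length; i++) {
        if (a[i] == hole) {
          if (a[i] == hole) {  // redundant re-check of the same load
            return -1;
          } else {
            __builtin_unreachable();  // stand-in for StaticAssert(false)
          }
        }
      }
      return 1;
    }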
@@ -1119,7 +1119,6 @@ class IsStoreElementMatcher final : public TestNodeMatcher {
 LOAD_MATCHER(Load)
 LOAD_MATCHER(UnalignedLoad)
 LOAD_MATCHER(PoisonedLoad)
-LOAD_MATCHER(LoadFromObject)
 
 #define STORE_MATCHER(kStore)                                   \
   class Is##kStore##Matcher final : public TestNodeMatcher {    \
@@ -2038,16 +2037,6 @@ Matcher<Node*> IsUnalignedLoad(const Matcher<LoadRepresentation>& rep_matcher,
                                         control_matcher));
 }
 
-Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
-                                const Matcher<Node*>& base_matcher,
-                                const Matcher<Node*>& index_matcher,
-                                const Matcher<Node*>& effect_matcher,
-                                const Matcher<Node*>& control_matcher) {
-  return MakeMatcher(new IsLoadFromObjectMatcher(rep_matcher, base_matcher,
-                                                 index_matcher, effect_matcher,
-                                                 control_matcher));
-}
-
 Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
                        const Matcher<Node*>& base_matcher,
                        const Matcher<Node*>& index_matcher,
...
@@ -333,11 +333,6 @@ Matcher<Node*> IsUnalignedLoad(const Matcher<LoadRepresentation>& rep_matcher,
                                const Matcher<Node*>& index_matcher,
                                const Matcher<Node*>& effect_matcher,
                                const Matcher<Node*>& control_matcher);
-Matcher<Node*> IsLoadFromObject(const Matcher<LoadRepresentation>& rep_matcher,
-                                const Matcher<Node*>& base_matcher,
-                                const Matcher<Node*>& index_matcher,
-                                const Matcher<Node*>& effect_matcher,
-                                const Matcher<Node*>& control_matcher);
 Matcher<Node*> IsStore(const Matcher<StoreRepresentation>& rep_matcher,
                        const Matcher<Node*>& base_matcher,
                        const Matcher<Node*>& index_matcher,
...
@@ -63,15 +63,6 @@ Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoad(
   return ::i::compiler::IsLoad(rep_matcher, base_matcher, index_matcher, _, _);
 }
 
-Matcher<Node*>
-InterpreterAssemblerTest::InterpreterAssemblerForTest::IsLoadFromObject(
-    const Matcher<c::LoadRepresentation>& rep_matcher,
-    const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher) {
-  CHECK_NE(PoisoningMitigationLevel::kPoisonAll, poisoning_level());
-  return ::i::compiler::IsLoadFromObject(rep_matcher, base_matcher,
-                                         index_matcher, _, _);
-}
-
 Matcher<Node*> InterpreterAssemblerTest::InterpreterAssemblerForTest::IsStore(
     const Matcher<c::StoreRepresentation>& rep_matcher,
     const Matcher<Node*>& base_matcher, const Matcher<Node*>& index_matcher,
@@ -445,7 +436,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
   Node* load_constant = m.LoadConstantPoolEntry(index);
 #ifdef V8_COMPRESS_POINTERS
   Matcher<Node*> constant_pool_matcher =
-      IsChangeCompressedToTagged(m.IsLoadFromObject(
+      IsChangeCompressedToTagged(m.IsLoad(
           MachineType::AnyCompressed(),
           c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
           c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -457,7 +448,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
                               kHeapObjectTag),
           LoadSensitivity::kCritical)));
 #else
-  Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
+  Matcher<Node*> constant_pool_matcher = m.IsLoad(
       MachineType::AnyTagged(),
       c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
       c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -475,7 +466,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
   Node* load_constant = m.LoadConstantPoolEntry(index);
 #if V8_COMPRESS_POINTERS
   Matcher<Node*> constant_pool_matcher =
-      IsChangeCompressedToTagged(m.IsLoadFromObject(
+      IsChangeCompressedToTagged(m.IsLoad(
           MachineType::AnyCompressed(),
           c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
           c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -489,7 +480,7 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadConstantPoolEntry) {
           c::IsWordShl(index, c::IsIntPtrConstant(kTaggedSizeLog2))),
           LoadSensitivity::kCritical)));
 #else
-  Matcher<Node*> constant_pool_matcher = m.IsLoadFromObject(
+  Matcher<Node*> constant_pool_matcher = m.IsLoad(
       MachineType::AnyTagged(),
       c::IsParameter(InterpreterDispatchDescriptor::kBytecodeArray),
       c::IsIntPtrConstant(BytecodeArray::kConstantPoolOffset -
@@ -515,13 +506,13 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadObjectField) {
     int offset = 16;
     Node* load_field = m.LoadObjectField(object, offset);
 #ifdef V8_COMPRESS_POINTERS
-    EXPECT_THAT(load_field, IsChangeCompressedToTagged(m.IsLoadFromObject(
+    EXPECT_THAT(load_field, IsChangeCompressedToTagged(m.IsLoad(
                                 MachineType::AnyCompressed(), object,
                                 c::IsIntPtrConstant(offset - kHeapObjectTag))));
 #else
-    EXPECT_THAT(load_field, m.IsLoadFromObject(
-                                MachineType::AnyTagged(), object,
-                                c::IsIntPtrConstant(offset - kHeapObjectTag)));
+    EXPECT_THAT(load_field,
+                m.IsLoad(MachineType::AnyTagged(), object,
+                         c::IsIntPtrConstant(offset - kHeapObjectTag)));
 #endif
   }
 }
@@ -602,21 +593,21 @@ TARGET_TEST_F(InterpreterAssemblerTest, LoadFeedbackVector) {
                              kSystemPointerSize)));
 #ifdef V8_COMPRESS_POINTERS
     Matcher<Node*> load_vector_cell_matcher = IsChangeCompressedToTagged(
-        m.IsLoadFromObject(MachineType::AnyCompressed(), load_function_matcher,
-                           c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
-                                               kHeapObjectTag)));
+        m.IsLoad(MachineType::AnyCompressed(), load_function_matcher,
+                 c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset -
+                                     kHeapObjectTag)));
     EXPECT_THAT(load_feedback_vector,
-                IsChangeCompressedToTagged(m.IsLoadFromObject(
+                IsChangeCompressedToTagged(m.IsLoad(
                     MachineType::AnyCompressed(), load_vector_cell_matcher,
                     c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag))));
 #else
-    Matcher<Node*> load_vector_cell_matcher = m.IsLoadFromObject(
+    Matcher<Node*> load_vector_cell_matcher = m.IsLoad(
        MachineType::AnyTagged(), load_function_matcher,
         c::IsIntPtrConstant(JSFunction::kFeedbackCellOffset - kHeapObjectTag));
-    EXPECT_THAT(load_feedback_vector,
-                m.IsLoadFromObject(
-                    MachineType::AnyTagged(), load_vector_cell_matcher,
-                    c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
+    EXPECT_THAT(
+        load_feedback_vector,
+        m.IsLoad(MachineType::AnyTagged(), load_vector_cell_matcher,
+                 c::IsIntPtrConstant(Cell::kValueOffset - kHeapObjectTag)));
 #endif
   }
 }
...
@@ -44,10 +44,6 @@ class InterpreterAssemblerTest : public TestWithIsolateAndZone {
         const Matcher<compiler::Node*>& base_matcher,
        const Matcher<compiler::Node*>& index_matcher,
         LoadSensitivity needs_poisoning = LoadSensitivity::kSafe);
-    Matcher<compiler::Node*> IsLoadFromObject(
-        const Matcher<compiler::LoadRepresentation>& rep_matcher,
-        const Matcher<compiler::Node*>& base_matcher,
-        const Matcher<compiler::Node*>& index_matcher);
     Matcher<compiler::Node*> IsStore(
         const Matcher<compiler::StoreRepresentation>& rep_matcher,
         const Matcher<compiler::Node*>& base_matcher,
...