Commit d59c308a authored by Tobias Tebbi, committed by Commit Bot

[csa] use Turbofan's AllocateRaw node and the MemoryOptimizer

A first step towards using Turbofan's MemoryOptimizer for CSA/Torque.

Change-Id: I2ce9eaa602ea6f19a679e1cf5516c309a5a5051b
Reviewed-on: https://chromium-review.googlesource.com/c/1406675
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#58830}
parent 5f8a3e1e
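
Note: the payoff of routing CSA allocations through Turbofan's AllocateRaw node is that the MemoryOptimizer can lower every allocation to an inline bump-pointer fast path with a runtime-call slow path, and can fold adjacent allocations together. A minimal standalone C++ model of what that lowering produces, under entirely illustrative names (none of this is V8's actual code):

    #include <cstddef>
    #include <cstdint>
    #include <cstdlib>

    // Toy stand-in for a heap space: "top" and "limit" mirror the
    // allocation-top/allocation-limit words that generated code reads
    // through external references.
    struct Space {
      uintptr_t top;
      uintptr_t limit;
    };

    // Stand-in for the runtime slow path (in V8, a call to the
    // AllocateInNewSpace/AllocateInOldSpace builtins, which may GC).
    // This model just grabs fresh memory instead of collecting garbage.
    uintptr_t SlowAllocate(Space* space, size_t size) {
      (void)space;
      return reinterpret_cast<uintptr_t>(std::malloc(size));
    }

    uintptr_t AllocateRaw(Space* space, size_t size) {
      uintptr_t top = space->top;
      if (space->limit - top >= size) {
        space->top = top + size;         // fast path: bump the top pointer
        return top;
      }
      return SlowAllocate(space, size);  // slow path: runtime call
    }
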
@@ -1216,6 +1216,11 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
                                               AllocationFlags flags) {
   Comment("Allocate");
   bool const new_space = !(flags & kPretenured);
+  if (!(flags & kDoubleAlignment) && !(flags & kAllowLargeObjectAllocation)) {
+    return OptimizedAllocate(size_in_bytes, new_space
+                                                ? PretenureFlag::NOT_TENURED
+                                                : PretenureFlag::TENURED);
+  }
   TNode<ExternalReference> top_address = ExternalConstant(
       new_space
           ? ExternalReference::new_space_allocation_top_address(isolate())
......
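
Note: in the hunk above, only allocations that need neither double alignment nor the large-object path take the new OptimizedAllocate route; everything else falls through to the existing hand-written top/limit code. A sketch of the gating predicate and the pretenure mapping, with made-up flag values (V8 defines AllocationFlags with its own encoding):

    enum AllocationFlag : int {
      kNone = 0,
      kDoubleAlignment = 1 << 0,  // illustrative bit assignments only
      kPretenured = 1 << 1,
      kAllowLargeObjectAllocation = 1 << 2,
    };

    enum class PretenureFlag { NOT_TENURED, TENURED };

    // Mirrors the predicate in CodeStubAssembler::Allocate above.
    bool UsesOptimizedPath(int flags) {
      return !(flags & kDoubleAlignment) &&
             !(flags & kAllowLargeObjectAllocation);
    }

    // kPretenured selects old space; everything else goes to new space.
    PretenureFlag ToPretenure(int flags) {
      return (flags & kPretenured) ? PretenureFlag::TENURED
                                   : PretenureFlag::NOT_TENURED;
    }
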
@@ -1072,6 +1072,12 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
   raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
 }
 
+TNode<HeapObject> CodeAssembler::OptimizedAllocate(TNode<IntPtrT> size,
+                                                   PretenureFlag pretenure) {
+  return UncheckedCast<HeapObject>(
+      raw_assembler()->OptimizedAllocate(size, pretenure));
+}
+
 void CodeAssembler::HandleException(Node* node) {
   if (state_->exception_handler_labels_.size() == 0) return;
   CodeAssemblerExceptionHandlerLabel* label =
......
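
Note: CodeAssembler::OptimizedAllocate above is a thin typed wrapper. The raw assembler deals in untyped Node*, so the result is wrapped with UncheckedCast, which claims the HeapObject type statically without emitting a runtime check. A toy model of that pattern (all types here are hypothetical stand-ins):

    struct Node {};        // untyped IR node, as the raw assembler sees it
    struct IntPtrT {};
    struct HeapObject {};

    // Compile-time-typed handle used by the CSA layer.
    template <typename T>
    struct TNode {
      Node* node;
    };

    // "Unchecked": the caller asserts the node produces a T; nothing is
    // verified at runtime.
    template <typename T>
    TNode<T> UncheckedCast(Node* node) {
      return TNode<T>{node};
    }

    // Dummy stand-in for RawMachineAssembler::OptimizedAllocate.
    Node* RawOptimizedAllocate(Node* size) { return size; }

    TNode<HeapObject> OptimizedAllocate(TNode<IntPtrT> size) {
      return UncheckedCast<HeapObject>(RawOptimizedAllocate(size.node));
    }
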
@@ -1342,6 +1342,9 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   void GotoIfException(Node* node, Label* if_exception,
                        Variable* exception_var = nullptr);
 
+  TNode<HeapObject> OptimizedAllocate(TNode<IntPtrT> size,
+                                      PretenureFlag pretenure);
+
   // Helpers which delegate to RawMachineAssembler.
   Factory* factory() const;
   Isolate* isolate() const;
......
@@ -206,6 +206,12 @@ Node* GraphAssembler::BitcastWordToTagged(Node* value) {
       current_effect_, current_control_);
 }
 
+Node* GraphAssembler::BitcastTaggedToWord(Node* value) {
+  return current_effect_ =
+             graph()->NewNode(machine()->BitcastTaggedToWord(), value,
+                              current_effect_, current_control_);
+}
+
 Node* GraphAssembler::Word32PoisonOnSpeculation(Node* value) {
   return current_effect_ =
       graph()->NewNode(machine()->Word32PoisonOnSpeculation(), value,
......
@@ -217,6 +217,7 @@ class GraphAssembler {
   Node* ToNumber(Node* value);
   Node* BitcastWordToTagged(Node* value);
   Node* BitcastTaggedToWord(Node* value);
+  Node* Allocate(PretenureFlag pretenure, Node* size);
   Node* LoadField(FieldAccess const&, Node* object);
   Node* LoadElement(ElementAccess const&, Node* object, Node* index);
......
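
Note: GraphAssembler::BitcastTaggedToWord threads the new node through current_effect_ even though a bitcast computes nothing. Keeping it on the effect chain preserves its ordering relative to allocations and stores when the MemoryOptimizer walks the graph. A toy model of effect threading (greatly simplified, hypothetical classes):

    #include <cstdio>
    #include <deque>
    #include <string>

    // Toy effect chain: every emitted node records the previous effect,
    // so a later pass can recover the order of memory-relevant operations.
    struct Node {
      std::string op;
      Node* effect_in;
    };

    struct MiniAssembler {
      std::deque<Node> nodes;
      Node* current_effect = nullptr;
      Node* Emit(const std::string& op) {
        nodes.push_back(Node{op, current_effect});
        return current_effect = &nodes.back();  // new node becomes the effect
      }
    };

    int main() {
      MiniAssembler gasm;
      gasm.Emit("AllocateRaw");
      gasm.Emit("BitcastTaggedToWord");  // stays ordered after the allocation
      gasm.Emit("Store");
      for (const Node& n : gasm.nodes) {
        std::printf("%s after %s\n", n.op.c_str(),
                    n.effect_in ? n.effect_in->op.c_str() : "<start>");
      }
      return 0;
    }

Running it prints each operation together with its effect predecessor, i.e. exactly the ordering a later pass would recover.
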
@@ -97,21 +97,55 @@ void MemoryOptimizer::VisitNode(Node* node, AllocationState const* state) {
       return VisitStoreElement(node, state);
     case IrOpcode::kStoreField:
       return VisitStoreField(node, state);
+    case IrOpcode::kBitcastTaggedToWord:
+    case IrOpcode::kBitcastWordToTagged:
+    case IrOpcode::kComment:
     case IrOpcode::kDebugAbort:
+    case IrOpcode::kDebugBreak:
     case IrOpcode::kDeoptimizeIf:
     case IrOpcode::kDeoptimizeUnless:
     case IrOpcode::kIfException:
     case IrOpcode::kLoad:
+    case IrOpcode::kPoisonedLoad:
     case IrOpcode::kProtectedLoad:
-    case IrOpcode::kUnalignedLoad:
-    case IrOpcode::kStore:
     case IrOpcode::kProtectedStore:
-    case IrOpcode::kUnalignedStore:
     case IrOpcode::kRetain:
+    case IrOpcode::kStore:
+    case IrOpcode::kTaggedPoisonOnSpeculation:
+    case IrOpcode::kUnalignedLoad:
+    case IrOpcode::kUnalignedStore:
     case IrOpcode::kUnsafePointerAdd:
-    case IrOpcode::kDebugBreak:
     case IrOpcode::kUnreachable:
+    case IrOpcode::kWord32AtomicAdd:
+    case IrOpcode::kWord32AtomicAnd:
+    case IrOpcode::kWord32AtomicCompareExchange:
+    case IrOpcode::kWord32AtomicExchange:
+    case IrOpcode::kWord32AtomicLoad:
+    case IrOpcode::kWord32AtomicOr:
+    case IrOpcode::kWord32AtomicPairAdd:
+    case IrOpcode::kWord32AtomicPairAnd:
+    case IrOpcode::kWord32AtomicPairCompareExchange:
+    case IrOpcode::kWord32AtomicPairExchange:
+    case IrOpcode::kWord32AtomicPairLoad:
+    case IrOpcode::kWord32AtomicPairOr:
+    case IrOpcode::kWord32AtomicPairStore:
+    case IrOpcode::kWord32AtomicPairSub:
+    case IrOpcode::kWord32AtomicPairXor:
+    case IrOpcode::kWord32AtomicStore:
+    case IrOpcode::kWord32AtomicSub:
+    case IrOpcode::kWord32AtomicXor:
+    case IrOpcode::kWord32PoisonOnSpeculation:
+    case IrOpcode::kWord64AtomicAdd:
+    case IrOpcode::kWord64AtomicAnd:
+    case IrOpcode::kWord64AtomicCompareExchange:
+    case IrOpcode::kWord64AtomicExchange:
+    case IrOpcode::kWord64AtomicLoad:
+    case IrOpcode::kWord64AtomicOr:
+    case IrOpcode::kWord64AtomicStore:
+    case IrOpcode::kWord64AtomicSub:
+    case IrOpcode::kWord64AtomicXor:
+    case IrOpcode::kWord64PoisonOnSpeculation:
       // These operations cannot trigger GC.
       return VisitOtherEffect(node, state);
     default:
       break;
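
Note: the expanded case list above whitelists every operator that can never trigger a GC. The MemoryOptimizer tracks an allocation state along the effect chain, and as long as only whitelisted operators intervene, consecutive allocations can be folded into one allocation group, i.e. a single limit check and bump for the combined size. A sketch of the folding idea (hypothetical helpers; sizes picked arbitrarily):

    #include <cstdint>
    #include <cstdlib>

    struct Space { uintptr_t top, limit; };

    // Inline allocation as in the earlier sketch; slow path elided.
    uintptr_t AllocateRaw(Space* s, uintptr_t size) {
      if (s->limit - s->top < size) std::abort();
      uintptr_t result = s->top;
      s->top += size;
      return result;
    }

    // Two allocations with no possible GC between them can share a single
    // reservation: one limit check, one bump, for the combined size.
    void AllocatePairFolded(Space* s, uintptr_t* a, uintptr_t* b) {
      uintptr_t group = AllocateRaw(s, 16 + 32);
      *a = group;       // first object at the start of the group
      *b = group + 16;  // second object directly behind it
    }
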
@@ -250,7 +284,8 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
           CallDescriptor::kCanUseRoots, Operator::kNoThrow);
       allocate_operator_.set(common()->Call(call_descriptor));
     }
-    Node* vfalse = __ Call(allocate_operator_.get(), target, size);
+    Node* vfalse = __ BitcastTaggedToWord(
+        __ Call(allocate_operator_.get(), target, size));
     vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
     __ Goto(&done, vfalse);
   }
......
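
Note: the slow-path call returns a tagged HeapObject pointer, while the surrounding inlined code works on untagged addresses, so the result is converted to a word and kHeapObjectTag subtracted. Making that conversion an explicit BitcastTaggedToWord node, instead of an implicit reinterpretation, is the substance of this hunk. A standalone check of the tag arithmetic, assuming V8's kHeapObjectTag of 1:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kHeapObjectTag = 1;  // low bit marks a heap pointer

    int main() {
      alignas(8) static char object[16];  // stand-in heap object storage
      uintptr_t raw = reinterpret_cast<uintptr_t>(object);
      uintptr_t tagged = raw + kHeapObjectTag;  // what the call returns
      // Mirrors: vfalse = IntSub(BitcastTaggedToWord(result), kHeapObjectTag)
      uintptr_t untagged = tagged - kHeapObjectTag;
      assert(untagged == raw);
      return 0;
    }
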
@@ -2127,6 +2127,10 @@ MaybeHandle<Code> Pipeline::GenerateCodeForCodeStub(
     pipeline.Run<PrintGraphPhase>("Machine");
   }
 
+  // Optimize memory access and allocation operations.
+  pipeline.Run<MemoryOptimizationPhase>();
+  pipeline.RunPrintAndVerify(MemoryOptimizationPhase::phase_name(), true);
+
   pipeline.Run<CsaOptimizationPhase>();
   pipeline.RunPrintAndVerify(CsaOptimizationPhase::phase_name(), true);
......
@@ -23,6 +23,7 @@ RawMachineAssembler::RawMachineAssembler(
       schedule_(new (zone()) Schedule(zone())),
       machine_(zone(), word, flags, alignment_requirements),
       common_(zone()),
+      simplified_(zone()),
       call_descriptor_(call_descriptor),
       target_parameter_(nullptr),
       parameters_(parameter_count(), zone()),
@@ -57,6 +58,11 @@ Node* RawMachineAssembler::RelocatableIntPtrConstant(intptr_t value,
       : RelocatableInt32Constant(static_cast<int>(value), rmode);
 }
 
+Node* RawMachineAssembler::OptimizedAllocate(Node* size,
+                                             PretenureFlag pretenure) {
+  return AddNode(simplified()->AllocateRaw(Type::Any(), pretenure), size);
+}
+
 Schedule* RawMachineAssembler::Export() {
   // Compute the correct codegen order.
   DCHECK(schedule_->rpo_order()->empty());
......
@@ -12,6 +12,7 @@
 #include "src/compiler/machine-operator.h"
 #include "src/compiler/node.h"
 #include "src/compiler/operator.h"
+#include "src/compiler/simplified-operator.h"
 #include "src/globals.h"
 #include "src/heap/factory.h"
 #include "src/isolate.h"
@@ -55,6 +56,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   Zone* zone() const { return graph()->zone(); }
   MachineOperatorBuilder* machine() { return &machine_; }
   CommonOperatorBuilder* common() { return &common_; }
+  SimplifiedOperatorBuilder* simplified() { return &simplified_; }
   CallDescriptor* call_descriptor() const { return call_descriptor_; }
   PoisoningMitigationLevel poisoning_level() const { return poisoning_level_; }
@@ -145,6 +147,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   }
   Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }
 
+  Node* OptimizedAllocate(Node* size, PretenureFlag pretenure);
+
   // Unaligned memory operations
   Node* UnalignedLoad(MachineType type, Node* base) {
     return UnalignedLoad(type, base, IntPtrConstant(0));
@@ -995,6 +999,7 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   Schedule* schedule_;
   MachineOperatorBuilder machine_;
   CommonOperatorBuilder common_;
+  SimplifiedOperatorBuilder simplified_;
   CallDescriptor* call_descriptor_;
   Node* target_parameter_;
   NodeVector parameters_;
......
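
Note: RawMachineAssembler previously owned only machine- and common-level operator builders; the new simplified_ member exists solely so OptimizedAllocate can create the simplified AllocateRaw operator, which survives in the graph until the MemoryOptimizer lowers it. A toy version of the owning-builders pattern (hypothetical classes, not V8's):

    #include <string>

    struct Operator { std::string name; };

    struct MachineOperatorBuilder {
      Operator BitcastTaggedToWord() { return {"BitcastTaggedToWord"}; }
    };

    struct SimplifiedOperatorBuilder {
      Operator AllocateRaw() { return {"AllocateRaw"}; }
    };

    // The assembler owns one builder per operator tier and exposes
    // accessors, so node-creating helpers can pick the tier they need.
    class MiniAssembler {
     public:
      MachineOperatorBuilder* machine() { return &machine_; }
      SimplifiedOperatorBuilder* simplified() { return &simplified_; }

     private:
      MachineOperatorBuilder machine_;
      SimplifiedOperatorBuilder simplified_;  // new: only for AllocateRaw
    };
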
@@ -771,6 +771,8 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
     case Builtins::kRegExpConstructor:
     // Internal.
     case Builtins::kStrictPoisonPillThrower:
+    case Builtins::kAllocateInNewSpace:
+    case Builtins::kAllocateInOldSpace:
       return DebugInfo::kHasNoSideEffect;
     // Set builtins.
......
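
Note: since CSA allocations now reach the AllocateInNewSpace/AllocateInOldSpace builtins, the debugger's side-effect checker has to classify them as harmless: bumping the allocation top is not observable from JavaScript, so debug-evaluate may still allocate. A toy version of such a classification (illustrative subset of builtins):

    enum class Builtin {
      kAllocateInNewSpace,
      kAllocateInOldSpace,
      kArrayPush,  // example of a builtin with observable effects
    };

    bool HasNoSideEffect(Builtin b) {
      switch (b) {
        case Builtin::kAllocateInNewSpace:
        case Builtin::kAllocateInOldSpace:
          return true;   // pure allocation: no JS-visible state change
        default:
          return false;  // e.g. kArrayPush mutates its receiver
      }
    }
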