Commit e045a066 authored by bmeurer's avatar bmeurer Committed by Commit bot

[turbofan] Run everything after representation selection concurrently.

Further refactor the pipeline to even run the first scheduler (part of
the effect control linearization) concurrently. This temporarily
disables most of the write barrier elimination, but we will get back to
that later.

Review-Url: https://codereview.chromium.org/1926023002
Cr-Commit-Position: refs/heads/master@{#35861}
parent e7e7124c
......@@ -2661,6 +2661,44 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
  // Builtin entry point for generated code to allocate an object of the
  // requested size in new space, falling back to the runtime on failure.
  // ----------- S t a t e -------------
  //  -- r1 : requested object size (tagged)
  //  -- cp : context
  // -----------------------------------
  __ AssertSmi(r1);  // The size must arrive as a Smi.
  Label runtime;
  __ SmiUntag(r1);  // Allocate() takes the size in bytes, untagged.
  __ Allocate(r1, r0, r2, r3, &runtime, NO_ALLOCATION_FLAGS);
  __ Ret();  // On success the allocated object is returned in r0.
  // Inline allocation failed (e.g. new space is full): re-tag the size,
  // pass it as the sole argument, and let the runtime allocate (which may
  // trigger a GC).
  __ bind(&runtime);
  __ SmiTag(r1);
  __ Push(r1);
  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
  // Same as Generate_AllocateInNewSpace, but allocates directly in old
  // space (PRETENURE) and falls back to the target-space runtime function.
  // ----------- S t a t e -------------
  //  -- r1 : requested object size (tagged)
  //  -- cp : context
  // -----------------------------------
  __ AssertSmi(r1);  // The size must arrive as a Smi.
  Label runtime;
  __ SmiUntag(r1);  // Allocate() takes the size in bytes, untagged.
  __ Allocate(r1, r0, r2, r3, &runtime, PRETENURE);
  __ Ret();  // On success the allocated object is returned in r0.
  // Runtime fallback: push the re-tagged size plus the encoded target
  // space (OLD_SPACE) expected by Runtime::kAllocateInTargetSpace.
  __ bind(&runtime);
  __ SmiTag(r1);
  __ Push(r1);
  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -249,7 +249,7 @@ SIMD128_TYPES(SIMD128_ALLOC_DESC)
// Calling convention for the Allocate builtins on arm: the single register
// parameter is the requested object size.
//
// NOTE(review): the scraped diff retained both the removed `{r0}` and the
// added `{r1}` array definitions, which is a redefinition error; only the
// new `{r1}` line is kept, matching the r1 size register used by
// Builtins::Generate_AllocateInNewSpace/OldSpace.
void AllocateDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {r1};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -2751,6 +2751,46 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
  ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
  // Builtin entry point for generated code to allocate an object of the
  // requested size in new space, falling back to the runtime on failure.
  // ----------- S t a t e -------------
  //  -- x1 : requested object size (tagged)
  //  -- cp : context
  // -----------------------------------
  __ AssertSmi(x1);  // The size must arrive as a Smi.
  Label runtime;
  __ SmiUntag(x1);  // Allocate() takes the size in bytes, untagged.
  __ Allocate(x1, x0, x2, x3, &runtime, NO_ALLOCATION_FLAGS);
  __ Ret();  // On success the allocated object is returned in x0.
  // Inline allocation failed: re-tag the size and let the runtime
  // allocate (which may trigger a GC).
  __ Bind(&runtime);
  __ SmiTag(x1);
  __ Push(x1);
  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
  ASM_LOCATION("Builtins::Generate_AllocateInOldSpace");
  // Same as Generate_AllocateInNewSpace, but allocates directly in old
  // space (PRETENURE) and falls back to the target-space runtime function.
  // ----------- S t a t e -------------
  //  -- x1 : requested object size (tagged)
  //  -- cp : context
  // -----------------------------------
  __ AssertSmi(x1);  // The size must arrive as a Smi.
  Label runtime;
  __ SmiUntag(x1);  // Allocate() takes the size in bytes, untagged.
  __ Allocate(x1, x0, x2, x3, &runtime, PRETENURE);
  __ Ret();  // On success the allocated object is returned in x0.
  // Runtime fallback: push the re-tagged size plus the encoded target
  // space (OLD_SPACE) expected by Runtime::kAllocateInTargetSpace.
  __ Bind(&runtime);
  __ SmiTag(x1);
  __ Push(x1);
  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
......
......@@ -274,7 +274,7 @@ SIMD128_TYPES(SIMD128_ALLOC_DESC)
// Calling convention for the Allocate builtins on arm64: the single
// register parameter is the requested object size.
//
// NOTE(review): the scraped diff retained both the removed `{x0}` and the
// added `{x1}` array definitions, which is a redefinition error; only the
// new `{x1}` line is kept, matching the x1 size register used by
// Builtins::Generate_AllocateInNewSpace/OldSpace.
void AllocateDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {x1};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -172,6 +172,9 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
// Define list of builtins implemented in assembly.
#define BUILTIN_LIST_A(V) \
V(AllocateInNewSpace, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(AllocateInOldSpace, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(ConstructedNonConstructable, BUILTIN, UNINITIALIZED, kNoExtraICState) \
......@@ -439,6 +442,8 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_AllocateInNewSpace(MacroAssembler* masm);
static void Generate_AllocateInOldSpace(MacroAssembler* masm);
static void Generate_ConstructedNonConstructable(MacroAssembler* masm);
static void Generate_CompileLazy(MacroAssembler* masm);
static void Generate_CompileBaseline(MacroAssembler* masm);
......
......@@ -516,8 +516,10 @@ SIMD128_TYPES(SIMD128_ALLOC)
// static
// Returns the Callable used by the compiler to allocate raw heap storage.
// Dispatches to the space-specific Allocate builtin; both builtins share
// the AllocateDescriptor calling convention (size in a single register).
//
// NOTE(review): the scraped diff retained both the removed stub-based
// return (referencing the deleted AllocateStub) and the added
// builtin-based return; only the new return is kept.
Callable CodeFactory::Allocate(Isolate* isolate, PretenureFlag pretenure_flag) {
  return Callable(pretenure_flag == NOT_TENURED
                      ? isolate->builtins()->AllocateInNewSpace()
                      : isolate->builtins()->AllocateInOldSpace(),
                  AllocateDescriptor(isolate));
}
// static
......
......@@ -1289,16 +1289,6 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
return DoGenerateCode(this);
}
// Hydrogen graph for AllocateStub: a single HAllocate of the size given in
// parameter 0, honoring the stub's pretenure flag.
template <>
HValue* CodeStubGraphBuilder<AllocateStub>::BuildCodeStub() {
  HValue* result =
      Add<HAllocate>(GetParameter(0), HType::Tagged(),
                     casted_stub()->pretenure_flag(), JS_OBJECT_TYPE);
  return result;
}

Handle<Code> AllocateStub::GenerateCode() { return DoGenerateCode(this); }
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
ElementsKind kind,
AllocationSiteOverrideMode override_mode,
......
......@@ -4006,10 +4006,6 @@ void AllocateMutableHeapNumberStub::InitializeDescriptor(
SIMD128_TYPES(SIMD128_INIT_DESC)
#undef SIMD128_INIT_DESC
// AllocateStub needs no miss handler or extra state; a default-initialized
// descriptor suffices.
void AllocateStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
  descriptor->Initialize();
}
void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
descriptor->SetMissHandler(ExternalReference(
......
......@@ -57,7 +57,6 @@ namespace internal {
V(VectorStoreIC) \
V(VectorKeyedStoreIC) \
/* HydrogenCodeStubs */ \
V(Allocate) \
V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
......@@ -2750,23 +2749,6 @@ class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
SIMD128_TYPES(SIMD128_ALLOC_STUB)
#undef SIMD128_ALLOC_STUB
// Hydrogen code stub performing a raw heap allocation of a requested size.
// The pretenure decision (new vs. old space) is baked into the stub via its
// sub-minor key.
class AllocateStub final : public HydrogenCodeStub {
 public:
  AllocateStub(Isolate* isolate, PretenureFlag pretenure_flag)
      : HydrogenCodeStub(isolate) {
    // Pack the pretenure flag into the stub key so each flag value gets its
    // own compiled stub.
    set_sub_minor_key(PretenureFlagBits::encode(pretenure_flag));
  }

  // Decodes the pretenure flag back out of the sub-minor key.
  PretenureFlag pretenure_flag() const {
    return PretenureFlagBits::decode(sub_minor_key());
  }

 private:
  // A single bit is enough for the two-valued PretenureFlag.
  typedef BitField<PretenureFlag, 0, 1> PretenureFlagBits;

  DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate);
  DEFINE_HYDROGEN_CODE_STUB(Allocate, HydrogenCodeStub);
};
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
ArrayConstructorStubBase(Isolate* isolate,
......
This diff is collapsed.
......@@ -27,38 +27,24 @@ class ChangeLowering final : public Reducer {
Reduction Reduce(Node* node) final;
private:
Node* HeapNumberValueIndexConstant();
Node* SmiShiftBitsConstant();
Node* ChangeInt32ToFloat64(Node* value);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeSmiToFloat64(Node* value);
Node* ChangeSmiToWord32(Node* value);
Node* ChangeUint32ToFloat64(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* LoadHeapNumberValue(Node* value, Node* control);
Node* TestNotSmi(Node* value);
Reduction ChangeBitToBool(Node* value, Node* control);
Reduction ChangeBoolToBit(Node* value);
Reduction ChangeFloat64ToTagged(Node* value, Node* control);
Reduction ChangeInt31ToTagged(Node* value, Node* control);
Reduction ChangeInt32ToTagged(Node* value, Node* control);
Reduction ChangeTaggedSignedToInt32(Node* value);
Reduction ChangeUint32ToTagged(Node* value, Node* control);
Reduction ReduceChangeBitToBool(Node* value, Node* control);
Reduction ReduceChangeBoolToBit(Node* value);
Reduction ReduceChangeInt31ToTagged(Node* value, Node* control);
Reduction ReduceChangeTaggedSignedToInt32(Node* value);
Reduction LoadField(Node* node);
Reduction StoreField(Node* node);
Reduction LoadElement(Node* node);
Reduction StoreElement(Node* node);
Reduction Allocate(Node* node);
Reduction ReduceLoadField(Node* node);
Reduction ReduceStoreField(Node* node);
Reduction ReduceLoadElement(Node* node);
Reduction ReduceStoreElement(Node* node);
Reduction ReduceAllocate(Node* node);
Node* IsSmi(Node* value);
Node* LoadHeapObjectMap(Node* object, Node* control);
Node* LoadMapBitField(Node* map);
Node* LoadMapInstanceType(Node* map);
Reduction ObjectIsSmi(Node* node);
Reduction ReduceObjectIsSmi(Node* node);
Node* ComputeIndex(const ElementAccess& access, Node* const key);
Graph* graph() const;
......
......@@ -17,7 +17,7 @@ Node** CommonNodeCache::FindExternalConstant(ExternalReference value) {
// Looks up (or creates) the cache slot for a heap-constant node, keyed on
// the handle's stable address (used for hashing only, never dereferenced).
//
// NOTE(review): the scraped diff retained both the removed
// `value.location()` return and the added `value.address()` return, making
// the second statement unreachable; only the new form is kept.
Node** CommonNodeCache::FindHeapConstant(Handle<HeapObject> value) {
  return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.address()));
}
......
......@@ -513,6 +513,16 @@ Constant::Constant(RelocatablePtrConstantInfo info)
}
#endif
// Reconstitutes the heap-object handle stored (as an integer) in value_.
Handle<HeapObject> Constant::ToHeapObject() const {
  DCHECK_EQ(kHeapObject, type());
  // value_ holds the handle location bit-cast to an integer; cast it back.
  Handle<HeapObject> value(
      bit_cast<HeapObject**>(static_cast<intptr_t>(value_)));
  if (value->IsConsString()) {
    // Flatten cons strings before handing them out, allocating the flat
    // representation in old space (TENURED).
    value = String::Flatten(Handle<String>::cast(value), TENURED);
  }
  return value;
}
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
switch (constant.type()) {
case Constant::kInt32:
......
......@@ -1000,10 +1000,7 @@ class Constant final {
return RpoNumber::FromInt(static_cast<int>(value_));
}
// Reconstitutes the heap-object handle stored in value_; defined
// out-of-line (it flattens cons strings before returning).
//
// NOTE(review): the scraped diff retained both the removed inline
// definition and the added out-of-line declaration, which would be a
// redeclaration error; only the new declaration is kept.
Handle<HeapObject> ToHeapObject() const;
private:
Type type_;
......
......@@ -81,9 +81,6 @@ Node* JSGraph::NaNConstant() {
Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
if (value->IsConsString()) {
value = String::Flatten(Handle<String>::cast(value), TENURED);
}
Node** loc = cache_.FindHeapConstant(value);
if (*loc == nullptr) {
*loc = graph()->NewNode(common()->HeapConstant(value));
......
......@@ -1302,13 +1302,11 @@ bool Pipeline::CreateGraph() {
}
// Type the graph.
base::SmartPointer<Typer> typer;
typer.Reset(new Typer(isolate(), data->graph(),
info()->is_deoptimization_enabled()
? Typer::kDeoptimizationEnabled
: Typer::kNoFlags,
info()->dependencies()));
Run<TyperPhase>(typer.get());
Typer typer(isolate(), data->graph(), info()->is_deoptimization_enabled()
? Typer::kDeoptimizationEnabled
: Typer::kNoFlags,
info()->dependencies());
Run<TyperPhase>(&typer);
RunPrintAndVerify("Typed");
BeginPhaseKind("lowering");
......@@ -1335,16 +1333,26 @@ bool Pipeline::CreateGraph() {
Run<EarlyOptimizationPhase>();
RunPrintAndVerify("Early optimized");
EndPhaseKind();
return true;
}
bool Pipeline::OptimizeGraph(Linkage* linkage) {
PipelineData* data = this->data_;
BeginPhaseKind("block building");
Run<EffectControlLinearizationPhase>();
RunPrintAndVerify("Effect and control linearized");
RunPrintAndVerify("Effect and control linearized", true);
Run<BranchEliminationPhase>();
RunPrintAndVerify("Branch conditions eliminated");
RunPrintAndVerify("Branch conditions eliminated", true);
// Optimize control flow.
if (FLAG_turbo_cf_optimization) {
Run<ControlFlowOptimizationPhase>();
RunPrintAndVerify("Control flow optimized");
RunPrintAndVerify("Control flow optimized", true);
}
// Lower changes that have been inserted before.
......@@ -1352,19 +1360,6 @@ bool Pipeline::CreateGraph() {
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late optimized", true);
// Kill the Typer and thereby uninstall the decorator (if any).
typer.Reset(nullptr);
EndPhaseKind();
return true;
}
bool Pipeline::OptimizeGraph(Linkage* linkage) {
PipelineData* data = this->data_;
BeginPhaseKind("block building");
Run<LateGraphTrimmingPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late trimmed", true);
......
......@@ -43,6 +43,10 @@ class HandleBase {
V8_INLINE bool is_null() const { return location_ == nullptr; }
// Returns the raw address where this handle is stored. This should only be
// used for hashing handles; do not ever try to dereference it.
V8_INLINE Address address() const { return bit_cast<Address>(location_); }
protected:
// Provides the C++ dereference operator.
V8_INLINE Object* operator*() const {
......@@ -132,14 +136,14 @@ class Handle final : public HandleBase {
// Provide function object for location equality comparison.
// NOTE(review): the scraped diff retained both the removed
// `lhs.location() == rhs.location()` return and the added address-based
// return (second statement unreachable); only the new form is kept.
struct equal_to : public std::binary_function<Handle<T>, Handle<T>, bool> {
  V8_INLINE bool operator()(Handle<T> lhs, Handle<T> rhs) const {
    return lhs.address() == rhs.address();
  }
};
// Provide function object for location hashing.
// NOTE(review): the scraped diff retained both the removed
// `handle.location()` return and the added `handle.address()` return
// (second statement unreachable); only the new form is kept, matching the
// address-based equal_to so equal handles hash equally.
struct hash : public std::unary_function<Handle<T>, size_t> {
  V8_INLINE size_t operator()(Handle<T> const& handle) const {
    return base::hash<void*>()(handle.address());
  }
};
......
......@@ -2618,6 +2618,48 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
  // Builtin entry point for generated code to allocate an object of the
  // requested size in new space, falling back to the runtime on failure.
  // ----------- S t a t e -------------
  //  -- edx : requested object size (tagged)
  //  -- esi : context
  // -----------------------------------
  __ AssertSmi(edx);  // The size must arrive as a Smi.
  Label runtime;
  __ SmiUntag(edx);  // Allocate() takes the size in bytes, untagged.
  __ Allocate(edx, eax, ecx, edi, &runtime, NO_ALLOCATION_FLAGS);
  __ Ret();  // On success the allocated object is returned in eax.
  // Inline allocation failed: re-tag the size and call the runtime. The
  // return address sits on top of the stack, so pop it, push the argument
  // underneath, and push it back.
  __ bind(&runtime);
  __ SmiTag(edx);
  __ PopReturnAddressTo(ecx);
  __ Push(edx);
  __ PushReturnAddressFrom(ecx);
  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
  // Same as Generate_AllocateInNewSpace, but allocates directly in old
  // space (PRETENURE) and falls back to the target-space runtime function.
  // ----------- S t a t e -------------
  //  -- edx : requested object size (tagged)
  //  -- esi : context
  // -----------------------------------
  __ AssertSmi(edx);  // The size must arrive as a Smi.
  Label runtime;
  __ SmiUntag(edx);  // Allocate() takes the size in bytes, untagged.
  __ Allocate(edx, eax, ecx, edi, &runtime, PRETENURE);
  __ Ret();  // On success the allocated object is returned in eax.
  // Runtime fallback: push the re-tagged size plus the encoded target
  // space (OLD_SPACE) beneath the saved return address.
  __ bind(&runtime);
  __ SmiTag(edx);
  __ PopReturnAddressTo(ecx);
  __ Push(edx);
  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
  __ PushReturnAddressFrom(ecx);
  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -253,7 +253,7 @@ SIMD128_TYPES(SIMD128_ALLOC_DESC)
// Calling convention for the Allocate builtins on ia32: the single
// register parameter is the requested object size.
//
// NOTE(review): the scraped diff retained both the removed `{eax}` and the
// added `{edx}` array definitions, which is a redefinition error; only the
// new `{edx}` line is kept, matching the edx size register used by
// Builtins::Generate_AllocateInNewSpace/OldSpace.
void AllocateDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {edx};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -2736,6 +2736,44 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
  // Builtin entry point for generated code to allocate an object of the
  // requested size in new space, falling back to the runtime on failure.
  // ----------- S t a t e -------------
  //  -- a0 : requested object size (tagged)
  //  -- cp : context
  // -----------------------------------
  __ AssertSmi(a0);  // The size must arrive as a Smi.
  Label runtime;
  __ SmiUntag(a0);  // Allocate() takes the size in bytes, untagged.
  __ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS);
  __ Ret();  // On success the allocated object is returned in v0.
  // Inline allocation failed: re-tag the size and let the runtime
  // allocate (which may trigger a GC).
  __ bind(&runtime);
  __ SmiTag(a0);
  __ Push(a0);
  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
  // Same as Generate_AllocateInNewSpace, but allocates directly in old
  // space (PRETENURE) and falls back to the target-space runtime function.
  // ----------- S t a t e -------------
  //  -- a0 : requested object size (tagged)
  //  -- cp : context
  // -----------------------------------
  __ AssertSmi(a0);  // The size must arrive as a Smi.
  Label runtime;
  __ SmiUntag(a0);  // Allocate() takes the size in bytes, untagged.
  __ Allocate(a0, v0, a1, a2, &runtime, PRETENURE);
  __ Ret();  // On success the allocated object is returned in v0.
  // Runtime fallback: push the re-tagged size plus the encoded target
  // space (OLD_SPACE) expected by Runtime::kAllocateInTargetSpace.
  __ bind(&runtime);
  __ SmiTag(a0);
  __ Push(a0);
  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
......
......@@ -2724,6 +2724,44 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
  // Builtin entry point for generated code to allocate an object of the
  // requested size in new space, falling back to the runtime on failure.
  // ----------- S t a t e -------------
  //  -- a0 : requested object size (tagged)
  //  -- cp : context
  // -----------------------------------
  __ AssertSmi(a0);  // The size must arrive as a Smi.
  Label runtime;
  __ SmiUntag(a0);  // Allocate() takes the size in bytes, untagged.
  __ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS);
  __ Ret();  // On success the allocated object is returned in v0.
  // Inline allocation failed: re-tag the size and let the runtime
  // allocate (which may trigger a GC).
  __ bind(&runtime);
  __ SmiTag(a0);
  __ Push(a0);
  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
  // Same as Generate_AllocateInNewSpace, but allocates directly in old
  // space (PRETENURE) and falls back to the target-space runtime function.
  // ----------- S t a t e -------------
  //  -- a0 : requested object size (tagged)
  //  -- cp : context
  // -----------------------------------
  __ AssertSmi(a0);  // The size must arrive as a Smi.
  Label runtime;
  __ SmiUntag(a0);  // Allocate() takes the size in bytes, untagged.
  __ Allocate(a0, v0, a1, a2, &runtime, PRETENURE);
  __ Ret();  // On success the allocated object is returned in v0.
  // Runtime fallback: push the re-tagged size plus the encoded target
  // space (OLD_SPACE) expected by Runtime::kAllocateInTargetSpace.
  __ bind(&runtime);
  __ SmiTag(a0);
  __ Push(a0);
  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
......
......@@ -2042,6 +2042,48 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ PushReturnAddressFrom(rcx);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
  // Builtin entry point for generated code to allocate an object of the
  // requested size in new space, falling back to the runtime on failure.
  // ----------- S t a t e -------------
  //  -- rdx : requested object size (tagged)
  //  -- rsi : context
  // -----------------------------------
  __ AssertSmi(rdx);  // The size must arrive as a Smi.
  Label runtime;
  // x64 untags by converting the Smi to a 64-bit integer.
  __ SmiToInteger64(rdx, rdx);
  __ Allocate(rdx, rax, rcx, rdi, &runtime, NO_ALLOCATION_FLAGS);
  __ Ret();  // On success the allocated object is returned in rax.
  // Inline allocation failed: re-tag the size and call the runtime. The
  // return address sits on top of the stack, so pop it, push the argument
  // underneath, and push it back.
  __ bind(&runtime);
  __ Integer32ToSmi(rdx, rdx);
  __ PopReturnAddressTo(rcx);
  __ Push(rdx);
  __ PushReturnAddressFrom(rcx);
  __ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
  // Same as Generate_AllocateInNewSpace, but allocates directly in old
  // space (PRETENURE) and falls back to the target-space runtime function.
  // ----------- S t a t e -------------
  //  -- rdx : requested object size (tagged)
  //  -- rsi : context
  // -----------------------------------
  __ AssertSmi(rdx);  // The size must arrive as a Smi.
  Label runtime;
  // x64 untags by converting the Smi to a 64-bit integer.
  __ SmiToInteger64(rdx, rdx);
  __ Allocate(rdx, rax, rcx, rdi, &runtime, PRETENURE);
  __ Ret();  // On success the allocated object is returned in rax.
  // Runtime fallback: push the re-tagged size plus the encoded target
  // space (OLD_SPACE) beneath the saved return address.
  __ bind(&runtime);
  __ Integer32ToSmi(rdx, rdx);
  __ PopReturnAddressTo(rcx);
  __ Push(rdx);
  __ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
  __ PushReturnAddressFrom(rcx);
  __ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
......
......@@ -244,7 +244,7 @@ SIMD128_TYPES(SIMD128_ALLOC_DESC)
// Calling convention for the Allocate builtins on x64: the single register
// parameter is the requested object size.
//
// NOTE(review): the scraped diff retained both the removed `{rax}` and the
// added `{rdx}` array definitions, which is a redefinition error; only the
// new `{rdx}` line is kept, matching the rdx size register used by
// Builtins::Generate_AllocateInNewSpace/OldSpace.
void AllocateDescriptor::InitializePlatformSpecific(
    CallInterfaceDescriptorData* data) {
  Register registers[] = {rdx};
  data->InitializePlatformSpecific(arraysize(registers), registers);
}
......
......@@ -1370,7 +1370,7 @@ TEST(LowerStoreField_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p1, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
}
......@@ -1435,7 +1435,7 @@ TEST(LowerStoreElement_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p2, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
}
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment