Commit d1b3d426 authored by bmeurer, committed by Commit bot

[turbofan] Run everything after representation selection concurrently.

Further refactor the pipeline to even run the first scheduler (part of
the effect control linearization) concurrently. This temporarily
disables most of the write barrier elimination, but we will get back to
that later.
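
For orientation, the pipeline.cc hunk further down moves effect/control linearization (and every phase after it) out of CreateGraph and into OptimizeGraph, which may run off the main thread. A minimal sketch of the new phase boundary, using only method and phase names that appear in this CL (bodies elided):

bool Pipeline::CreateGraph() {
  // ... graph building, typing, typed lowering, early optimization ...
  EndPhaseKind();  // CreateGraph now ends before the first scheduler runs.
  return true;
}

bool Pipeline::OptimizeGraph(Linkage* linkage) {
  PipelineData* data = this->data_;
  BeginPhaseKind("block building");
  // The "first scheduler" (effect/control linearization) now runs here,
  // i.e. potentially on a background thread.
  Run<EffectControlLinearizationPhase>();
  Run<BranchEliminationPhase>();
  // ... control flow optimization, late lowering, graph trimming ...
  return true;
}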

Drive-by-fix: Remove the dead code from ChangeLowering, and stack
allocate the Typer in the pipeline. Also migrate the AllocateStub to a
native code builtin, so that we have the code object + a handle to it
available all the time.
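
As a hedged illustration of how the compiler consumes the new builtins (the helper below is hypothetical; the builtin constants, descriptor, and register assignments are taken from the jsgraph.cc, linkage.cc, and frames-header hunks in this CL): a simplified Allocate node becomes an ordinary code-object call that passes the untagged size in kAllocateSizeRegister and returns the tagged object in kReturnRegister0.

// Hypothetical sketch of the new allocation call path.
Node* LowerAllocate(JSGraph* jsgraph, Graph* graph, Zone* zone,
                    PretenureFlag pretenure, Node* size, Node* effect,
                    Node* control) {
  // Pick the builtin for the target space; both are cached HeapConstants.
  Node* target = pretenure == NOT_TENURED
                     ? jsgraph->AllocateInNewSpaceStubConstant()
                     : jsgraph->AllocateInOldSpaceStubConstant();
  // One descriptor serves both builtins: Int32 size in, AnyTagged out.
  CallDescriptor* descriptor = Linkage::GetAllocateCallDescriptor(zone);
  return graph->NewNode(jsgraph->common()->Call(descriptor), target, size,
                        effect, control);
}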

CQ_INCLUDE_TRYBOTS=tryserver.v8:v8_linux64_tsan_rel
R=mstarzinger@chromium.org
BUG=v8:4969
LOG=n

Review-Url: https://codereview.chromium.org/1926023002
Cr-Commit-Position: refs/heads/master@{#35918}
parent 987bd9cc
@@ -2661,6 +2661,40 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(r1, r0, r2, r3, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(r1);
__ Push(r1);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- r1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(r1, r0, r2, r3, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(r1);
__ Move(r2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(r1, r2);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
...
@@ -247,13 +247,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {r0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
...
@@ -19,6 +19,7 @@ const Register kReturnRegister1 = {Register::kCode_r1};
const Register kReturnRegister2 = {Register::kCode_r2};
const Register kJSFunctionRegister = {Register::kCode_r1};
const Register kContextRegister = {Register::kCode_r7};
const Register kAllocateSizeRegister = {Register::kCode_r1};
const Register kInterpreterAccumulatorRegister = {Register::kCode_r0};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r5};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r6};
...
@@ -2751,6 +2751,42 @@ void Builtins::Generate_InterpreterPushArgsAndConstruct(MacroAssembler* masm) {
__ Jump(masm->isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInNewSpace");
// ----------- S t a t e -------------
// -- x1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(x1, x0, x2, x3, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ Bind(&runtime);
__ SmiTag(x1);
__ Push(x1);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_AllocateInOldSpace");
// ----------- S t a t e -------------
// -- x1 : requested object size (untagged)
// -- lr : return address
// -----------------------------------
Label runtime;
__ Allocate(x1, x0, x2, x3, &runtime, PRETENURE);
__ Ret();
__ Bind(&runtime);
__ SmiTag(x1);
__ Move(x2, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(x1, x2);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
ASM_LOCATION("Builtins::Generate_ArgumentsAdaptorTrampoline");
...
@@ -272,13 +272,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {x0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// x1: function
...
@@ -39,6 +39,7 @@ namespace internal {
#define kReturnRegister2 x2
#define kJSFunctionRegister x1
#define kContextRegister cp
#define kAllocateSizeRegister x1
#define kInterpreterAccumulatorRegister x0
#define kInterpreterBytecodeOffsetRegister x19
#define kInterpreterBytecodeArrayRegister x20
...
@@ -175,6 +175,9 @@ inline bool operator&(BuiltinExtraArguments lhs, BuiltinExtraArguments rhs) {
// Define list of builtins implemented in assembly.
#define BUILTIN_LIST_A(V) \
V(AllocateInNewSpace, BUILTIN, UNINITIALIZED, kNoExtraICState) \
V(AllocateInOldSpace, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(ArgumentsAdaptorTrampoline, BUILTIN, UNINITIALIZED, kNoExtraICState) \
\
V(ConstructedNonConstructable, BUILTIN, UNINITIALIZED, kNoExtraICState) \
@@ -443,6 +446,8 @@ class Builtins {
static void Generate_Adaptor(MacroAssembler* masm,
CFunctionId id,
BuiltinExtraArguments extra_args);
static void Generate_AllocateInNewSpace(MacroAssembler* masm);
static void Generate_AllocateInOldSpace(MacroAssembler* masm);
static void Generate_ConstructedNonConstructable(MacroAssembler* masm);
static void Generate_CompileLazy(MacroAssembler* masm);
static void Generate_CompileBaseline(MacroAssembler* masm);
...
@@ -514,12 +514,6 @@ Callable CodeFactory::AllocateMutableHeapNumber(Isolate* isolate) {
SIMD128_TYPES(SIMD128_ALLOC)
#undef SIMD128_ALLOC
// static
Callable CodeFactory::Allocate(Isolate* isolate, PretenureFlag pretenure_flag) {
AllocateStub stub(isolate, pretenure_flag);
return Callable(stub.GetCode(), stub.GetCallInterfaceDescriptor());
}
// static
Callable CodeFactory::ArgumentAdaptor(Isolate* isolate) {
return Callable(isolate->builtins()->ArgumentsAdaptorTrampoline(),
...
@@ -135,7 +135,6 @@ class CodeFactory final {
static Callable Allocate##Type(Isolate* isolate);
SIMD128_TYPES(SIMD128_ALLOC)
#undef SIMD128_ALLOC
static Callable Allocate(Isolate* isolate, PretenureFlag pretenure_flag);
static Callable ArgumentAdaptor(Isolate* isolate);
static Callable Call(Isolate* isolate,
...
@@ -1289,16 +1289,6 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<AllocateStub>::BuildCodeStub() {
HValue* result =
Add<HAllocate>(GetParameter(0), HType::Tagged(),
casted_stub()->pretenure_flag(), JS_OBJECT_TYPE);
return result;
}
Handle<Code> AllocateStub::GenerateCode() { return DoGenerateCode(this); }
HValue* CodeStubGraphBuilderBase::BuildArrayConstructor(
ElementsKind kind,
AllocationSiteOverrideMode override_mode,
...
@@ -4056,10 +4056,6 @@ void AllocateMutableHeapNumberStub::InitializeDescriptor(
SIMD128_TYPES(SIMD128_INIT_DESC)
#undef SIMD128_INIT_DESC
void AllocateStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize();
}
void ToBooleanICStub::InitializeDescriptor(CodeStubDescriptor* descriptor) {
descriptor->Initialize(FUNCTION_ADDR(Runtime_ToBooleanIC_Miss));
descriptor->SetMissHandler(ExternalReference(
...
@@ -57,7 +57,6 @@ namespace internal {
V(VectorStoreIC) \
V(VectorKeyedStoreIC) \
/* HydrogenCodeStubs */ \
V(Allocate) \
V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
V(ArraySingleArgumentConstructor) \
@@ -2758,23 +2757,6 @@ class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
SIMD128_TYPES(SIMD128_ALLOC_STUB)
#undef SIMD128_ALLOC_STUB
class AllocateStub final : public HydrogenCodeStub {
public:
AllocateStub(Isolate* isolate, PretenureFlag pretenure_flag)
: HydrogenCodeStub(isolate) {
set_sub_minor_key(PretenureFlagBits::encode(pretenure_flag));
}
PretenureFlag pretenure_flag() const {
return PretenureFlagBits::decode(sub_minor_key());
}
private:
typedef BitField<PretenureFlag, 0, 1> PretenureFlagBits;
DEFINE_CALL_INTERFACE_DESCRIPTOR(Allocate);
DEFINE_HYDROGEN_CODE_STUB(Allocate, HydrogenCodeStub);
};
class ArrayConstructorStubBase : public HydrogenCodeStub {
public:
ArrayConstructorStubBase(Isolate* isolate,
...
@@ -27,38 +27,24 @@ class ChangeLowering final : public Reducer {
Reduction Reduce(Node* node) final;
private:
Node* HeapNumberValueIndexConstant();
Node* SmiShiftBitsConstant();
Node* ChangeInt32ToFloat64(Node* value);
Node* ChangeInt32ToSmi(Node* value);
Node* ChangeSmiToFloat64(Node* value);
Node* ChangeSmiToWord32(Node* value);
Node* ChangeUint32ToFloat64(Node* value);
Node* ChangeUint32ToSmi(Node* value);
Node* LoadHeapNumberValue(Node* value, Node* control);
Node* TestNotSmi(Node* value);
Reduction ChangeBitToBool(Node* value, Node* control);
Reduction ChangeBoolToBit(Node* value);
Reduction ChangeFloat64ToTagged(Node* value, Node* control);
Reduction ChangeInt31ToTagged(Node* value, Node* control);
Reduction ChangeInt32ToTagged(Node* value, Node* control);
Reduction ChangeTaggedSignedToInt32(Node* value);
Reduction ChangeUint32ToTagged(Node* value, Node* control);
Reduction LoadField(Node* node);
Reduction StoreField(Node* node);
Reduction LoadElement(Node* node);
Reduction StoreElement(Node* node);
Reduction Allocate(Node* node);
Node* IsSmi(Node* value);
Node* LoadHeapObjectMap(Node* object, Node* control);
Node* LoadMapBitField(Node* map);
Node* LoadMapInstanceType(Node* map);
Reduction ObjectIsSmi(Node* node);
Reduction ReduceChangeBitToBool(Node* value, Node* control);
Reduction ReduceChangeBoolToBit(Node* value);
Reduction ReduceChangeInt31ToTagged(Node* value, Node* control);
Reduction ReduceChangeTaggedSignedToInt32(Node* value);
Reduction ReduceLoadField(Node* node);
Reduction ReduceStoreField(Node* node);
Reduction ReduceLoadElement(Node* node);
Reduction ReduceStoreElement(Node* node);
Reduction ReduceAllocate(Node* node);
Reduction ReduceObjectIsSmi(Node* node);
Node* ComputeIndex(const ElementAccess& access, Node* const key);
Graph* graph() const;
@@ -68,7 +54,7 @@ class ChangeLowering final : public Reducer {
MachineOperatorBuilder* machine() const;
JSGraph* const jsgraph_;
SetOncePointer<const Operator> allocate_heap_number_operator_;
SetOncePointer<const Operator> allocate_operator_;
};
} // namespace compiler
...
@@ -17,7 +17,7 @@ Node** CommonNodeCache::FindExternalConstant(ExternalReference value) {
Node** CommonNodeCache::FindHeapConstant(Handle<HeapObject> value) {
return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.location()));
return heap_constants_.Find(zone(), bit_cast<intptr_t>(value.address()));
}
...
@@ -839,9 +839,9 @@ EffectControlLinearizer::ValueEffectControl
EffectControlLinearizer::AllocateHeapNumberWithValue(Node* value, Node* effect,
Node* control) {
effect = graph()->NewNode(common()->BeginRegion(), effect);
Node* result = effect =
graph()->NewNode(simplified()->Allocate(NOT_TENURED),
jsgraph()->Constant(HeapNumber::kSize), effect, control);
Node* result = effect = graph()->NewNode(
simplified()->Allocate(NOT_TENURED),
jsgraph()->Int32Constant(HeapNumber::kSize), effect, control);
effect = graph()->NewNode(simplified()->StoreField(AccessBuilder::ForMap()),
result, jsgraph()->HeapNumberMapConstant(), effect,
control);
...
@@ -513,6 +513,16 @@ Constant::Constant(RelocatablePtrConstantInfo info)
}
#endif
Handle<HeapObject> Constant::ToHeapObject() const {
DCHECK_EQ(kHeapObject, type());
Handle<HeapObject> value(
bit_cast<HeapObject**>(static_cast<intptr_t>(value_)));
if (value->IsConsString()) {
value = String::Flatten(Handle<String>::cast(value), TENURED);
}
return value;
}
std::ostream& operator<<(std::ostream& os, const Constant& constant) {
switch (constant.type()) {
case Constant::kInt32:
...
@@ -1000,10 +1000,7 @@ class Constant final {
return RpoNumber::FromInt(static_cast<int>(value_));
}
Handle<HeapObject> ToHeapObject() const {
DCHECK_EQ(kHeapObject, type());
return bit_cast<Handle<HeapObject> >(static_cast<intptr_t>(value_));
}
Handle<HeapObject> ToHeapObject() const;
private:
Type type_;
...
@@ -14,6 +14,15 @@ namespace compiler {
#define CACHED(name, expr) \
cached_nodes_[name] ? cached_nodes_[name] : (cached_nodes_[name] = (expr))
Node* JSGraph::AllocateInNewSpaceStubConstant() {
return CACHED(kAllocateInNewSpaceStubConstant,
HeapConstant(isolate()->builtins()->AllocateInNewSpace()));
}
Node* JSGraph::AllocateInOldSpaceStubConstant() {
return CACHED(kAllocateInOldSpaceStubConstant,
HeapConstant(isolate()->builtins()->AllocateInOldSpace()));
}
Node* JSGraph::CEntryStubConstant(int result_size) {
if (result_size == 1) {
@@ -81,9 +90,6 @@ Node* JSGraph::NaNConstant() {
Node* JSGraph::HeapConstant(Handle<HeapObject> value) {
if (value->IsConsString()) {
value = String::Flatten(Handle<String>::cast(value), TENURED);
}
Node** loc = cache_.FindHeapConstant(value);
if (*loc == nullptr) {
*loc = graph()->NewNode(common()->HeapConstant(value));
...
@@ -39,6 +39,8 @@ class JSGraph : public ZoneObject {
}
// Canonicalized global constants.
Node* AllocateInNewSpaceStubConstant();
Node* AllocateInOldSpaceStubConstant();
Node* CEntryStubConstant(int result_size);
Node* EmptyFixedArrayConstant();
Node* HeapNumberMapConstant();
@@ -140,6 +142,8 @@ class JSGraph : public ZoneObject {
private:
enum CachedNode {
kAllocateInNewSpaceStubConstant,
kAllocateInOldSpaceStubConstant,
kCEntryStubConstant,
kEmptyFixedArrayConstant,
kHeapNumberMapConstant,
...
@@ -404,6 +404,35 @@ CallDescriptor* Linkage::GetStubCallDescriptor(
descriptor.DebugName(isolate));
}
// static
CallDescriptor* Linkage::GetAllocateCallDescriptor(Zone* zone) {
LocationSignature::Builder locations(zone, 1, 1);
MachineSignature::Builder types(zone, 1, 1);
locations.AddParam(regloc(kAllocateSizeRegister));
types.AddParam(MachineType::Int32());
locations.AddReturn(regloc(kReturnRegister0));
types.AddReturn(MachineType::AnyTagged());
// The target for allocate calls is a code object.
MachineType target_type = MachineType::AnyTagged();
LinkageLocation target_loc = LinkageLocation::ForAnyRegister();
return new (zone) CallDescriptor( // --
CallDescriptor::kCallCodeObject, // kind
target_type, // target MachineType
target_loc, // target location
types.Build(), // machine_sig
locations.Build(), // location_sig
0, // stack_parameter_count
Operator::kNoThrow, // properties
kNoCalleeSaved, // callee-saved registers
kNoCalleeSaved, // callee-saved fp
CallDescriptor::kCanUseRoots, // flags
"Allocate");
}
// static
CallDescriptor* Linkage::GetBytecodeDispatchCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count) {
...
@@ -333,6 +333,7 @@ class Linkage : public ZoneObject {
MachineType return_type = MachineType::AnyTagged(),
size_t return_count = 1);
static CallDescriptor* GetAllocateCallDescriptor(Zone* zone);
static CallDescriptor* GetBytecodeDispatchCallDescriptor(
Isolate* isolate, Zone* zone, const CallInterfaceDescriptor& descriptor,
int stack_parameter_count);
...
@@ -1314,13 +1314,11 @@ bool Pipeline::CreateGraph() {
}
// Type the graph.
base::SmartPointer<Typer> typer;
typer.Reset(new Typer(isolate(), data->graph(),
info()->is_deoptimization_enabled()
? Typer::kDeoptimizationEnabled
: Typer::kNoFlags,
info()->dependencies()));
Run<TyperPhase>(typer.get());
Typer typer(isolate(), data->graph(), info()->is_deoptimization_enabled()
? Typer::kDeoptimizationEnabled
: Typer::kNoFlags,
info()->dependencies());
Run<TyperPhase>(&typer);
RunPrintAndVerify("Typed");
BeginPhaseKind("lowering");
@@ -1347,16 +1345,26 @@ bool Pipeline::CreateGraph() {
Run<EarlyOptimizationPhase>();
RunPrintAndVerify("Early optimized");
EndPhaseKind();
return true;
}
bool Pipeline::OptimizeGraph(Linkage* linkage) {
PipelineData* data = this->data_;
BeginPhaseKind("block building");
Run<EffectControlLinearizationPhase>();
RunPrintAndVerify("Effect and control linearized");
RunPrintAndVerify("Effect and control linearized", true);
Run<BranchEliminationPhase>();
RunPrintAndVerify("Branch conditions eliminated");
RunPrintAndVerify("Branch conditions eliminated", true);
// Optimize control flow.
if (FLAG_turbo_cf_optimization) {
Run<ControlFlowOptimizationPhase>();
RunPrintAndVerify("Control flow optimized");
RunPrintAndVerify("Control flow optimized", true);
}
// Lower changes that have been inserted before.
@@ -1364,19 +1372,6 @@ bool Pipeline::CreateGraph() {
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late optimized", true);
// Kill the Typer and thereby uninstall the decorator (if any).
typer.Reset(nullptr);
EndPhaseKind();
return true;
}
bool Pipeline::OptimizeGraph(Linkage* linkage) {
PipelineData* data = this->data_;
BeginPhaseKind("block building");
Run<LateGraphTrimmingPhase>();
// TODO(jarin, rossberg): Remove UNTYPED once machine typing works.
RunPrintAndVerify("Late trimmed", true);
...
@@ -1108,7 +1108,7 @@ class RepresentationSelector {
break;
}
case IrOpcode::kAllocate: {
ProcessInput(node, 0, UseInfo::AnyTagged());
ProcessInput(node, 0, UseInfo::TruncatingWord32());
ProcessRemainingInputs(node, 1);
SetOutput(node, MachineRepresentation::kTagged);
break;
...
@@ -214,6 +214,15 @@ struct SimplifiedOperatorGlobalCache final {
PURE_OP_LIST(PURE)
#undef PURE
template <PretenureFlag kPretenure>
struct AllocateOperator final : public Operator1<PretenureFlag> {
AllocateOperator()
: Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
"Allocate", 1, 1, 1, 1, 1, 0, kPretenure) {}
};
AllocateOperator<NOT_TENURED> kAllocateNotTenuredOperator;
AllocateOperator<TENURED> kAllocateTenuredOperator;
#define BUFFER_ACCESS(Type, type, TYPE, ctype, size) \
struct LoadBuffer##Type##Operator final : public Operator1<BufferAccess> { \
LoadBuffer##Type##Operator() \
@@ -258,9 +267,14 @@ const Operator* SimplifiedOperatorBuilder::ReferenceEqual(Type* type) {
const Operator* SimplifiedOperatorBuilder::Allocate(PretenureFlag pretenure) {
return new (zone())
Operator1<PretenureFlag>(IrOpcode::kAllocate, Operator::kNoThrow,
"Allocate", 1, 1, 1, 1, 1, 0, pretenure);
switch (pretenure) {
case NOT_TENURED:
return &cache_.kAllocateNotTenuredOperator;
case TENURED:
return &cache_.kAllocateTenuredOperator;
}
UNREACHABLE();
return nullptr;
}
...
@@ -43,6 +43,10 @@ class HandleBase {
V8_INLINE bool is_null() const { return location_ == nullptr; }
// Returns the raw address where this handle is stored. This should only be
// used for hashing handles; do not ever try to dereference it.
V8_INLINE Address address() const { return bit_cast<Address>(location_); }
protected:
// Provides the C++ dereference operator.
V8_INLINE Object* operator*() const {
@@ -132,14 +136,14 @@ class Handle final : public HandleBase {
// Provide function object for location equality comparison.
struct equal_to : public std::binary_function<Handle<T>, Handle<T>, bool> {
V8_INLINE bool operator()(Handle<T> lhs, Handle<T> rhs) const {
return lhs.location() == rhs.location();
return lhs.address() == rhs.address();
}
};
// Provide function object for location hashing.
struct hash : public std::unary_function<Handle<T>, size_t> {
V8_INLINE size_t operator()(Handle<T> const& handle) const {
return base::hash<void*>()(handle.location());
return base::hash<void*>()(handle.address());
}
};
...
@@ -2618,6 +2618,44 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : requested object size (untagged)
// -- esp[0] : return address
// -----------------------------------
Label runtime;
__ Allocate(edx, eax, ecx, edi, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(edx);
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ PushReturnAddressFrom(ecx);
__ Move(esi, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- edx : requested object size (untagged)
// -- esp[0] : return address
// -----------------------------------
Label runtime;
__ Allocate(edx, eax, ecx, edi, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(edx);
__ PopReturnAddressTo(ecx);
__ Push(edx);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ PushReturnAddressFrom(ecx);
__ Move(esi, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
...
@@ -251,13 +251,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {eax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
...
@@ -19,6 +19,7 @@ const Register kReturnRegister1 = {Register::kCode_edx};
const Register kReturnRegister2 = {Register::kCode_edi};
const Register kJSFunctionRegister = {Register::kCode_edi};
const Register kContextRegister = {Register::kCode_esi};
const Register kAllocateSizeRegister = {Register::kCode_edx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_eax};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_ecx};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_edi};
...
@@ -58,7 +58,6 @@ class PlatformInterfaceDescriptor;
V(AllocateInt8x16) \
V(AllocateUint8x16) \
V(AllocateBool8x16) \
V(Allocate) \
V(ArrayConstructorConstantArgCount) \
V(ArrayConstructor) \
V(InternalArrayConstructorConstantArgCount) \
@@ -578,11 +577,6 @@ class AllocateMutableHeapNumberDescriptor : public CallInterfaceDescriptor {
CallInterfaceDescriptor)
};
class AllocateDescriptor : public CallInterfaceDescriptor {
public:
DECLARE_DESCRIPTOR(AllocateDescriptor, CallInterfaceDescriptor)
};
class ArrayConstructorConstantArgCountDescriptor
: public CallInterfaceDescriptor {
...
@@ -2736,6 +2736,40 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Push(a0);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(a0, a1);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
...
@@ -246,13 +246,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
...
@@ -18,6 +18,7 @@ const Register kReturnRegister1 = {Register::kCode_v1};
const Register kReturnRegister2 = {Register::kCode_a0};
const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
const Register kAllocateSizeRegister = {Register::kCode_a0};
const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t4};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t5};
...
@@ -2724,6 +2724,40 @@ void Builtins::Generate_Construct(MacroAssembler* masm) {
RelocInfo::CODE_TARGET);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Push(a0);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- a0 : requested object size (untagged)
// -- ra : return address
// -----------------------------------
Label runtime;
__ Allocate(a0, v0, a1, a2, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ SmiTag(a0);
__ Move(a1, Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ Push(a0, a1);
__ Move(cp, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// State setup as expected by MacroAssembler::InvokePrologue.
...
@@ -246,13 +246,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {a0};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
...
@@ -18,6 +18,7 @@ const Register kReturnRegister1 = {Register::kCode_v1};
const Register kReturnRegister2 = {Register::kCode_a0};
const Register kJSFunctionRegister = {Register::kCode_a1};
const Register kContextRegister = {Register::kCpRegister};
const Register kAllocateSizeRegister = {Register::kCode_a0};
const Register kInterpreterAccumulatorRegister = {Register::kCode_v0};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_t0};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_t1};
...
@@ -2042,6 +2042,44 @@ static void LeaveArgumentsAdaptorFrame(MacroAssembler* masm) {
__ PushReturnAddressFrom(rcx);
}
// static
void Builtins::Generate_AllocateInNewSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdx : requested object size (untagged)
// -- rsp[0] : return address
// -----------------------------------
Label runtime;
__ Allocate(rdx, rax, rcx, rdi, &runtime, NO_ALLOCATION_FLAGS);
__ Ret();
__ bind(&runtime);
__ Integer32ToSmi(rdx, rdx);
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ PushReturnAddressFrom(rcx);
__ Move(rsi, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInNewSpace);
}
// static
void Builtins::Generate_AllocateInOldSpace(MacroAssembler* masm) {
// ----------- S t a t e -------------
// -- rdx : requested object size (untagged)
// -- rsp[0] : return address
// -----------------------------------
Label runtime;
__ Allocate(rdx, rax, rcx, rdi, &runtime, PRETENURE);
__ Ret();
__ bind(&runtime);
__ Integer32ToSmi(rdx, rdx);
__ PopReturnAddressTo(rcx);
__ Push(rdx);
__ Push(Smi::FromInt(AllocateTargetSpace::encode(OLD_SPACE)));
__ PushReturnAddressFrom(rcx);
__ Move(rsi, Smi::FromInt(0));
__ TailCallRuntime(Runtime::kAllocateInTargetSpace);
}
void Builtins::Generate_ArgumentsAdaptorTrampoline(MacroAssembler* masm) {
// ----------- S t a t e -------------
...
@@ -242,13 +242,6 @@ void AllocateHeapNumberDescriptor::InitializePlatformSpecific(
SIMD128_TYPES(SIMD128_ALLOC_DESC)
#undef SIMD128_ALLOC_DESC
void AllocateDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
Register registers[] = {rax};
data->InitializePlatformSpecific(arraysize(registers), registers);
}
void ArrayConstructorConstantArgCountDescriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
// register state
...
@@ -21,6 +21,7 @@ const Register kReturnRegister1 = {Register::kCode_rdx};
const Register kReturnRegister2 = {Register::kCode_r8};
const Register kJSFunctionRegister = {Register::kCode_rdi};
const Register kContextRegister = {Register::kCode_rsi};
const Register kAllocateSizeRegister = {Register::kCode_rdx};
const Register kInterpreterAccumulatorRegister = {Register::kCode_rax};
const Register kInterpreterBytecodeOffsetRegister = {Register::kCode_r12};
const Register kInterpreterBytecodeArrayRegister = {Register::kCode_r14};
...
@@ -1370,7 +1370,7 @@ TEST(LowerStoreField_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p1, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
}
@@ -1435,7 +1435,7 @@ TEST(LowerStoreElement_to_store) {
CHECK_EQ(IrOpcode::kStore, store->opcode());
CHECK_EQ(t.p2, store->InputAt(2));
StoreRepresentation rep = StoreRepresentationOf(store->op());
CHECK_EQ(kNoWriteBarrier, rep.write_barrier_kind());
CHECK_EQ(kFullWriteBarrier, rep.write_barrier_kind());
}
}
...