Commit 820e27f9 authored by epertoso, committed by Commit bot

[turbofan] Adds an Allocate macro to the CodeStubAssembler.

The macro is currently used by AllocateHeapNumberStub and AllocateMutableHeapNumberStub, which are now TurboFan code stubs.
It can be used to allocate objects in the new or old space, optionally with double alignment.
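
For context, here is a rough usage sketch (not part of this CL; the stub name is hypothetical, but Allocate() and the AllocationFlags added below are from this change): a TurboFan stub requesting a pretenured, double-aligned allocation from its GenerateAssembly().

// Hypothetical stub body illustrating the new macro. kPretenured selects old
// space, kDoubleAlignment requests 8-byte alignment (only enforced on 32-bit
// hosts, see CodeStubAssembler::Allocate below); the returned Node* is a
// tagged pointer to uninitialized memory whose map still has to be stored.
void ExampleAllocatingStub::GenerateAssembly(
    compiler::CodeStubAssembler* assembler) const {
  compiler::Node* result = assembler->Allocate(
      HeapNumber::kSize, compiler::CodeStubAssembler::kPretenured |
                             compiler::CodeStubAssembler::kDoubleAlignment);
  assembler->Return(result);
}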

BUG=588692
LOG=y

Review URL: https://codereview.chromium.org/1735803003

Cr-Commit-Position: refs/heads/master@{#34424}
parent 4acb492e
@@ -1184,36 +1184,6 @@ Handle<Code> TransitionElementsKindStub::GenerateCode() {
}
template <>
HValue* CodeStubGraphBuilder<AllocateHeapNumberStub>::BuildCodeStub() {
HValue* result =
Add<HAllocate>(Add<HConstant>(HeapNumber::kSize), HType::HeapNumber(),
NOT_TENURED, HEAP_NUMBER_TYPE);
AddStoreMapConstant(result, isolate()->factory()->heap_number_map());
return result;
}
Handle<Code> AllocateHeapNumberStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<AllocateMutableHeapNumberStub>::BuildCodeStub() {
HValue* result =
Add<HAllocate>(Add<HConstant>(HeapNumber::kSize), HType::HeapObject(),
NOT_TENURED, MUTABLE_HEAP_NUMBER_TYPE);
AddStoreMapConstant(result, isolate()->factory()->mutable_heap_number_map());
return result;
}
Handle<Code> AllocateMutableHeapNumberStub::GenerateCode() {
return DoGenerateCode(this);
}
template <>
HValue* CodeStubGraphBuilder<AllocateInNewSpaceStub>::BuildCodeStub() {
HValue* result = Add<HAllocate>(GetParameter(0), HType::Tagged(), NOT_TENURED,
......
@@ -462,6 +462,31 @@ Handle<Code> TurboFanCodeStub::GenerateCode() {
return assembler.GenerateCode();
}
void AllocateHeapNumberStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
compiler::Node* result = assembler->Allocate(
HeapNumber::kSize, compiler::CodeStubAssembler::kNone);
compiler::Node* map_offset =
assembler->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag);
compiler::Node* map = assembler->IntPtrAdd(result, map_offset);
assembler->StoreNoWriteBarrier(
MachineRepresentation::kTagged, map,
assembler->HeapConstant(isolate()->factory()->heap_number_map()));
assembler->Return(result);
}
void AllocateMutableHeapNumberStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
compiler::Node* result = assembler->Allocate(
HeapNumber::kSize, compiler::CodeStubAssembler::kNone);
compiler::Node* map_offset =
assembler->IntPtrConstant(HeapObject::kMapOffset - kHeapObjectTag);
compiler::Node* map = assembler->IntPtrAdd(result, map_offset);
assembler->StoreNoWriteBarrier(
MachineRepresentation::kTagged, map,
assembler->HeapConstant(isolate()->factory()->mutable_heap_number_map()));
assembler->Return(result);
}
void StringLengthStub::GenerateAssembly(
compiler::CodeStubAssembler* assembler) const {
......
@@ -59,8 +59,6 @@ namespace internal {
V(VectorStoreIC) \
V(VectorKeyedStoreIC) \
/* HydrogenCodeStubs */ \
V(AllocateHeapNumber) \
V(AllocateMutableHeapNumber) \
V(AllocateInNewSpace) \
V(ArrayNArgumentsConstructor) \
V(ArrayNoArgumentConstructor) \
@@ -100,6 +98,8 @@ namespace internal {
V(KeyedLoadIC) \
V(LoadIC) \
/* TurboFanCodeStubs */ \
V(AllocateHeapNumber) \
V(AllocateMutableHeapNumber) \
V(StringLength) \
V(StrictEqual) \
V(ToBoolean) \
@@ -2429,26 +2429,28 @@ class TransitionElementsKindStub : public HydrogenCodeStub {
DEFINE_HYDROGEN_CODE_STUB(TransitionElementsKind, HydrogenCodeStub);
};
class AllocateHeapNumberStub : public TurboFanCodeStub {
class AllocateHeapNumberStub final : public HydrogenCodeStub {
public:
explicit AllocateHeapNumberStub(Isolate* isolate)
    : HydrogenCodeStub(isolate) {}
    : TurboFanCodeStub(isolate) {}
void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateHeapNumber);
DEFINE_HYDROGEN_CODE_STUB(AllocateHeapNumber, HydrogenCodeStub);
DEFINE_CODE_STUB(AllocateHeapNumber, TurboFanCodeStub);
};
class AllocateMutableHeapNumberStub : public TurboFanCodeStub {
class AllocateMutableHeapNumberStub final : public HydrogenCodeStub {
public:
explicit AllocateMutableHeapNumberStub(Isolate* isolate)
    : HydrogenCodeStub(isolate) {}
    : TurboFanCodeStub(isolate) {}
void InitializeDescriptor(CodeStubDescriptor* descriptor) override;
void GenerateAssembly(compiler::CodeStubAssembler* assembler) const override;
private:
DEFINE_CALL_INTERFACE_DESCRIPTOR(AllocateMutableHeapNumber);
DEFINE_HYDROGEN_CODE_STUB(AllocateMutableHeapNumber, HydrogenCodeStub);
DEFINE_CODE_STUB(AllocateMutableHeapNumber, TurboFanCodeStub);
};
......
@@ -256,6 +256,122 @@ Node* CodeStubAssembler::LoadRoot(Heap::RootListIndex root_index) {
return nullptr;
}
Node* CodeStubAssembler::AllocateRawUnaligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
Node* top = Load(MachineType::Pointer(), top_address);
Node* limit = Load(MachineType::Pointer(), limit_address);
// If there's not enough space, call the runtime.
RawMachineLabel runtime_call, no_runtime_call, merge_runtime;
raw_assembler_->Branch(
raw_assembler_->IntPtrLessThan(IntPtrSub(limit, top), size_in_bytes),
&runtime_call, &no_runtime_call);
raw_assembler_->Bind(&runtime_call);
// AllocateInTargetSpace does not use the context.
Node* context = IntPtrConstant(0);
Node* runtime_flags = SmiTag(Int32Constant(
AllocateDoubleAlignFlag::encode(false) |
AllocateTargetSpace::encode(flags & kPretenured
? AllocationSpace::OLD_SPACE
: AllocationSpace::NEW_SPACE)));
Node* runtime_result = CallRuntime(Runtime::kAllocateInTargetSpace, context,
SmiTag(size_in_bytes), runtime_flags);
raw_assembler_->Goto(&merge_runtime);
// When there is enough space, return `top' and bump it up.
raw_assembler_->Bind(&no_runtime_call);
Node* no_runtime_result = top;
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
IntPtrAdd(top, size_in_bytes));
no_runtime_result =
IntPtrAdd(no_runtime_result, IntPtrConstant(kHeapObjectTag));
raw_assembler_->Goto(&merge_runtime);
raw_assembler_->Bind(&merge_runtime);
return raw_assembler_->Phi(MachineType::PointerRepresentation(),
runtime_result, no_runtime_result);
}
Node* CodeStubAssembler::AllocateRawAligned(Node* size_in_bytes,
AllocationFlags flags,
Node* top_address,
Node* limit_address) {
Node* top = Load(MachineType::Pointer(), top_address);
Node* limit = Load(MachineType::Pointer(), limit_address);
Node* adjusted_size = size_in_bytes;
if (flags & kDoubleAlignment) {
// TODO(epertoso): Simd128 alignment.
RawMachineLabel aligned, not_aligned, merge;
raw_assembler_->Branch(WordAnd(top, IntPtrConstant(kDoubleAlignmentMask)),
&not_aligned, &aligned);
raw_assembler_->Bind(&not_aligned);
Node* not_aligned_size =
IntPtrAdd(size_in_bytes, IntPtrConstant(kPointerSize));
raw_assembler_->Goto(&merge);
raw_assembler_->Bind(&aligned);
raw_assembler_->Goto(&merge);
raw_assembler_->Bind(&merge);
adjusted_size = raw_assembler_->Phi(MachineType::PointerRepresentation(),
not_aligned_size, adjusted_size);
}
Node* address = AllocateRawUnaligned(adjusted_size, kNone, top, limit);
RawMachineLabel needs_filler, doesnt_need_filler, merge_address;
raw_assembler_->Branch(
raw_assembler_->IntPtrEqual(adjusted_size, size_in_bytes),
&doesnt_need_filler, &needs_filler);
raw_assembler_->Bind(&needs_filler);
// Store a filler and increase the address by kPointerSize.
// TODO(epertoso): this code assumes that we only align to kDoubleSize. Change
// it when Simd128 alignment is supported.
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top,
LoadRoot(Heap::kOnePointerFillerMapRootIndex));
Node* address_with_filler = IntPtrAdd(address, IntPtrConstant(kPointerSize));
raw_assembler_->Goto(&merge_address);
raw_assembler_->Bind(&doesnt_need_filler);
Node* address_without_filler = address;
raw_assembler_->Goto(&merge_address);
raw_assembler_->Bind(&merge_address);
address = raw_assembler_->Phi(MachineType::PointerRepresentation(),
address_with_filler, address_without_filler);
// Update the top.
StoreNoWriteBarrier(MachineType::PointerRepresentation(), top_address,
IntPtrAdd(top, adjusted_size));
return address;
}
Node* CodeStubAssembler::Allocate(int size_in_bytes, AllocationFlags flags) {
bool const new_space = !(flags & kPretenured);
Node* top_address = ExternalConstant(
new_space
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = ExternalConstant(
new_space
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
#ifdef V8_HOST_ARCH_32_BIT
if (flags & kDoubleAlignment) {
return AllocateRawAligned(IntPtrConstant(size_in_bytes), flags, top_address,
limit_address);
}
#endif
return AllocateRawUnaligned(IntPtrConstant(size_in_bytes), flags, top_address,
limit_address);
}
Node* CodeStubAssembler::Load(MachineType rep, Node* base) {
return raw_assembler_->Load(rep, base);
}
......
@@ -113,6 +113,14 @@ class CodeStubAssembler {
Impl* impl_;
};
enum AllocationFlag : uint8_t {
kNone = 0,
kDoubleAlignment = 1,
kPretenured = 1 << 1
};
typedef base::Flags<AllocationFlag> AllocationFlags;
// ===========================================================================
// Base Assembler
// ===========================================================================
@@ -251,6 +259,9 @@ class CodeStubAssembler {
int additional_offset = 0);
Node* LoadFixedArrayElementConstantIndex(Node* object, int index);
// Allocate an object of the given size.
Node* Allocate(int size, AllocationFlags flags);
// Store an array element to a FixedArray.
Node* StoreFixedArrayElementNoWriteBarrier(Node* object, Node* index,
                                           Node* value);
@@ -290,6 +301,11 @@ class CodeStubAssembler {
Node* SmiShiftBitsConstant();
Node* AllocateRawAligned(Node* size_in_bytes, AllocationFlags flags,
Node* top_address, Node* limit_address);
Node* AllocateRawUnaligned(Node* size_in_bytes, AllocationFlags flags,
Node* top_address, Node* limit_address);
base::SmartPointer<RawMachineAssembler> raw_assembler_;
Code::Flags flags_;
const char* name_;
@@ -299,6 +315,8 @@ class CodeStubAssembler {
DISALLOW_COPY_AND_ASSIGN(CodeStubAssembler);
};
DEFINE_OPERATORS_FOR_FLAGS(CodeStubAssembler::AllocationFlags);
class CodeStubAssembler::Label {
public:
explicit Label(CodeStubAssembler* assembler);
......
@@ -169,6 +169,18 @@
# BUG(v8:3434).
'test-api/LoadICFastApi_DirectCall_GCMoveStubWithProfiler': [SKIP],
# BUG(v8:4795).
'test-run-wasm-js/Run_JSSelectAlign_0': [SKIP],
'test-run-wasm-js/Run_JSSelectAlign_2': [SKIP],
'test-run-wasm-js/Run_JSSelectAlign_4': [SKIP],
'test-run-wasm-js/Run_JSSelect_0': [SKIP],
'test-run-wasm-js/Run_JSSelect_1': [SKIP],
'test-run-wasm-js/Run_JSSelect_2': [SKIP],
'test-run-wasm-js/Run_JSSelect_3': [SKIP],
'test-run-wasm-js/Run_JSSelect_4': [SKIP],
'test-run-wasm-js/Run_JSSelect_5': [SKIP],
'test-run-wasm-js/Run_JSSelect_6': [SKIP],
}],  # 'arch == arm64'
['arch == arm64 and simulator_run == True', {
......