Commit 343898ed authored by Tobias Tebbi, committed by Commit Bot

[csa][turbofan] allow large object allocation

Add support for large object space allocations in Turbofan and use it
from CSA when young large objects are enabled. This maintains the
Turbofan invariant that the generation is statically predictable.

In principle, this enables write barrier elimination for large objects
allocated from Torque/CSA. It doesn't seem to trigger much yet, though;
we probably have to improve the MemoryOptimizer.

Bug: v8:7793

Change-Id: I7ea7d0cb549573db65fafe5df5edf67e0ce90893
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1565905
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60984}
parent 1397697e
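The first hunk below is the heart of the change: CSA's Allocate() may now lower to a TurboFan-optimized allocation even when kAllowLargeObjectAllocation is set, provided the resulting generation is still statically predictable. A minimal plain-C++ restatement of that predicate, using the same names as the diff (an illustration, not the actual V8 source):

// Sketch of the condition from the hunk below. An optimized allocation is
// only emitted when the object is guaranteed to end up in the requested
// generation: old-space requests always are (large old objects go to old
// large object space); young requests are only predictable if the object is
// known to be small, or if --young-generation-large-objects is enabled so
// that a large young allocation still lands in the young generation.
bool AlwaysAllocatedInRequestedSpace(bool new_space, bool allow_large_objects,
                                     bool young_generation_large_objects) {
  return !new_space || !allow_large_objects || young_generation_large_objects;
}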
@@ -1232,15 +1232,23 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
                                               AllocationFlags flags) {
   Comment("Allocate");
   bool const new_space = !(flags & kPretenured);
-  if (!(flags & kAllowLargeObjectAllocation)) {
+  bool const allow_large_objects = flags & kAllowLargeObjectAllocation;
+  // For optimized allocations, we don't allow the allocation to happen in a
+  // different generation than requested.
+  bool const always_allocated_in_requested_space =
+      !new_space || !allow_large_objects || FLAG_young_generation_large_objects;
+  if (!allow_large_objects) {
     intptr_t size_constant;
     if (ToIntPtrConstant(size_in_bytes, size_constant)) {
       CHECK_LE(size_constant, kMaxRegularHeapObjectSize);
     }
   }
-  if (!(flags & kDoubleAlignment) && !(flags & kAllowLargeObjectAllocation)) {
-    return OptimizedAllocate(size_in_bytes, new_space ? AllocationType::kYoung
-                                                      : AllocationType::kOld);
+  if (!(flags & kDoubleAlignment) && always_allocated_in_requested_space) {
+    return OptimizedAllocate(
+        size_in_bytes,
+        new_space ? AllocationType::kYoung : AllocationType::kOld,
+        allow_large_objects ? AllowLargeObjects::kTrue
+                            : AllowLargeObjects::kFalse);
   }
   TNode<ExternalReference> top_address = ExternalConstant(
       new_space
@@ -3889,8 +3897,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
       AllocateFixedArray(kind, capacity, capacity_mode, allocation_flags);

   if (IsDoubleElementsKind(kind)) {
-    FillFixedDoubleArrayWithZero(CAST(elements.value()),
-                                 ParameterToIntPtr(capacity, capacity_mode));
+    FillFixedDoubleArrayWithZero(
+        CAST(elements.value()), ParameterToIntPtr(capacity, capacity_mode));
   } else {
     FillFixedArrayWithSmiZero(CAST(elements.value()),
                               ParameterToIntPtr(capacity, capacity_mode));
@@ -3914,20 +3922,20 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
                 InnerAllocate(array.value(), elements_offset));
     StoreObjectFieldNoWriteBarrier(array.value(), JSObject::kElementsOffset,
                                    elements.value());

     // Setup elements object.
     STATIC_ASSERT(FixedArrayBase::kHeaderSize == 2 * kTaggedSize);
     RootIndex elements_map_index = IsDoubleElementsKind(kind)
                                        ? RootIndex::kFixedDoubleArrayMap
                                        : RootIndex::kFixedArrayMap;
     DCHECK(RootsTable::IsImmortalImmovable(elements_map_index));
     StoreMapNoWriteBarrier(elements.value(), elements_map_index);

     TNode<Smi> capacity_smi = ParameterToTagged(capacity, capacity_mode);
     CSA_ASSERT(this, SmiGreaterThan(capacity_smi, SmiConstant(0)));
     StoreObjectFieldNoWriteBarrier(elements.value(), FixedArray::kLengthOffset,
                                    capacity_smi);
     Goto(&out);
   }
@@ -3972,8 +3980,8 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
   Label out(this), nonempty(this);

   Branch(SmiEqual(ParameterToTagged(capacity, capacity_mode), SmiConstant(0)),
          &out, &nonempty);

   BIND(&nonempty);
   {
...
@@ -1120,10 +1120,11 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
   raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
 }

-TNode<HeapObject> CodeAssembler::OptimizedAllocate(TNode<IntPtrT> size,
-                                                   AllocationType allocation) {
-  return UncheckedCast<HeapObject>(
-      raw_assembler()->OptimizedAllocate(size, allocation));
+TNode<HeapObject> CodeAssembler::OptimizedAllocate(
+    TNode<IntPtrT> size, AllocationType allocation,
+    AllowLargeObjects allow_large_objects) {
+  return UncheckedCast<HeapObject>(raw_assembler()->OptimizedAllocate(
+      size, allocation, allow_large_objects));
 }

 void CodeAssembler::HandleException(Node* node) {
...
@@ -970,7 +970,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
   // Optimized memory operations that map to Turbofan simplified nodes.
   TNode<HeapObject> OptimizedAllocate(TNode<IntPtrT> size,
-                                      AllocationType allocation);
+                                      AllocationType allocation,
+                                      AllowLargeObjects allow_large_objects);
   void OptimizedStoreField(MachineRepresentation rep, TNode<HeapObject> object,
                            int offset, Node* value,
                            WriteBarrierKind write_barrier);
...
@@ -223,13 +223,14 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
   gasm()->Reset(effect, control);

-  AllocationType allocation = AllocationTypeOf(node->op());
+  const AllocateParameters& allocation = AllocateParametersOf(node->op());
+  AllocationType allocation_type = allocation.allocation_type();

   // Propagate tenuring from outer allocations to inner allocations, i.e.
   // when we allocate an object in old space and store a newly allocated
   // child object into the pretenured object, then the newly allocated
   // child object also should get pretenured to old space.
-  if (allocation == AllocationType::kOld) {
+  if (allocation_type == AllocationType::kOld) {
     for (Edge const edge : node->use_edges()) {
       Node* const user = edge.from();
       if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
@@ -242,14 +243,14 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
       }
     }
   } else {
-    DCHECK_EQ(AllocationType::kYoung, allocation);
+    DCHECK_EQ(AllocationType::kYoung, allocation_type);
     for (Edge const edge : node->use_edges()) {
       Node* const user = edge.from();
       if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
         Node* const parent = user->InputAt(0);
         if (parent->opcode() == IrOpcode::kAllocateRaw &&
             AllocationTypeOf(parent->op()) == AllocationType::kOld) {
-          allocation = AllocationType::kOld;
+          allocation_type = AllocationType::kOld;
           break;
         }
       }
@@ -258,11 +259,11 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
   // Determine the top/limit addresses.
   Node* top_address = __ ExternalConstant(
-      allocation == AllocationType::kYoung
+      allocation_type == AllocationType::kYoung
           ? ExternalReference::new_space_allocation_top_address(isolate())
           : ExternalReference::old_space_allocation_top_address(isolate()));
   Node* limit_address = __ ExternalConstant(
-      allocation == AllocationType::kYoung
+      allocation_type == AllocationType::kYoung
           ? ExternalReference::new_space_allocation_limit_address(isolate())
           : ExternalReference::old_space_allocation_limit_address(isolate()));
@@ -273,7 +274,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
     intptr_t const object_size = m.Value();
     if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
         state->size() <= kMaxRegularHeapObjectSize - object_size &&
-        state->group()->allocation() == allocation) {
+        state->group()->allocation() == allocation_type) {
       // We can fold this Allocate {node} into the allocation {group}
       // represented by the given {state}. Compute the upper bound for
       // the new {state}.
@@ -331,7 +332,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
       __ Bind(&call_runtime);
       {
-        Node* target = allocation == AllocationType::kYoung
+        Node* target = allocation_type == AllocationType::kYoung
                            ? __
                        AllocateInYoungGenerationStubConstant()
                            : __
@@ -363,7 +364,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
       // Start a new allocation group.
       AllocationGroup* group =
-          new (zone()) AllocationGroup(value, allocation, size, zone());
+          new (zone()) AllocationGroup(value, allocation_type, size, zone());
       state = AllocationState::Open(group, object_size, top, zone());
     }
   } else {
@@ -382,6 +383,11 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
     // Check if we can do bump pointer allocation here.
     Node* check = __ UintLessThan(new_top, limit);
     __ GotoIfNot(check, &call_runtime);
+    if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
+      __ GotoIfNot(
+          __ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
+          &call_runtime);
+    }
     __ Store(StoreRepresentation(MachineType::PointerRepresentation(),
                                  kNoWriteBarrier),
              top_address, __ IntPtrConstant(0), new_top);
@@ -389,7 +395,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
                __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));

     __ Bind(&call_runtime);
-    Node* target = allocation == AllocationType::kYoung
+    Node* target = allocation_type == AllocationType::kYoung
                        ? __
                    AllocateInYoungGenerationStubConstant()
                        : __
@@ -408,7 +414,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
     // Create an unfoldable allocation group.
     AllocationGroup* group =
-        new (zone()) AllocationGroup(value, allocation, zone());
+        new (zone()) AllocationGroup(value, allocation_type, zone());
     state = AllocationState::Closed(group, zone());
   }
...
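In the MemoryOptimizer hunks above, the behavioral addition is the extra guard on the inline bump-pointer path: when the allocation node allows large objects, the fast path also checks the size against kMaxRegularHeapObjectSize and otherwise falls through to the allocation stub, which can place the object in large object space. A hedged sketch of that logic in plain integers rather than graph nodes (the helper name and signature are invented for illustration):

// Illustrative only: mirrors the checks emitted by VisitAllocateRaw.
bool TryBumpPointerAllocate(uintptr_t top, uintptr_t limit, uintptr_t size,
                            bool allow_large_objects, uintptr_t* new_top) {
  uintptr_t candidate_top = top + size;
  if (!(candidate_top < limit)) return false;  // no room: call the stub
  if (allow_large_objects && !(size < kMaxRegularHeapObjectSize)) {
    // Too big for a regular page; the stub decides and may use LO space.
    return false;
  }
  *new_top = candidate_top;  // commit the bump-pointer allocation
  return true;
}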
@@ -69,9 +69,12 @@ Node* RawMachineAssembler::RelocatableIntPtrConstant(intptr_t value,
              : RelocatableInt32Constant(static_cast<int>(value), rmode);
 }

-Node* RawMachineAssembler::OptimizedAllocate(Node* size,
-                                             AllocationType allocation) {
-  return AddNode(simplified()->AllocateRaw(Type::Any(), allocation), size);
+Node* RawMachineAssembler::OptimizedAllocate(
+    Node* size, AllocationType allocation,
+    AllowLargeObjects allow_large_objects) {
+  return AddNode(
+      simplified()->AllocateRaw(Type::Any(), allocation, allow_large_objects),
+      size);
 }

 Schedule* RawMachineAssembler::Export() {
...
@@ -227,7 +227,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
   }
   Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }

-  Node* OptimizedAllocate(Node* size, AllocationType allocation);
+  Node* OptimizedAllocate(Node* size, AllocationType allocation,
+                          AllowLargeObjects allow_large_objects);

   // Unaligned memory operations
   Node* UnalignedLoad(MachineType type, Node* base) {
...
@@ -547,19 +547,23 @@ bool operator==(AllocateParameters const& lhs, AllocateParameters const& rhs) {
          lhs.type() == rhs.type();
 }

+const AllocateParameters& AllocateParametersOf(const Operator* op) {
+  DCHECK(op->opcode() == IrOpcode::kAllocate ||
+         op->opcode() == IrOpcode::kAllocateRaw);
+  return OpParameter<AllocateParameters>(op);
+}
+
 AllocationType AllocationTypeOf(const Operator* op) {
   if (op->opcode() == IrOpcode::kNewDoubleElements ||
       op->opcode() == IrOpcode::kNewSmiOrObjectElements) {
     return OpParameter<AllocationType>(op);
   }
-  DCHECK(op->opcode() == IrOpcode::kAllocate ||
-         op->opcode() == IrOpcode::kAllocateRaw);
-  return OpParameter<AllocateParameters>(op).allocation_type();
+  return AllocateParametersOf(op).allocation_type();
 }

 Type AllocateTypeOf(const Operator* op) {
   DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
-  return OpParameter<AllocateParameters>(op).type();
+  return AllocateParametersOf(op).type();
 }

 UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
@@ -1651,11 +1655,18 @@ const Operator* SimplifiedOperatorBuilder::Allocate(Type type,
 }

 const Operator* SimplifiedOperatorBuilder::AllocateRaw(
-    Type type, AllocationType allocation) {
+    Type type, AllocationType allocation,
+    AllowLargeObjects allow_large_objects) {
+  // We forbid optimized allocations to allocate in a different generation than
+  // requested.
+  DCHECK(!(allow_large_objects == AllowLargeObjects::kTrue &&
+           allocation == AllocationType::kYoung &&
+           !FLAG_young_generation_large_objects));
   return new (zone()) Operator1<AllocateParameters>(
       IrOpcode::kAllocateRaw,
       Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
-      "AllocateRaw", 1, 1, 1, 1, 1, 1, AllocateParameters(type, allocation));
+      "AllocateRaw", 1, 1, 1, 1, 1, 1,
+      AllocateParameters(type, allocation, allow_large_objects));
 }

 const Operator* SimplifiedOperatorBuilder::StringCodePointAt(
...
@@ -480,15 +480,21 @@ bool IsRestLengthOf(const Operator* op) V8_WARN_UNUSED_RESULT;
 class AllocateParameters {
  public:
-  AllocateParameters(Type type, AllocationType allocation_type)
-      : type_(type), allocation_type_(allocation_type) {}
+  AllocateParameters(
+      Type type, AllocationType allocation_type,
+      AllowLargeObjects allow_large_objects = AllowLargeObjects::kFalse)
+      : type_(type),
+        allocation_type_(allocation_type),
+        allow_large_objects_(allow_large_objects) {}

   Type type() const { return type_; }
   AllocationType allocation_type() const { return allocation_type_; }
+  AllowLargeObjects allow_large_objects() const { return allow_large_objects_; }

  private:
   Type type_;
   AllocationType allocation_type_;
+  AllowLargeObjects allow_large_objects_;
 };

 bool IsCheckedWithFeedback(const Operator* op);
@@ -499,6 +505,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AllocateParameters);

 bool operator==(AllocateParameters const&, AllocateParameters const&);

+const AllocateParameters& AllocateParametersOf(const Operator* op)
+    V8_WARN_UNUSED_RESULT;
+
 AllocationType AllocationTypeOf(const Operator* op) V8_WARN_UNUSED_RESULT;

 Type AllocateTypeOf(const Operator* op) V8_WARN_UNUSED_RESULT;
@@ -789,7 +798,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
   const Operator* Allocate(Type type,
                            AllocationType allocation = AllocationType::kYoung);
   const Operator* AllocateRaw(
-      Type type, AllocationType allocation = AllocationType::kYoung);
+      Type type, AllocationType allocation = AllocationType::kYoung,
+      AllowLargeObjects allow_large_objects = AllowLargeObjects::kFalse);

   const Operator* LoadFieldByIndex();
   const Operator* LoadField(FieldAccess const&);
...
@@ -761,6 +761,8 @@ inline std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
   UNREACHABLE();
 }

+enum class AllowLargeObjects { kFalse, kTrue };
+
 enum MinimumCapacity {
   USE_DEFAULT_MINIMUM_CAPACITY,
   USE_CUSTOM_MINIMUM_CAPACITY
...
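Putting the pieces together, a hypothetical CSA-side call site could look like the following; only the Allocate() signature, the kAllowLargeObjectAllocation flag, and the DCHECK'd constraint come from the diff above, while the surrounding class and method names are invented for illustration:

// Hypothetical builtin helper in a CodeStubAssembler subclass. With
// --young-generation-large-objects enabled, this lowers to a single
// AllocateRaw node carrying AllowLargeObjects::kTrue instead of bailing
// out of the optimized allocation path, and the result is still statically
// known to be in the young generation (the DCHECK in
// SimplifiedOperatorBuilder::AllocateRaw forbids kYoung + kTrue otherwise).
TNode<HeapObject> MyAssembler::AllocateMaybeLarge(TNode<IntPtrT> size_in_bytes) {
  return Allocate(size_in_bytes, kAllowLargeObjectAllocation);
}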