Commit 343898ed authored by Tobias Tebbi, committed by Commit Bot

[csa][turbofan] allow large object allocation

Add support for large object space allocations in Turbofan and use it
from CSA when young large objects are enabled. This maintains the
Turbofan invariant that the generation is statically predictable.

In principle, this enables write barrier elimination for large objects
allocated from Torque/CSA. But it doesn't seem to trigger much yet;
we probably have to improve the MemoryOptimizer.
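
As an illustration of how this is meant to be used from CSA (a minimal usage
sketch, not code from this change; the flag is the existing
kAllowLargeObjectAllocation from CSA's AllocationFlags):

    // Illustrative CSA usage: request an allocation that may exceed
    // kMaxRegularHeapObjectSize. With this change it can still be lowered to
    // the optimized Turbofan allocation, and its generation stays statically
    // known (young only when FLAG_young_generation_large_objects is on).
    TNode<HeapObject> object =
        Allocate(size_in_bytes, kAllowLargeObjectAllocation);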

Bug: v8:7793

Change-Id: I7ea7d0cb549573db65fafe5df5edf67e0ce90893
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1565905
Reviewed-by: Jaroslav Sevcik <jarin@chromium.org>
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Cr-Commit-Position: refs/heads/master@{#60984}
parent 1397697e
......@@ -1232,15 +1232,23 @@ TNode<HeapObject> CodeStubAssembler::Allocate(TNode<IntPtrT> size_in_bytes,
AllocationFlags flags) {
Comment("Allocate");
bool const new_space = !(flags & kPretenured);
if (!(flags & kAllowLargeObjectAllocation)) {
bool const allow_large_objects = flags & kAllowLargeObjectAllocation;
// For optimized allocations, we don't allow the allocation to happen in a
// different generation than requested.
bool const always_allocated_in_requested_space =
!new_space || !allow_large_objects || FLAG_young_generation_large_objects;
if (!allow_large_objects) {
intptr_t size_constant;
if (ToIntPtrConstant(size_in_bytes, size_constant)) {
CHECK_LE(size_constant, kMaxRegularHeapObjectSize);
}
}
if (!(flags & kDoubleAlignment) && !(flags & kAllowLargeObjectAllocation)) {
return OptimizedAllocate(size_in_bytes, new_space ? AllocationType::kYoung
: AllocationType::kOld);
if (!(flags & kDoubleAlignment) && always_allocated_in_requested_space) {
return OptimizedAllocate(
size_in_bytes,
new_space ? AllocationType::kYoung : AllocationType::kOld,
allow_large_objects ? AllowLargeObjects::kTrue
: AllowLargeObjects::kFalse);
}
TNode<ExternalReference> top_address = ExternalConstant(
new_space
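The new always_allocated_in_requested_space predicate captures the only case in
which the final generation could differ from the requested one. Restated as a
standalone helper (the helper name is an assumption, for illustration only):

// Illustrative restatement (assumed helper, not in the CL): a young-space
// request that may produce a large object is only statically predictable when
// young large objects are enabled; otherwise the object would land in (old)
// large object space, so the optimized allocation must not be used.
bool AlwaysAllocatedInRequestedSpace(bool new_space, bool allow_large_objects) {
  return !new_space || !allow_large_objects ||
         FLAG_young_generation_large_objects;
}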
......@@ -3889,8 +3897,8 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
AllocateFixedArray(kind, capacity, capacity_mode, allocation_flags);
if (IsDoubleElementsKind(kind)) {
FillFixedDoubleArrayWithZero(CAST(elements.value()),
ParameterToIntPtr(capacity, capacity_mode));
FillFixedDoubleArrayWithZero(
CAST(elements.value()), ParameterToIntPtr(capacity, capacity_mode));
} else {
FillFixedArrayWithSmiZero(CAST(elements.value()),
ParameterToIntPtr(capacity, capacity_mode));
......
......@@ -1120,10 +1120,11 @@ void CodeAssembler::GotoIfException(Node* node, Label* if_exception,
raw_assembler()->AddNode(raw_assembler()->common()->IfSuccess(), node);
}
TNode<HeapObject> CodeAssembler::OptimizedAllocate(TNode<IntPtrT> size,
AllocationType allocation) {
return UncheckedCast<HeapObject>(
raw_assembler()->OptimizedAllocate(size, allocation));
TNode<HeapObject> CodeAssembler::OptimizedAllocate(
TNode<IntPtrT> size, AllocationType allocation,
AllowLargeObjects allow_large_objects) {
return UncheckedCast<HeapObject>(raw_assembler()->OptimizedAllocate(
size, allocation, allow_large_objects));
}
void CodeAssembler::HandleException(Node* node) {
......
......@@ -970,7 +970,8 @@ class V8_EXPORT_PRIVATE CodeAssembler {
// Optimized memory operations that map to Turbofan simplified nodes.
TNode<HeapObject> OptimizedAllocate(TNode<IntPtrT> size,
AllocationType allocation);
AllocationType allocation,
AllowLargeObjects allow_large_objects);
void OptimizedStoreField(MachineRepresentation rep, TNode<HeapObject> object,
int offset, Node* value,
WriteBarrierKind write_barrier);
......
......@@ -223,13 +223,14 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
gasm()->Reset(effect, control);
AllocationType allocation = AllocationTypeOf(node->op());
const AllocateParameters& allocation = AllocateParametersOf(node->op());
AllocationType allocation_type = allocation.allocation_type();
// Propagate tenuring from outer allocations to inner allocations, i.e.
// when we allocate an object in old space and store a newly allocated
// child object into the pretenured object, then the newly allocated
// child object also should get pretenured to old space.
if (allocation == AllocationType::kOld) {
if (allocation_type == AllocationType::kOld) {
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 0) {
......@@ -242,14 +243,14 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
}
}
} else {
DCHECK_EQ(AllocationType::kYoung, allocation);
DCHECK_EQ(AllocationType::kYoung, allocation_type);
for (Edge const edge : node->use_edges()) {
Node* const user = edge.from();
if (user->opcode() == IrOpcode::kStoreField && edge.index() == 1) {
Node* const parent = user->InputAt(0);
if (parent->opcode() == IrOpcode::kAllocateRaw &&
AllocationTypeOf(parent->op()) == AllocationType::kOld) {
allocation = AllocationType::kOld;
allocation_type = AllocationType::kOld;
break;
}
}
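The loops above scan StoreField uses in both directions; the decision they
implement can be summarized by a small helper (assumed name, illustrative
only):

// Illustrative only: a young allocation that is immediately stored into a
// pretenured (old-space) object is retagged to old space, keeping parent and
// child in the same generation and avoiding an old-to-new reference from the
// pretenured object.
bool ShouldPropagateTenuring(AllocationType parent, AllocationType child) {
  return parent == AllocationType::kOld && child == AllocationType::kYoung;
}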
......@@ -258,11 +259,11 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Determine the top/limit addresses.
Node* top_address = __ ExternalConstant(
allocation == AllocationType::kYoung
allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_top_address(isolate())
: ExternalReference::old_space_allocation_top_address(isolate()));
Node* limit_address = __ ExternalConstant(
allocation == AllocationType::kYoung
allocation_type == AllocationType::kYoung
? ExternalReference::new_space_allocation_limit_address(isolate())
: ExternalReference::old_space_allocation_limit_address(isolate()));
......@@ -273,7 +274,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
intptr_t const object_size = m.Value();
if (allocation_folding_ == AllocationFolding::kDoAllocationFolding &&
state->size() <= kMaxRegularHeapObjectSize - object_size &&
state->group()->allocation() == allocation) {
state->group()->allocation() == allocation_type) {
// We can fold this Allocate {node} into the allocation {group}
// represented by the given {state}. Compute the upper bound for
// the new {state}.
......@@ -331,7 +332,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ Bind(&call_runtime);
{
Node* target = allocation == AllocationType::kYoung
Node* target = allocation_type == AllocationType::kYoung
? __
AllocateInYoungGenerationStubConstant()
: __
......@@ -363,7 +364,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Start a new allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, allocation, size, zone());
new (zone()) AllocationGroup(value, allocation_type, size, zone());
state = AllocationState::Open(group, object_size, top, zone());
}
} else {
......@@ -382,6 +383,11 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Check if we can do bump pointer allocation here.
Node* check = __ UintLessThan(new_top, limit);
__ GotoIfNot(check, &call_runtime);
if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
__ GotoIfNot(
__ UintLessThan(size, __ IntPtrConstant(kMaxRegularHeapObjectSize)),
&call_runtime);
}
__ Store(StoreRepresentation(MachineType::PointerRepresentation(),
kNoWriteBarrier),
top_address, __ IntPtrConstant(0), new_top);
......@@ -389,7 +395,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
__ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
__ Bind(&call_runtime);
Node* target = allocation == AllocationType::kYoung
Node* target = allocation_type == AllocationType::kYoung
? __
AllocateInYoungGenerationStubConstant()
: __
......@@ -408,7 +414,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
// Create an unfoldable allocation group.
AllocationGroup* group =
new (zone()) AllocationGroup(value, allocation, zone());
new (zone()) AllocationGroup(value, allocation_type, zone());
state = AllocationState::Closed(group, zone());
}
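Taken together, the inlined fast path emitted by VisitAllocateRaw now behaves
roughly like the following model (a minimal sketch with assumed placeholder
constants, not the actual generated code):

#include <cstdint>

// Assumed placeholder constants for this sketch only.
constexpr uintptr_t kMaxRegularHeapObjectSizeSketch = 128 * 1024;
constexpr uintptr_t kHeapObjectTagSketch = 1;

// Returns a tagged address on the fast path, or 0 when the allocation has to
// be retried through the runtime stub instead.
uintptr_t BumpPointerAllocate(uintptr_t* top, uintptr_t limit, uintptr_t size,
                              bool allow_large_objects) {
  uintptr_t new_top = *top + size;
  if (!(new_top < limit)) return 0;  // linear area exhausted -> runtime stub
  // New with this CL: an allocation that may be large must take the runtime
  // path so the object can be placed in large object space.
  if (allow_large_objects && !(size < kMaxRegularHeapObjectSizeSketch)) {
    return 0;
  }
  uintptr_t result = *top;
  *top = new_top;  // bump the allocation top; the result keeps the old top
  return result + kHeapObjectTagSketch;
}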
......
......@@ -69,9 +69,12 @@ Node* RawMachineAssembler::RelocatableIntPtrConstant(intptr_t value,
: RelocatableInt32Constant(static_cast<int>(value), rmode);
}
Node* RawMachineAssembler::OptimizedAllocate(Node* size,
AllocationType allocation) {
return AddNode(simplified()->AllocateRaw(Type::Any(), allocation), size);
Node* RawMachineAssembler::OptimizedAllocate(
Node* size, AllocationType allocation,
AllowLargeObjects allow_large_objects) {
return AddNode(
simplified()->AllocateRaw(Type::Any(), allocation, allow_large_objects),
size);
}
Schedule* RawMachineAssembler::Export() {
......
......@@ -227,7 +227,8 @@ class V8_EXPORT_PRIVATE RawMachineAssembler {
}
Node* Retain(Node* value) { return AddNode(common()->Retain(), value); }
Node* OptimizedAllocate(Node* size, AllocationType allocation);
Node* OptimizedAllocate(Node* size, AllocationType allocation,
AllowLargeObjects allow_large_objects);
// Unaligned memory operations
Node* UnalignedLoad(MachineType type, Node* base) {
......
......@@ -547,19 +547,23 @@ bool operator==(AllocateParameters const& lhs, AllocateParameters const& rhs) {
lhs.type() == rhs.type();
}
const AllocateParameters& AllocateParametersOf(const Operator* op) {
DCHECK(op->opcode() == IrOpcode::kAllocate ||
op->opcode() == IrOpcode::kAllocateRaw);
return OpParameter<AllocateParameters>(op);
}
AllocationType AllocationTypeOf(const Operator* op) {
if (op->opcode() == IrOpcode::kNewDoubleElements ||
op->opcode() == IrOpcode::kNewSmiOrObjectElements) {
return OpParameter<AllocationType>(op);
}
DCHECK(op->opcode() == IrOpcode::kAllocate ||
op->opcode() == IrOpcode::kAllocateRaw);
return OpParameter<AllocateParameters>(op).allocation_type();
return AllocateParametersOf(op).allocation_type();
}
Type AllocateTypeOf(const Operator* op) {
DCHECK_EQ(IrOpcode::kAllocate, op->opcode());
return OpParameter<AllocateParameters>(op).type();
return AllocateParametersOf(op).type();
}
UnicodeEncoding UnicodeEncodingOf(const Operator* op) {
......@@ -1651,11 +1655,18 @@ const Operator* SimplifiedOperatorBuilder::Allocate(Type type,
}
const Operator* SimplifiedOperatorBuilder::AllocateRaw(
Type type, AllocationType allocation) {
Type type, AllocationType allocation,
AllowLargeObjects allow_large_objects) {
// We forbid optimized allocations to allocate in a different generation than
// requested.
DCHECK(!(allow_large_objects == AllowLargeObjects::kTrue &&
allocation == AllocationType::kYoung &&
!FLAG_young_generation_large_objects));
return new (zone()) Operator1<AllocateParameters>(
IrOpcode::kAllocateRaw,
Operator::kNoDeopt | Operator::kNoThrow | Operator::kNoWrite,
"AllocateRaw", 1, 1, 1, 1, 1, 1, AllocateParameters(type, allocation));
"AllocateRaw", 1, 1, 1, 1, 1, 1,
AllocateParameters(type, allocation, allow_large_objects));
}
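A usage sketch of the extended builder (illustrative, assuming a context that
exposes a SimplifiedOperatorBuilder via simplified()); the DCHECK above rejects
only a young-generation request with AllowLargeObjects::kTrue while
FLAG_young_generation_large_objects is off:

// Illustrative only: an old-space raw allocation that may be large is always
// allowed; a young-space one additionally requires
// FLAG_young_generation_large_objects.
const Operator* op = simplified()->AllocateRaw(
    Type::Any(), AllocationType::kOld, AllowLargeObjects::kTrue);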
const Operator* SimplifiedOperatorBuilder::StringCodePointAt(
......
......@@ -480,15 +480,21 @@ bool IsRestLengthOf(const Operator* op) V8_WARN_UNUSED_RESULT;
class AllocateParameters {
public:
AllocateParameters(Type type, AllocationType allocation_type)
: type_(type), allocation_type_(allocation_type) {}
AllocateParameters(
Type type, AllocationType allocation_type,
AllowLargeObjects allow_large_objects = AllowLargeObjects::kFalse)
: type_(type),
allocation_type_(allocation_type),
allow_large_objects_(allow_large_objects) {}
Type type() const { return type_; }
AllocationType allocation_type() const { return allocation_type_; }
AllowLargeObjects allow_large_objects() const { return allow_large_objects_; }
private:
Type type_;
AllocationType allocation_type_;
AllowLargeObjects allow_large_objects_;
};
bool IsCheckedWithFeedback(const Operator* op);
......@@ -499,6 +505,9 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, AllocateParameters);
bool operator==(AllocateParameters const&, AllocateParameters const&);
const AllocateParameters& AllocateParametersOf(const Operator* op)
V8_WARN_UNUSED_RESULT;
AllocationType AllocationTypeOf(const Operator* op) V8_WARN_UNUSED_RESULT;
Type AllocateTypeOf(const Operator* op) V8_WARN_UNUSED_RESULT;
......@@ -789,7 +798,8 @@ class V8_EXPORT_PRIVATE SimplifiedOperatorBuilder final
const Operator* Allocate(Type type,
AllocationType allocation = AllocationType::kYoung);
const Operator* AllocateRaw(
Type type, AllocationType allocation = AllocationType::kYoung);
Type type, AllocationType allocation = AllocationType::kYoung,
AllowLargeObjects allow_large_objects = AllowLargeObjects::kFalse);
const Operator* LoadFieldByIndex();
const Operator* LoadField(FieldAccess const&);
......
......@@ -761,6 +761,8 @@ inline std::ostream& operator<<(std::ostream& os, WriteBarrierKind kind) {
UNREACHABLE();
}
enum class AllowLargeObjects { kFalse, kTrue };
enum MinimumCapacity {
USE_DEFAULT_MINIMUM_CAPACITY,
USE_CUSTOM_MINIMUM_CAPACITY
......