Commit 7d3c9bce authored by Tobias Tebbi, committed by Commit Bot

[csa][turbofan] Prevent large object allocations unless allowed

The slow path emitted by the memory optimizer now checks whether large
object allocations are allowed before going ahead and allocating a
large object. This is important because manual allocation folding
in CSA must not be performed on a large object.

Bug: v8:9388
Change-Id: I74b840c9c9276bd17611842e0eae7b0e58b142d2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/1675960
Commit-Queue: Tobias Tebbi <tebbi@chromium.org>
Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#62605}
parent 54a36366
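
For orientation before the diff: the new builtins pass their allocation
flags to the runtime as a Smi built from two one-bit fields, declared in
runtime.h at the bottom of this change. The following is a minimal,
self-contained sketch of that encoding; the BitField template here is a
simplified stand-in assumed to match V8's encode/decode semantics, not
the real implementation.

#include <cassert>

// Simplified stand-in for V8's BitField (an assumption for this sketch;
// the real flag classes are declared in src/runtime/runtime.h, see the
// last hunk of this diff).
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr int encode(T value) {
    return static_cast<int>(value) << kShift;
  }
  static constexpr T decode(int bits) {
    return static_cast<T>((bits >> kShift) & ((1 << kSize) - 1));
  }
};

class AllocateDoubleAlignFlag : public BitField<bool, 0, 1> {};
class AllowLargeObjectAllocationFlag : public BitField<bool, 1, 1> {};

int main() {
  // The flags word built by the non-Regular AllocateInOldGeneration
  // builtin below: no double alignment, large objects allowed.
  int flags = AllocateDoubleAlignFlag::encode(false) |
              AllowLargeObjectAllocationFlag::encode(true);
  assert(!AllocateDoubleAlignFlag::decode(flags));
  assert(AllowLargeObjectAllocationFlag::decode(flags));
  return 0;
}

The runtime functions reverse this with decode(), as the
Runtime_AllocateInYoungGeneration hunk below shows.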
......@@ -170,7 +170,9 @@ namespace internal {
   \
   /* Adapters for Turbofan into runtime */ \
   TFC(AllocateInYoungGeneration, Allocate) \
+  TFC(AllocateRegularInYoungGeneration, Allocate) \
   TFC(AllocateInOldGeneration, Allocate) \
+  TFC(AllocateRegularInOldGeneration, Allocate) \
   \
   /* TurboFan support builtins */ \
   TFS(CopyFastSmiOrObjectElements, kObject) \
......
......@@ -810,17 +810,49 @@ TF_BUILTIN(AdaptorWithBuiltinExitFrame, CodeStubAssembler) {
 TF_BUILTIN(AllocateInYoungGeneration, CodeStubAssembler) {
   TNode<IntPtrT> requested_size =
       UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+  CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+
+  TNode<Smi> allocation_flags =
+      SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+                               AllowLargeObjectAllocationFlag::encode(true)));
   TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
-                  SmiFromIntPtr(requested_size));
+                  SmiFromIntPtr(requested_size), allocation_flags);
 }
 
+TF_BUILTIN(AllocateRegularInYoungGeneration, CodeStubAssembler) {
+  TNode<IntPtrT> requested_size =
+      UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+  CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+
+  TNode<Smi> allocation_flags =
+      SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+                               AllowLargeObjectAllocationFlag::encode(false)));
+  TailCallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
+                  SmiFromIntPtr(requested_size), allocation_flags);
+}
+
 TF_BUILTIN(AllocateInOldGeneration, CodeStubAssembler) {
   TNode<IntPtrT> requested_size =
       UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+  CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+
+  TNode<Smi> runtime_flags =
+      SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+                               AllowLargeObjectAllocationFlag::encode(true)));
   TailCallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
-                  SmiFromIntPtr(requested_size), SmiConstant(0));
+                  SmiFromIntPtr(requested_size), runtime_flags);
 }
 
+TF_BUILTIN(AllocateRegularInOldGeneration, CodeStubAssembler) {
+  TNode<IntPtrT> requested_size =
+      UncheckedCast<IntPtrT>(Parameter(Descriptor::kRequestedSize));
+  CSA_CHECK(this, IsValidPositiveSmi(requested_size));
+
+  TNode<Smi> runtime_flags =
+      SmiConstant(Smi::FromInt(AllocateDoubleAlignFlag::encode(false) |
+                               AllowLargeObjectAllocationFlag::encode(false)));
+  TailCallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
+                  SmiFromIntPtr(requested_size), runtime_flags);
+}
+
 TF_BUILTIN(Abort, CodeStubAssembler) {
......
......@@ -1127,20 +1127,23 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
   Label runtime_call(this, Label::kDeferred), no_runtime_call(this), out(this);
 
   bool needs_double_alignment = flags & kDoubleAlignment;
+  bool allow_large_object_allocation = flags & kAllowLargeObjectAllocation;
 
-  if (flags & kAllowLargeObjectAllocation) {
+  if (allow_large_object_allocation) {
     Label next(this);
     GotoIf(IsRegularHeapObjectSize(size_in_bytes), &next);
 
+    TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
+        AllocateDoubleAlignFlag::encode(needs_double_alignment) |
+        AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation)));
     if (FLAG_young_generation_large_objects) {
-      result = CallRuntime(Runtime::kAllocateInYoungGeneration,
-                           NoContextConstant(), SmiTag(size_in_bytes));
+      result =
+          CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
+                      SmiTag(size_in_bytes), runtime_flags);
     } else {
-      TNode<Smi> alignment_flag = SmiConstant(Smi::FromInt(
-          AllocateDoubleAlignFlag::encode(needs_double_alignment)));
       result =
           CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
-                      SmiTag(size_in_bytes), alignment_flag);
+                      SmiTag(size_in_bytes), runtime_flags);
     }
     Goto(&out);
......@@ -1167,15 +1170,17 @@ TNode<HeapObject> CodeStubAssembler::AllocateRaw(TNode<IntPtrT> size_in_bytes,
   BIND(&runtime_call);
   {
+    TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
+        AllocateDoubleAlignFlag::encode(needs_double_alignment) |
+        AllowLargeObjectAllocationFlag::encode(allow_large_object_allocation)));
     if (flags & kPretenured) {
-      TNode<Smi> runtime_flags = SmiConstant(Smi::FromInt(
-          AllocateDoubleAlignFlag::encode(needs_double_alignment)));
       result =
           CallRuntime(Runtime::kAllocateInOldGeneration, NoContextConstant(),
                       SmiTag(size_in_bytes), runtime_flags);
     } else {
-      result = CallRuntime(Runtime::kAllocateInYoungGeneration,
-                           NoContextConstant(), SmiTag(size_in_bytes));
+      result =
+          CallRuntime(Runtime::kAllocateInYoungGeneration, NoContextConstant(),
+                      SmiTag(size_in_bytes), runtime_flags);
     }
     Goto(&out);
   }
......
......@@ -95,22 +95,24 @@ namespace compiler {
   V(Uint32Mod) \
   V(Uint32Div)
 
-#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
-  V(TrueConstant) \
-  V(FalseConstant) \
-  V(NullConstant) \
-  V(BigIntMapConstant) \
-  V(BooleanMapConstant) \
-  V(HeapNumberMapConstant) \
-  V(NoContextConstant) \
-  V(EmptyStringConstant) \
-  V(UndefinedConstant) \
-  V(TheHoleConstant) \
-  V(FixedArrayMapConstant) \
-  V(FixedDoubleArrayMapConstant) \
-  V(ToNumberBuiltinConstant) \
-  V(AllocateInYoungGenerationStubConstant) \
-  V(AllocateInOldGenerationStubConstant)
+#define JSGRAPH_SINGLETON_CONSTANT_LIST(V) \
+  V(TrueConstant) \
+  V(FalseConstant) \
+  V(NullConstant) \
+  V(BigIntMapConstant) \
+  V(BooleanMapConstant) \
+  V(HeapNumberMapConstant) \
+  V(NoContextConstant) \
+  V(EmptyStringConstant) \
+  V(UndefinedConstant) \
+  V(TheHoleConstant) \
+  V(FixedArrayMapConstant) \
+  V(FixedDoubleArrayMapConstant) \
+  V(ToNumberBuiltinConstant) \
+  V(AllocateInYoungGenerationStubConstant) \
+  V(AllocateRegularInYoungGenerationStubConstant) \
+  V(AllocateInOldGenerationStubConstant) \
+  V(AllocateRegularInOldGenerationStubConstant)
 
 class GraphAssembler;
......
......@@ -128,9 +128,17 @@ void JSGraph::GetCachedNodes(NodeVector* nodes) {
 DEFINE_GETTER(AllocateInYoungGenerationStubConstant,
               HeapConstant(BUILTIN_CODE(isolate(), AllocateInYoungGeneration)))
 
+DEFINE_GETTER(AllocateRegularInYoungGenerationStubConstant,
+              HeapConstant(BUILTIN_CODE(isolate(),
+                                        AllocateRegularInYoungGeneration)))
+
 DEFINE_GETTER(AllocateInOldGenerationStubConstant,
               HeapConstant(BUILTIN_CODE(isolate(), AllocateInOldGeneration)))
 
+DEFINE_GETTER(AllocateRegularInOldGenerationStubConstant,
+              HeapConstant(BUILTIN_CODE(isolate(),
+                                        AllocateRegularInOldGeneration)))
+
 DEFINE_GETTER(ArrayConstructorStubConstant,
               HeapConstant(BUILTIN_CODE(isolate(), ArrayConstructorImpl)))
......
......@@ -80,31 +80,33 @@ class V8_EXPORT_PRIVATE JSGraph : public MachineGraph {
   void GetCachedNodes(NodeVector* nodes);
 
   // Cached global nodes.
-#define CACHED_GLOBAL_LIST(V) \
-  V(AllocateInYoungGenerationStubConstant) \
-  V(AllocateInOldGenerationStubConstant) \
-  V(ArrayConstructorStubConstant) \
-  V(BigIntMapConstant) \
-  V(BooleanMapConstant) \
-  V(ToNumberBuiltinConstant) \
-  V(EmptyFixedArrayConstant) \
-  V(EmptyStringConstant) \
-  V(FixedArrayMapConstant) \
-  V(PropertyArrayMapConstant) \
-  V(FixedDoubleArrayMapConstant) \
-  V(HeapNumberMapConstant) \
-  V(OptimizedOutConstant) \
-  V(StaleRegisterConstant) \
-  V(UndefinedConstant) \
-  V(TheHoleConstant) \
-  V(TrueConstant) \
-  V(FalseConstant) \
-  V(NullConstant) \
-  V(ZeroConstant) \
-  V(OneConstant) \
-  V(NaNConstant) \
-  V(MinusOneConstant) \
-  V(EmptyStateValues) \
+#define CACHED_GLOBAL_LIST(V) \
+  V(AllocateInYoungGenerationStubConstant) \
+  V(AllocateRegularInYoungGenerationStubConstant) \
+  V(AllocateInOldGenerationStubConstant) \
+  V(AllocateRegularInOldGenerationStubConstant) \
+  V(ArrayConstructorStubConstant) \
+  V(BigIntMapConstant) \
+  V(BooleanMapConstant) \
+  V(ToNumberBuiltinConstant) \
+  V(EmptyFixedArrayConstant) \
+  V(EmptyStringConstant) \
+  V(FixedArrayMapConstant) \
+  V(PropertyArrayMapConstant) \
+  V(FixedDoubleArrayMapConstant) \
+  V(HeapNumberMapConstant) \
+  V(OptimizedOutConstant) \
+  V(StaleRegisterConstant) \
+  V(UndefinedConstant) \
+  V(TheHoleConstant) \
+  V(TrueConstant) \
+  V(FalseConstant) \
+  V(NullConstant) \
+  V(ZeroConstant) \
+  V(OneConstant) \
+  V(NaNConstant) \
+  V(MinusOneConstant) \
+  V(EmptyStateValues) \
   V(SingleDeadTypedStateValues)
 
   // Cached global node accessor methods.
......
......@@ -2093,7 +2093,9 @@ void JSHeapBroker::SerializeShareableObjects() {
   {
     Builtins::Name builtins[] = {
         Builtins::kAllocateInYoungGeneration,
+        Builtins::kAllocateRegularInYoungGeneration,
         Builtins::kAllocateInOldGeneration,
+        Builtins::kAllocateRegularInOldGeneration,
         Builtins::kArgumentsAdaptorTrampoline,
         Builtins::kArrayConstructorImpl,
         Builtins::kCallFunctionForwardVarargs,
......
......@@ -298,6 +298,21 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
     }
   }
 
+  Node* allocate_builtin;
+  if (allocation_type == AllocationType::kYoung) {
+    if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
+      allocate_builtin = __ AllocateInYoungGenerationStubConstant();
+    } else {
+      allocate_builtin = __ AllocateRegularInYoungGenerationStubConstant();
+    }
+  } else {
+    if (allocation.allow_large_objects() == AllowLargeObjects::kTrue) {
+      allocate_builtin = __ AllocateInOldGenerationStubConstant();
+    } else {
+      allocate_builtin = __ AllocateRegularInOldGenerationStubConstant();
+    }
+  }
+
   // Determine the top/limit addresses.
   Node* top_address = __ ExternalConstant(
       allocation_type == AllocationType::kYoung
......@@ -373,11 +388,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
     __ Bind(&call_runtime);
     {
-      Node* target = allocation_type == AllocationType::kYoung
-                         ? __
-                         AllocateInYoungGenerationStubConstant()
-                         : __
-                         AllocateInOldGenerationStubConstant();
       if (!allocate_operator_.is_set()) {
         auto descriptor = AllocateDescriptor{};
         auto call_descriptor = Linkage::GetStubCallDescriptor(
......@@ -386,7 +396,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
         allocate_operator_.set(common()->Call(call_descriptor));
       }
       Node* vfalse = __ BitcastTaggedToWord(
-          __ Call(allocate_operator_.get(), target, size));
+          __ Call(allocate_operator_.get(), allocate_builtin, size));
       vfalse = __ IntSub(vfalse, __ IntPtrConstant(kHeapObjectTag));
       __ Goto(&done, vfalse);
     }
......@@ -436,11 +446,6 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
                        __ IntAdd(top, __ IntPtrConstant(kHeapObjectTag))));
 
     __ Bind(&call_runtime);
-    Node* target = allocation_type == AllocationType::kYoung
-                       ? __
-                       AllocateInYoungGenerationStubConstant()
-                       : __
-                       AllocateInOldGenerationStubConstant();
     if (!allocate_operator_.is_set()) {
       auto descriptor = AllocateDescriptor{};
       auto call_descriptor = Linkage::GetStubCallDescriptor(
......@@ -448,7 +453,7 @@ void MemoryOptimizer::VisitAllocateRaw(Node* node,
         CallDescriptor::kCanUseRoots, Operator::kNoThrow);
       allocate_operator_.set(common()->Call(call_descriptor));
     }
-    __ Goto(&done, __ Call(allocate_operator_.get(), target, size));
+    __ Goto(&done, __ Call(allocate_operator_.get(), allocate_builtin, size));
 
     __ Bind(&done);
     value = done.PhiAt(0);
......
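
The memory-optimizer hunks above are the core of the fix: the allocation
builtin is now chosen up front from both the generation and the
allow-large-objects bit, so the deferred slow path can no longer hand out
a large object at a site where CSA performed manual allocation folding.
A self-contained sketch of that decision table, using hypothetical enum
stand-ins for V8's AllocationType, AllowLargeObjects, and builtin handles:

#include <cassert>

// Hypothetical stand-ins for the V8 types involved (names mirror the diff).
enum class AllocationType { kYoung, kOld };
enum class AllowLargeObjects { kFalse, kTrue };
enum class Builtin {
  kAllocateInYoungGeneration,
  kAllocateRegularInYoungGeneration,
  kAllocateInOldGeneration,
  kAllocateRegularInOldGeneration,
};

// Mirrors the selection added to MemoryOptimizer::VisitAllocateRaw: the
// "Regular" builtins refuse large-object requests, so a folded
// allocation site can never silently produce a large object.
Builtin SelectAllocateBuiltin(AllocationType type, AllowLargeObjects large) {
  if (type == AllocationType::kYoung) {
    return large == AllowLargeObjects::kTrue
               ? Builtin::kAllocateInYoungGeneration
               : Builtin::kAllocateRegularInYoungGeneration;
  }
  return large == AllowLargeObjects::kTrue
             ? Builtin::kAllocateInOldGeneration
             : Builtin::kAllocateRegularInOldGeneration;
}

int main() {
  assert(SelectAllocateBuiltin(AllocationType::kYoung,
                               AllowLargeObjects::kFalse) ==
         Builtin::kAllocateRegularInYoungGeneration);
  assert(SelectAllocateBuiltin(AllocationType::kOld,
                               AllowLargeObjects::kTrue) ==
         Builtin::kAllocateInOldGeneration);
  return 0;
}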
......@@ -772,6 +772,8 @@ DebugInfo::SideEffectState BuiltinGetSideEffectState(Builtins::Name id) {
     case Builtins::kStrictPoisonPillThrower:
     case Builtins::kAllocateInYoungGeneration:
     case Builtins::kAllocateInOldGeneration:
+    case Builtins::kAllocateRegularInYoungGeneration:
+    case Builtins::kAllocateRegularInOldGeneration:
       return DebugInfo::kHasNoSideEffect;
 
     // Set builtins.
......
......@@ -287,13 +287,20 @@ RUNTIME_FUNCTION(Runtime_BytecodeBudgetInterrupt) {
 RUNTIME_FUNCTION(Runtime_AllocateInYoungGeneration) {
   HandleScope scope(isolate);
-  DCHECK_EQ(1, args.length());
+  DCHECK_EQ(2, args.length());
   CONVERT_SMI_ARG_CHECKED(size, 0);
+  CONVERT_SMI_ARG_CHECKED(flags, 1);
+  bool double_align = AllocateDoubleAlignFlag::decode(flags);
+  bool allow_large_object_allocation =
+      AllowLargeObjectAllocationFlag::decode(flags);
   CHECK(IsAligned(size, kTaggedSize));
   CHECK_GT(size, 0);
-  CHECK(FLAG_young_generation_large_objects ||
-        size <= kMaxRegularHeapObjectSize);
-  return *isolate->factory()->NewFillerObject(size, false,
+  if (!allow_large_object_allocation) {
+    CHECK(size <= kMaxRegularHeapObjectSize);
+  }
+  return *isolate->factory()->NewFillerObject(size, double_align,
                                               AllocationType::kYoung);
 }
......@@ -302,9 +309,14 @@ RUNTIME_FUNCTION(Runtime_AllocateInOldGeneration) {
   DCHECK_EQ(2, args.length());
   CONVERT_SMI_ARG_CHECKED(size, 0);
   CONVERT_SMI_ARG_CHECKED(flags, 1);
+  bool double_align = AllocateDoubleAlignFlag::decode(flags);
+  bool allow_large_object_allocation =
+      AllowLargeObjectAllocationFlag::decode(flags);
   CHECK(IsAligned(size, kTaggedSize));
   CHECK_GT(size, 0);
-  bool double_align = AllocateDoubleAlignFlag::decode(flags);
+  if (!allow_large_object_allocation) {
+    CHECK(size <= kMaxRegularHeapObjectSize);
+  }
   return *isolate->factory()->NewFillerObject(size, double_align,
                                               AllocationType::kOld);
 }
......
......@@ -203,7 +203,7 @@ namespace internal {
 #define FOR_EACH_INTRINSIC_INTERNAL(F, I) \
   F(AccessCheck, 1, 1) \
   F(AllocateByteArray, 1, 1) \
-  F(AllocateInYoungGeneration, 1, 1) \
+  F(AllocateInYoungGeneration, 2, 1) \
   F(AllocateInOldGeneration, 2, 1) \
   F(AllocateSeqOneByteString, 1, 1) \
   F(AllocateSeqTwoByteString, 1, 1) \
......@@ -769,6 +769,8 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream&, Runtime::FunctionId);
 class AllocateDoubleAlignFlag : public BitField<bool, 0, 1> {};
+
+class AllowLargeObjectAllocationFlag : public BitField<bool, 1, 1> {};
 
 class DeclareGlobalsEvalFlag : public BitField<bool, 0, 1> {};
 
 // A set of bits returned by Runtime_GetOptimizationStatus.
......