Commit e1716bec authored by Wenyu Zhao, committed by V8 LUCI CQ

[heap] Add a global allocation site tracking flag

This CL adds a v8_allocation_site_tracking flag to control the allocation and
tracking of memento objects.

Disables FLAG_allocation_site_pretenuring if v8_allocation_site_tracking
is disabled.

v8_enable_single_generation implies !v8_allocation_site_tracking by default.

Change-Id: Ib07528bd37d91de6bb6ea0bfea1699be4e17fae9
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2897326
Commit-Queue: Wenyu Zhao <wenyu.zhao@anu.edu.au>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
Cr-Commit-Position: refs/heads/master@{#74879}
parent 0f9e351f
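
Before the diff, a minimal standalone sketch of the gating pattern this CL uses: the GN arg becomes a preprocessor define, the define becomes a compile-time boolean, and call sites bail out early when it is false. ShouldTrackSite below is a hypothetical stand-in for helpers such as AllocationSite::ShouldTrack, not code from this CL.

// Hypothetical, simplified sketch of the compile-time gate.
// The define is set by the build when v8_allocation_site_tracking = true.
#include <cstdio>

#ifdef V8_ALLOCATION_SITE_TRACKING
#define V8_ALLOCATION_SITE_TRACKING_BOOL true
#else
#define V8_ALLOCATION_SITE_TRACKING_BOOL false
#endif

// With tracking compiled out, the constant folds to false and the
// memento/allocation-site path is skipped entirely.
bool ShouldTrackSite(bool is_smi_elements_kind) {
  if (!V8_ALLOCATION_SITE_TRACKING_BOOL) return false;
  return is_smi_elements_kind;
}

int main() {
  std::printf("track: %d\n", ShouldTrackSite(true));
  return 0;
}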
@@ -322,6 +322,9 @@ declare_args() {
# Enable allocation folding globally (sets -dV8_ALLOCATION_FOLDING).
# When it's disabled, the --turbo-allocation-folding runtime flag will be ignored.
v8_enable_allocation_folding = true
# Enable global allocation site tracking.
v8_allocation_site_tracking = true
}
# Derived defaults.
@@ -392,6 +395,9 @@ if (v8_enable_third_party_heap) {
v8_enable_pointer_compression = false
v8_enable_pointer_compression_shared_cage = false
}
if (v8_enable_single_generation) {
v8_allocation_site_tracking = false
}
assert(!v8_enable_concurrent_marking || v8_enable_atomic_object_field_writes,
"Concurrent marking requires atomic object field writes.")
assert(!v8_enable_concurrent_marking || v8_enable_atomic_marking_state,
@@ -457,9 +463,10 @@ assert(
!v8_enable_pointer_compression_shared_cage || v8_enable_pointer_compression,
"Can't share a pointer compression cage if pointers aren't compressed")
assert(!v8_enable_pointer_compression_shared_cage || v8_current_cpu == "x64" ||
v8_current_cpu == "arm64" || v8_current_cpu == "riscv64",
"Sharing a pointer compression cage is only supported on x64,arm64 and riscv64")
assert(
!v8_enable_pointer_compression_shared_cage || v8_current_cpu == "x64" ||
v8_current_cpu == "arm64" || v8_current_cpu == "riscv64",
"Sharing a pointer compression cage is only supported on x64,arm64 and riscv64")
assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")
@@ -875,6 +882,9 @@ config("features") {
if (v8_enable_allocation_folding) {
defines += [ "V8_ALLOCATION_FOLDING" ]
}
if (v8_allocation_site_tracking) {
defines += [ "V8_ALLOCATION_SITE_TRACKING" ]
}
}
config("toolchain") {
@@ -474,7 +474,8 @@ TNode<JSArray> ConstructorBuiltinsAssembler::CreateShallowArrayLiteral(
TNode<AllocationSite> allocation_site = CAST(maybe_allocation_site);
TNode<JSArray> boilerplate = CAST(LoadBoilerplate(allocation_site));
if (allocation_site_mode == TRACK_ALLOCATION_SITE) {
if (allocation_site_mode == TRACK_ALLOCATION_SITE &&
V8_ALLOCATION_SITE_TRACKING_BOOL) {
return CloneFastJSArray(context, boilerplate, allocation_site);
} else {
return CloneFastJSArray(context, boilerplate);
@@ -515,9 +516,12 @@ TNode<JSArray> ConstructorBuiltinsAssembler::CreateEmptyArrayLiteral(
TNode<IntPtrT> zero_intptr = IntPtrConstant(0);
TNode<Smi> zero = SmiConstant(0);
Comment("Allocate JSArray");
TNode<JSArray> result =
AllocateJSArray(GetInitialFastElementsKind(), array_map, zero_intptr,
zero, allocation_site.value());
base::Optional<TNode<AllocationSite>> site =
V8_ALLOCATION_SITE_TRACKING_BOOL
? base::make_optional(allocation_site.value())
: base::nullopt;
TNode<JSArray> result = AllocateJSArray(GetInitialFastElementsKind(),
array_map, zero_intptr, zero, site);
Goto(&done);
BIND(&done);
@@ -602,6 +606,7 @@ TNode<HeapObject> ConstructorBuiltinsAssembler::CreateShallowObjectLiteral(
TNode<IntPtrT> allocation_size = instance_size;
bool needs_allocation_memento = FLAG_allocation_site_pretenuring;
if (needs_allocation_memento) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
// Prepare for inner-allocating the AllocationMemento.
allocation_size =
IntPtrAdd(instance_size, IntPtrConstant(AllocationMemento::kSize));
@@ -3917,6 +3917,7 @@ TNode<JSArray> CodeStubAssembler::AllocateJSArray(
int base_size = array_header_size;
if (allocation_site) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
base_size += AllocationMemento::kSize;
}
@@ -3969,6 +3970,7 @@ CodeStubAssembler::AllocateUninitializedJSArrayWithElements(
{
int base_size = array_header_size;
if (allocation_site) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
base_size += AllocationMemento::kSize;
}
@@ -4053,6 +4055,7 @@ TNode<JSArray> CodeStubAssembler::AllocateUninitializedJSArray(
RootIndex::kEmptyFixedArray);
if (allocation_site) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
InitializeAllocationMemento(array, IntPtrConstant(JSArray::kHeaderSize),
*allocation_site);
}
@@ -5347,6 +5350,7 @@ template TNode<FixedArrayBase> CodeStubAssembler::GrowElementsCapacity<IntPtrT>(
void CodeStubAssembler::InitializeAllocationMemento(
TNode<HeapObject> base, TNode<IntPtrT> base_allocation_size,
TNode<AllocationSite> allocation_site) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
Comment("[Initialize AllocationMemento");
TNode<HeapObject> memento = InnerAllocate(base, base_allocation_size);
StoreMapNoWriteBarrier(memento, RootIndex::kAllocationMementoMap);
@@ -11165,6 +11169,7 @@ void CodeStubAssembler::TransitionElementsKind(TNode<JSObject> object,
void CodeStubAssembler::TrapAllocationMemento(TNode<JSObject> object,
Label* memento_found) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
Comment("[ TrapAllocationMemento");
Label no_memento_found(this);
Label top_check(this), map_check(this);
@@ -518,6 +518,17 @@ DEFINE_BOOL(trace_code_dependencies, false, "trace code dependencies")
// Depend on --trace-deopt-verbose for reporting dependency invalidations.
DEFINE_IMPLICATION(trace_code_dependencies, trace_deopt_verbose)
#ifdef V8_ALLOCATION_SITE_TRACKING
#define V8_ALLOCATION_SITE_TRACKING_BOOL true
#else
#define V8_ALLOCATION_SITE_TRACKING_BOOL false
#endif
DEFINE_BOOL_READONLY(allocation_site_tracking, V8_ALLOCATION_SITE_TRACKING_BOOL,
"Enable allocation site tracking")
DEFINE_NEG_NEG_IMPLICATION(allocation_site_tracking,
allocation_site_pretenuring)
// Flags for experimental implementation features.
DEFINE_BOOL(allocation_site_pretenuring, true,
"pretenure with allocation sites")
@@ -264,7 +264,10 @@ HeapObject Factory::AllocateRawWithAllocationSite(
Handle<AllocationSite> allocation_site) {
DCHECK(map->instance_type() != MAP_TYPE);
int size = map->instance_size();
if (!allocation_site.is_null()) size += AllocationMemento::kSize;
if (!allocation_site.is_null()) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
size += AllocationMemento::kSize;
}
HeapObject result =
isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(size, allocation);
WriteBarrierMode write_barrier_mode = allocation == AllocationType::kYoung
@@ -281,6 +284,7 @@ HeapObject Factory::AllocateRawWithAllocationSite(
void Factory::InitializeAllocationMemento(AllocationMemento memento,
AllocationSite allocation_site) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
memento.set_map_after_allocation(*allocation_memento_map(),
SKIP_WRITE_BARRIER);
memento.set_allocation_site(allocation_site, SKIP_WRITE_BARRIER);
@@ -1663,8 +1667,11 @@ Handle<JSObject> Factory::CopyJSObjectWithAllocationSite(
DCHECK(site.is_null() || AllocationSite::CanTrack(instance_type));
int object_size = map->instance_size();
int adjusted_object_size =
site.is_null() ? object_size : object_size + AllocationMemento::kSize;
int adjusted_object_size = object_size;
if (!site.is_null()) {
DCHECK(V8_ALLOCATION_SITE_TRACKING_BOOL);
adjusted_object_size += AllocationMemento::kSize;
}
HeapObject raw_clone = isolate()->heap()->AllocateRawWith<Heap::kRetryOrFail>(
adjusted_object_size, AllocationType::kYoung);
@@ -122,10 +122,12 @@ bool AllocationSite::PointsToLiteral() const {
// Heuristic: We only need to create allocation site info if the boilerplate
// elements kind is the initial elements kind.
bool AllocationSite::ShouldTrack(ElementsKind boilerplate_elements_kind) {
if (!V8_ALLOCATION_SITE_TRACKING_BOOL) return false;
return IsSmiElementsKind(boilerplate_elements_kind);
}
inline bool AllocationSite::CanTrack(InstanceType type) {
if (!V8_ALLOCATION_SITE_TRACKING_BOOL) return false;
if (FLAG_allocation_site_pretenuring) {
// TurboFan doesn't care at all about String pretenuring feedback,
// so don't bother even trying to track that.
@@ -5222,6 +5222,7 @@ bool AllocationSite::IsNested() {
}
bool AllocationSite::ShouldTrack(ElementsKind from, ElementsKind to) {
if (!V8_ALLOCATION_SITE_TRACKING_BOOL) return false;
return IsMoreGeneralElementsKindTransition(from, to);
}
@@ -3949,7 +3949,7 @@ static int SlimAllocationSiteCount(Heap* heap) {
}
TEST(EnsureAllocationSiteDependentCodesProcessed) {
if (FLAG_always_opt || !FLAG_opt) return;
if (FLAG_always_opt || !FLAG_opt || !V8_ALLOCATION_SITE_TRACKING_BOOL) return;
FLAG_allow_natives_syntax = true;
CcTest::InitializeVM();
Isolate* isolate = CcTest::i_isolate();
@@ -1515,6 +1515,7 @@
'const-dict-tracking': [SKIP],
'compiler/native-context-specialization-hole-check': [SKIP],
'compiler/test-literal-map-migration': [SKIP],
'compiler/deopt-pretenure': [SKIP],
}], # single_generation
################################################################################