Commit cc533716 authored by Clemens Backes's avatar Clemens Backes Committed by Commit Bot

Revert "[test] Rework Allocation Site Pretenruing Tests"

This reverts commit df52b65d.

Reason for revert: fails on TSan: https://ci.chromium.org/ui/p/v8/builders/ci/V8%20Linux64%20TSAN/36351/overview

Original change's description:
> [test] Rework Allocation Site Pretenruing Tests
>
> - Add %PretenureAllocationSite to manually force pretenuring for an
> allocation site during the next GC.
> - Replace cctest test-compiler/DecideToPretenureDuringCompilation, which
> was not triggering the tested behaviour anymore with mjsunit test
> - Add tests for deoptimizations due to pretenuring decision changes
> during OSR.
>
> Bug: chromium:1193094
> Change-Id: I5d6c35e2914b705bf96f27051a4a286413b6fe26
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2825593
> Commit-Queue: Patrick Thier <pthier@chromium.org>
> Reviewed-by: Maya Lekova <mslekova@chromium.org>
> Reviewed-by: Dominik Inführ <dinfuehr@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#74032}

Bug: chromium:1193094
Change-Id: I0cd526984d467c4e1e3637ac642f630e3cffea41
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2835715
Auto-Submit: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Bot-Commit: Rubber Stamper <rubber-stamper@appspot.gserviceaccount.com>
Cr-Commit-Position: refs/heads/master@{#74039}
parent 7c7cdec5
...@@ -980,12 +980,6 @@ inline bool MakePretenureDecision( ...@@ -980,12 +980,6 @@ inline bool MakePretenureDecision(
return false; return false;
} }
// Clear feedback calculation fields until the next gc.
// Resets both memento counters on |site| so that pretenuring feedback
// accumulated during the next GC cycle starts from zero.
inline void ResetPretenuringFeedback(AllocationSite site) {
  site.set_memento_found_count(0);
  site.set_memento_create_count(0);
}
inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site, inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
bool maximum_size_scavenge) { bool maximum_size_scavenge) {
bool deopt = false; bool deopt = false;
...@@ -1013,34 +1007,11 @@ inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site, ...@@ -1013,34 +1007,11 @@ inline bool DigestPretenuringFeedback(Isolate* isolate, AllocationSite site,
site.PretenureDecisionName(site.pretenure_decision())); site.PretenureDecisionName(site.pretenure_decision()));
} }
ResetPretenuringFeedback(site); // Clear feedback calculation fields until the next gc.
return deopt; site.set_memento_found_count(0);
} site.set_memento_create_count(0);
bool PretenureAllocationSiteManually(Isolate* isolate, AllocationSite site) {
AllocationSite::PretenureDecision current_decision =
site.pretenure_decision();
bool deopt = true;
if (current_decision == AllocationSite::kUndecided ||
current_decision == AllocationSite::kMaybeTenure) {
site.set_deopt_dependent_code(true);
site.set_pretenure_decision(AllocationSite::kTenure);
} else {
deopt = false;
}
if (FLAG_trace_pretenuring_statistics) {
PrintIsolate(isolate,
"pretenuring manually requested: AllocationSite(%p): "
"%s => %s\n",
reinterpret_cast<void*>(site.ptr()),
site.PretenureDecisionName(current_decision),
site.PretenureDecisionName(site.pretenure_decision()));
}
ResetPretenuringFeedback(site);
return deopt; return deopt;
} }
} // namespace } // namespace
void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) { void Heap::RemoveAllocationSitePretenuringFeedback(AllocationSite site) {
...@@ -1088,18 +1059,7 @@ void Heap::ProcessPretenuringFeedback() { ...@@ -1088,18 +1059,7 @@ void Heap::ProcessPretenuringFeedback() {
} }
} }
// Step 2: Pretenure allocation sites for manual requests. // Step 2: Deopt maybe tenured allocation sites if necessary.
if (allocation_sites_to_pretenure_) {
while (!allocation_sites_to_pretenure_->empty()) {
auto site = allocation_sites_to_pretenure_->Pop();
if (PretenureAllocationSiteManually(isolate_, site)) {
trigger_deoptimization = true;
}
}
allocation_sites_to_pretenure_.reset();
}
// Step 3: Deopt maybe tenured allocation sites if necessary.
bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites(); bool deopt_maybe_tenured = DeoptMaybeTenuredAllocationSites();
if (deopt_maybe_tenured) { if (deopt_maybe_tenured) {
ForeachAllocationSite( ForeachAllocationSite(
...@@ -1135,14 +1095,6 @@ void Heap::ProcessPretenuringFeedback() { ...@@ -1135,14 +1095,6 @@ void Heap::ProcessPretenuringFeedback() {
} }
} }
// Queues |site| to be pretenured during the next collection, independent of
// the memento feedback it has gathered (see ProcessPretenuringFeedback,
// which drains this list and forces the tenure decision).
void Heap::PretenureAllocationSiteOnNextCollection(AllocationSite site) {
  // The backing global-handle vector is created lazily on first use and is
  // released again after the next GC has consumed it.
  if (!allocation_sites_to_pretenure_) {
    allocation_sites_to_pretenure_.reset(
        new GlobalHandleVector<AllocationSite>(this));
  }
  allocation_sites_to_pretenure_->Push(site);
}
void Heap::InvalidateCodeDeoptimizationData(Code code) { void Heap::InvalidateCodeDeoptimizationData(Code code) {
CodePageMemoryModificationScope modification_scope(code); CodePageMemoryModificationScope modification_scope(code);
code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array()); code.set_deoptimization_data(ReadOnlyRoots(this).empty_fixed_array());
...@@ -5628,8 +5580,6 @@ void Heap::TearDown() { ...@@ -5628,8 +5580,6 @@ void Heap::TearDown() {
tracer_.reset(); tracer_.reset();
allocation_sites_to_pretenure_.reset();
for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) { for (int i = FIRST_MUTABLE_SPACE; i <= LAST_MUTABLE_SPACE; i++) {
delete space_[i]; delete space_[i];
space_[i] = nullptr; space_[i] = nullptr;
......
...@@ -72,8 +72,6 @@ class CppHeap; ...@@ -72,8 +72,6 @@ class CppHeap;
class GCIdleTimeHandler; class GCIdleTimeHandler;
class GCIdleTimeHeapState; class GCIdleTimeHeapState;
class GCTracer; class GCTracer;
template <typename T>
class GlobalHandleVector;
class GlobalSafepoint; class GlobalSafepoint;
class HeapObjectAllocationTracker; class HeapObjectAllocationTracker;
class HeapObjectsFilter; class HeapObjectsFilter;
...@@ -1455,12 +1453,6 @@ class Heap { ...@@ -1455,12 +1453,6 @@ class Heap {
void MergeAllocationSitePretenuringFeedback( void MergeAllocationSitePretenuringFeedback(
const PretenuringFeedbackMap& local_pretenuring_feedback); const PretenuringFeedbackMap& local_pretenuring_feedback);
// Adds an allocation site to the list of sites to be pretenured during the
// next collection. Added allocation sites are pretenured independent of
// their feedback.
V8_EXPORT_PRIVATE void PretenureAllocationSiteOnNextCollection(
AllocationSite site);
// =========================================================================== // ===========================================================================
// Allocation tracking. ====================================================== // Allocation tracking. ======================================================
// =========================================================================== // ===========================================================================
...@@ -2288,9 +2280,6 @@ class Heap { ...@@ -2288,9 +2280,6 @@ class Heap {
// forwarding pointers. // forwarding pointers.
PretenuringFeedbackMap global_pretenuring_feedback_; PretenuringFeedbackMap global_pretenuring_feedback_;
std::unique_ptr<GlobalHandleVector<AllocationSite>>
allocation_sites_to_pretenure_;
char trace_ring_buffer_[kTraceRingBufferSize]; char trace_ring_buffer_[kTraceRingBufferSize];
// Used as boolean. // Used as boolean.
......
...@@ -1027,21 +1027,6 @@ RUNTIME_FUNCTION(Runtime_InYoungGeneration) { ...@@ -1027,21 +1027,6 @@ RUNTIME_FUNCTION(Runtime_InYoungGeneration) {
return isolate->heap()->ToBoolean(ObjectInYoungGeneration(obj)); return isolate->heap()->ToBoolean(ObjectInYoungGeneration(obj));
} }
// Force pretenuring for the allocation site the passed object belongs to.
// Returns true if an AllocationMemento was found trailing the object and its
// site was queued for manual pretenuring; false if the object has no
// (findable) allocation site, in which case nothing is changed.
RUNTIME_FUNCTION(Runtime_PretenureAllocationSite) {
  // The raw-object memento lookup below must not be interrupted by a GC that
  // could move |object| or sweep the memento.
  DisallowGarbageCollection no_gc;
  DCHECK_EQ(1, args.length());
  CONVERT_ARG_CHECKED(JSObject, object, 0);
  Heap* heap = object.GetHeap();
  AllocationMemento memento =
      heap->FindAllocationMemento<Heap::kForRuntime>(object.map(), object);
  if (memento.is_null()) return ReadOnlyRoots(isolate).false_value();
  AllocationSite site = memento.GetAllocationSite();
  // The site will be unconditionally tenured when the next GC processes
  // pretenuring feedback.
  heap->PretenureAllocationSiteOnNextCollection(site);
  return ReadOnlyRoots(isolate).true_value();
}
namespace { namespace {
v8::ModifyCodeGenerationFromStringsResult DisallowCodegenFromStringsCallback( v8::ModifyCodeGenerationFromStringsResult DisallowCodegenFromStringsCallback(
......
...@@ -532,7 +532,6 @@ namespace internal { ...@@ -532,7 +532,6 @@ namespace internal {
F(OptimizeOsr, -1, 1) \ F(OptimizeOsr, -1, 1) \
F(NewRegExpWithBacktrackLimit, 3, 1) \ F(NewRegExpWithBacktrackLimit, 3, 1) \
F(PrepareFunctionForOptimization, -1, 1) \ F(PrepareFunctionForOptimization, -1, 1) \
F(PretenureAllocationSite, 1, 1) \
F(PrintWithNameForAssert, 2, 1) \ F(PrintWithNameForAssert, 2, 1) \
F(RunningInSimulator, 0, 1) \ F(RunningInSimulator, 0, 1) \
F(RuntimeEvaluateREPL, 1, 1) \ F(RuntimeEvaluateREPL, 1, 1) \
......
...@@ -929,6 +929,119 @@ TEST(DeepEagerCompilationPeakMemory) { ...@@ -929,6 +929,119 @@ TEST(DeepEagerCompilationPeakMemory) {
CHECK_LE(peak_mem_4 - peak_mem_3, peak_mem_3); CHECK_LE(peak_mem_4 - peak_mem_3, peak_mem_3);
} }
// TODO(mslekova): Remove the duplication with test-heap.cc
// Walks the heap's weak list of allocation sites and returns the number of
// entries it currently contains.
static int AllocationSitesCount(Heap* heap) {
  int num_sites = 0;
  Object current = heap->allocation_sites_list();
  while (current.IsAllocationSite()) {
    AllocationSite site = AllocationSite::cast(current);
    // Every list entry must still be chained via its weak_next field.
    CHECK(site.HasWeakNext());
    current = site.weak_next();
    ++num_sites;
  }
  return num_sites;
}
// This test simulates a specific race-condition if GC is triggered just
// before CompilationDependencies::Commit is finished, and this changes
// the pretenuring decision, thus causing a deoptimization.
TEST(DecideToPretenureDuringCompilation) {
  // The test makes use of optimization and relies on deterministic
  // compilation.
  if (!i::FLAG_opt || i::FLAG_always_opt || i::FLAG_always_sparkplug ||
      i::FLAG_minor_mc || i::FLAG_stress_incremental_marking ||
      i::FLAG_optimize_for_size || i::FLAG_stress_concurrent_allocation) {
    return;
  }

  FLAG_stress_gc_during_compilation = true;
  FLAG_allow_natives_syntax = true;
  FLAG_allocation_site_pretenuring = true;
  FLAG_flush_bytecode = false;

  // Turn on lazy feedback allocation, so we create exactly one allocation site.
  // Without lazy feedback allocation, we create two allocation sites.
  FLAG_lazy_feedback_allocation = true;

  // We want to trigger exactly 1 optimization.
  FLAG_use_osr = false;

  // We'll do manual initialization.
  ManualGCScope manual_gc_scope;
  v8::Isolate::CreateParams create_params;

  // This setting ensures Heap::MaximumSizeScavenge will return `true`.
  // We need to initialize the heap with at least 1 page, while keeping the
  // limit low, to ensure the new space fills even on 32-bit architectures.
  create_params.constraints.set_max_young_generation_size_in_bytes(
      3 * Page::kPageSize);
  create_params.array_buffer_allocator = CcTest::array_buffer_allocator();
  // A dedicated isolate keeps the allocation-site list and GC timing
  // independent from other cctests.
  v8::Isolate* isolate = v8::Isolate::New(create_params);

  isolate->Enter();
  {
    i::Isolate* i_isolate = reinterpret_cast<i::Isolate*>(isolate);
    Heap* heap = i_isolate->heap();
    GlobalHandles* global_handles = i_isolate->global_handles();
    HandleScope handle_scope(i_isolate);

    // The allocation site at the head of the list is ours.
    Handle<AllocationSite> site;
    {
      LocalContext context(isolate);
      v8::HandleScope scope(context->GetIsolate());
      // Snapshot the site count so we can verify exactly one new site below.
      int count = AllocationSitesCount(heap);
      CompileRun(
          "let arr = [];"
          "function foo(shouldKeep) {"
          " let local_array = new Array();"
          " if (shouldKeep) arr.push(local_array);"
          "}"
          "function bar(shouldKeep) {"
          " for (let i = 0; i < 10000; i++) {"
          " foo(shouldKeep);"
          " }"
          "}"
          "%PrepareFunctionForOptimization(bar);"
          "bar();");

      // This number should be >= kPretenureRatio * 10000,
      // where 10000 is the number of iterations in `bar`,
      // in order to make the ratio in DigestPretenuringFeedback close to 1.
      const int memento_found_bump = 8500;

      // One allocation site should have been created.
      int new_count = AllocationSitesCount(heap);
      CHECK_EQ(new_count, (count + 1));
      // Hold the newest site (head of the list) in a global handle so it
      // survives the GCs triggered during compilation.
      site = Handle<AllocationSite>::cast(global_handles->Create(
          AllocationSite::cast(heap->allocation_sites_list())));
      // Artificially bump the found count so the next GC flips the site's
      // pretenuring decision (see the ratio note above).
      site->set_memento_found_count(memento_found_bump);

      CompileRun("%OptimizeFunctionOnNextCall(bar);");
      CompileRun("bar(true);");

      // The last call should have caused `foo` to bail out of compilation
      // due to dependency change (the pretenuring decision in this case).
      // This will cause recompilation.

      // Check `bar` can get optimized again, meaning the compiler state is
      // recoverable from this point.
      CompileRun(
          "%PrepareFunctionForOptimization(bar);"
          "%OptimizeFunctionOnNextCall(bar);");
      CompileRun("bar();");

      Handle<Object> foo_obj =
          JSReceiver::GetProperty(i_isolate, i_isolate->global_object(), "bar")
              .ToHandleChecked();
      Handle<JSFunction> bar = Handle<JSFunction>::cast(foo_obj);
      CHECK(bar->HasAttachedOptimizedCode());
    }
  }
  isolate->Exit();
  isolate->Dispose();
}
namespace { namespace {
// Dummy external source stream which returns the whole source in one go. // Dummy external source stream which returns the whole source in one go.
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// Flags: --allow-natives-syntax --opt --no-always-opt
// Flags: --allocation-site-pretenuring --stress-gc-during-compilation
// Flags: --max-optimized-bytecode-size=132000
function CheckOptimizationStatus(func, expectedOptimizationStatus) {
let opt_status = %GetOptimizationStatus(func);
assertTrue ((opt_status & expectedOptimizationStatus) !== 0,
"Expected flag " + expectedOptimizationStatus +
" to be set in optimization status");
}
// Trigger pretenuring decision change at entry, deopting at bytecode offset -1.
// |arr| retains every allocated array so its allocation site stays reachable;
// |empty| always refers to the most recently allocated one.
let arr = [];
var empty;
// Verifies the caller's expected optimization state, then allocates another
// array from the tracked allocation site.
function DeoptEntry(expectedStatus) {
  CheckOptimizationStatus(DeoptEntry, expectedStatus);
  empty = [];
  arr.push(empty);
}
%PrepareFunctionForOptimization(DeoptEntry);
// First call: no optimized code yet, so the topmost frame is interpreted
// (or baseline, depending on configuration).
DeoptEntry(V8OptimizationStatus.kTopmostFrameIsInterpreted
    | V8OptimizationStatus.kTopmostFrameIsBaseline);
%OptimizeFunctionOnNextCall(DeoptEntry);
// Force the allocation site to be pretenured.
assertTrue(%PretenureAllocationSite(empty));
// This call should deopt at entry because of the pretenuring decision change.
DeoptEntry(V8OptimizationStatus.kTopmostFrameIsInterpreted
    | V8OptimizationStatus.kTopmostFrameIsBaseline);
%PrepareFunctionForOptimization(DeoptEntry);
%OptimizeFunctionOnNextCall(DeoptEntry);
// Function should be compiled now.
DeoptEntry(V8OptimizationStatus.kTopmostFrameIsTurboFanned);
// Trigger pretenuring decision change during OSR.
// Builds (as a string) a function named |name| whose loop allocates arrays
// and, on iteration 5, requests OSR plus manual pretenuring of the site.
// |fillCnt| no-op statements are inserted into the loop body so callers can
// force progressively wider JumpLoop bytecodes.
function createSource(name, fillCnt) {
  let src =
`function ${name}() {
let arr = [];
for (var i = 0; i < 10; i++) {
let local_arr = [];
arr[i] = local_arr;`
  // Useless bytecodes to force a wider jump.
  src += ' try {} catch (e) {}\n'.repeat(fillCnt);
  src +=
` if (i == 5) {
%OptimizeOsr();
%PretenureAllocationSite(local_arr);
}
}
}
%PrepareFunctionForOptimization(${name});
${name}();`
  return src;
}
// Deopt at JumpLoop.
eval(createSource('Loop',0));
// Deopt at JumpLoop.Wide (0xFF filler statements widen the backward jump).
eval(createSource('LoopWide',0xFF));
// Deopt at JumpLoop.ExtraWide (0xFFF fillers force the extra-wide form).
// --max-optimized-bytecode-size has to be large enough to compile this.
eval(createSource('LoopExtraWide',0xFFF));
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment