Commit ea27a244 authored by Maya Lekova, committed by Commit Bot

Revert "Get BytecodeArray via current frame where possible."

This reverts commit 7350e7b2.

Reason for revert: Breaking layout test, blocking the roll, see
https://bugs.chromium.org/p/v8/issues/detail?id=8405

Original change's description:
> Get BytecodeArray via current frame where possible.
> 
> With BytecodeArray flushing the SFI->BytecodeArray pointer will become pseudo weak.
> Instead of getting the bytecode array from the SFI, get it from the frame instead
> (which is a strong pointer). Note: This won't actually change behaviour since the
> fact that the bytecode array was on the frame will retain it strongly, however it
> makes the contract that the BytecodeArray must exist at these points more explicit.
> 
> Updates code in runtime-profiler.cc, frames.cc and runtime-test.cc to do this.
> 
> BUG=v8:8395
> 
> Cq-Include-Trybots: luci.chromium.try:linux_chromium_headless_rel;master.tryserver.blink:linux_trusty_blink_rel
> Change-Id: Id7a3e6857abd0e89bf238e9b0b01de4461df54e1
> Reviewed-on: https://chromium-review.googlesource.com/c/1310193
> Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
> Reviewed-by: Mythri Alle <mythria@chromium.org>
> Cr-Commit-Position: refs/heads/master@{#57198}

TBR=rmcilroy@chromium.org,mythria@chromium.org

Change-Id: Ie5db0ec1d68ca01d62e9880a4476704ad4d013b5
No-Presubmit: true
No-Tree-Checks: true
No-Try: true
Bug: v8:8395
Cq-Include-Trybots: luci.chromium.try:linux_chromium_headless_rel;master.tryserver.blink:linux_trusty_blink_rel
Reviewed-on: https://chromium-review.googlesource.com/c/1314330
Reviewed-by: Maya Lekova <mslekova@chromium.org>
Commit-Queue: Maya Lekova <mslekova@chromium.org>
Cr-Commit-Position: refs/heads/master@{#57205}
parent b8a91131
......@@ -1655,7 +1655,7 @@ int InterpretedFrame::position() const {
int InterpretedFrame::LookupExceptionHandlerInTable(
int* context_register, HandlerTable::CatchPrediction* prediction) {
HandlerTable table(GetBytecodeArray());
HandlerTable table(function()->shared()->GetBytecodeArray());
return table.LookupRange(GetBytecodeOffset(), context_register, prediction);
}
......@@ -1723,7 +1723,8 @@ void InterpretedFrame::WriteInterpreterRegister(int register_index,
void InterpretedFrame::Summarize(std::vector<FrameSummary>* functions) const {
DCHECK(functions->empty());
AbstractCode* abstract_code = AbstractCode::cast(GetBytecodeArray());
AbstractCode* abstract_code =
AbstractCode::cast(function()->shared()->GetBytecodeArray());
FrameSummary::JavaScriptFrameSummary summary(
isolate(), receiver(), function(), abstract_code, GetBytecodeOffset(),
IsConstructor());
......
......@@ -112,7 +112,7 @@ void RuntimeProfiler::Optimize(JSFunction* function,
function->MarkForOptimization(ConcurrencyMode::kConcurrent);
}
void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
int loop_nesting_levels) {
JSFunction* function = frame->function();
SharedFunctionInfo* shared = function->shared();
......@@ -133,13 +133,14 @@ void RuntimeProfiler::AttemptOnStackReplacement(InterpretedFrame* frame,
}
DCHECK_EQ(StackFrame::INTERPRETED, frame->type());
int level = frame->GetBytecodeArray()->osr_loop_nesting_level();
frame->GetBytecodeArray()->set_osr_loop_nesting_level(
DCHECK(shared->HasBytecodeArray());
int level = shared->GetBytecodeArray()->osr_loop_nesting_level();
shared->GetBytecodeArray()->set_osr_loop_nesting_level(
Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
}
void RuntimeProfiler::MaybeOptimize(JSFunction* function,
InterpretedFrame* frame) {
JavaScriptFrame* frame) {
if (function->IsInOptimizationQueue()) {
if (FLAG_trace_opt_verbose) {
PrintF("[function ");
......@@ -158,28 +159,32 @@ void RuntimeProfiler::MaybeOptimize(JSFunction* function,
if (function->shared()->optimization_disabled()) return;
OptimizationReason reason =
ShouldOptimize(function, function->shared()->GetBytecodeArray());
if (frame->is_optimized()) return;
OptimizationReason reason = ShouldOptimize(function, frame);
if (reason != OptimizationReason::kDoNotOptimize) {
Optimize(function, reason);
}
}
bool RuntimeProfiler::MaybeOSR(JSFunction* function, InterpretedFrame* frame) {
bool RuntimeProfiler::MaybeOSR(JSFunction* function, JavaScriptFrame* frame) {
SharedFunctionInfo* shared = function->shared();
int ticks = function->feedback_vector()->profiler_ticks();
// TODO(rmcilroy): Also ensure we only OSR top-level code if it is smaller
// than kMaxToplevelSourceSize.
if (function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->HasOptimizedCode()) {
if (!frame->is_optimized() &&
(function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->HasOptimizedCode())) {
// Attempt OSR if we are still running interpreted code even though the
// the function has long been marked or even already been optimized.
int64_t allowance =
kOSRBytecodeSizeAllowanceBase +
static_cast<int64_t>(ticks) * kOSRBytecodeSizeAllowancePerTick;
if (function->shared()->GetBytecodeArray()->length() <= allowance) {
if (shared->GetBytecodeArray()->length() <= allowance) {
AttemptOnStackReplacement(frame);
}
return true;
......@@ -188,19 +193,21 @@ bool RuntimeProfiler::MaybeOSR(JSFunction* function, InterpretedFrame* frame) {
}
OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction* function,
BytecodeArray* bytecode) {
JavaScriptFrame* frame) {
SharedFunctionInfo* shared = function->shared();
int ticks = function->feedback_vector()->profiler_ticks();
if (bytecode->length() > kMaxBytecodeSizeForOpt) {
if (shared->GetBytecodeArray()->length() > kMaxBytecodeSizeForOpt) {
return OptimizationReason::kDoNotOptimize;
}
int ticks_for_optimization =
kProfilerTicksBeforeOptimization +
(bytecode->length() / kBytecodeSizeAllowancePerTick);
(shared->GetBytecodeArray()->length() / kBytecodeSizeAllowancePerTick);
if (ticks >= ticks_for_optimization) {
return OptimizationReason::kHotAndStable;
} else if (!any_ic_changed_ &&
bytecode->length() < kMaxBytecodeSizeForEarlyOpt) {
} else if (!any_ic_changed_ && shared->GetBytecodeArray()->length() <
kMaxBytecodeSizeForEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
return OptimizationReason::kSmallFunction;
......@@ -213,7 +220,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction* function,
PrintF("ICs changed]\n");
} else {
PrintF(" too large for small function optimization: %d/%d]\n",
bytecode->length(), kMaxBytecodeSizeForEarlyOpt);
shared->GetBytecodeArray()->length(), kMaxBytecodeSizeForEarlyOpt);
}
}
return OptimizationReason::kDoNotOptimize;
......@@ -235,13 +242,13 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
frame_count++ < frame_count_limit && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (!frame->is_interpreted()) continue;
if (frame->is_optimized()) continue;
JSFunction* function = frame->function();
DCHECK(function->shared()->is_compiled());
if (!function->shared()->IsInterpreted()) continue;
MaybeOptimize(function, InterpretedFrame::cast(frame));
MaybeOptimize(function, frame);
// TODO(leszeks): Move this increment to before the maybe optimize checks,
// and update the tests to assume the increment has already happened.
......
......@@ -10,9 +10,8 @@
namespace v8 {
namespace internal {
class BytecodeArray;
class Isolate;
class InterpretedFrame;
class JavaScriptFrame;
class JSFunction;
enum class OptimizationReason : uint8_t;
......@@ -24,16 +23,16 @@ class RuntimeProfiler {
void NotifyICChanged() { any_ic_changed_ = true; }
void AttemptOnStackReplacement(InterpretedFrame* frame,
void AttemptOnStackReplacement(JavaScriptFrame* frame,
int nesting_levels = 1);
private:
void MaybeOptimize(JSFunction* function, InterpretedFrame* frame);
void MaybeOptimize(JSFunction* function, JavaScriptFrame* frame);
// Potentially attempts OSR from and returns whether no other
// optimization attempts should be made.
bool MaybeOSR(JSFunction* function, InterpretedFrame* frame);
bool MaybeOSR(JSFunction* function, JavaScriptFrame* frame);
OptimizationReason ShouldOptimize(JSFunction* function,
BytecodeArray* bytecode_array);
JavaScriptFrame* frame);
void Optimize(JSFunction* function, OptimizationReason reason);
void Baseline(JSFunction* function, OptimizationReason reason);
......
......@@ -299,8 +299,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
// Make the profiler arm all back edges in unoptimized code.
if (it.frame()->type() == StackFrame::INTERPRETED) {
isolate->runtime_profiler()->AttemptOnStackReplacement(
InterpretedFrame::cast(it.frame()),
AbstractCode::kMaxLoopNestingMarker);
it.frame(), AbstractCode::kMaxLoopNestingMarker);
}
return ReadOnlyRoots(isolate).undefined_value();
......
......@@ -26,7 +26,7 @@ return x + b;
---
Break location after LiveEdit:
var x = a;
#var x = 3;
var x = #3;
debugger;
stackChanged: true
......@@ -62,12 +62,12 @@ var x = 1;
---
Break location after LiveEdit:
function boo() {
#var x = 3;
var x = #3;
debugger;
stackChanged: true
Protocol.Debugger.stepInto
function boo() {
var x = #3;
debugger;
var x = 3;
#debugger;
var x = 1;
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment