Commit 961a2c88 authored by Ross McIlroy, committed by Commit Bot

[fullcodegen] Remove ability to compile with Full-Codegen.

Removes the pathways to use Full-Codegen from compiler.cc. Also removes all
paths to optimize using AstGraphBuilder, which relies on Full-Codegen.
Cleans up ast-numbering, runtime-profiler, and some runtime functions to
remove now-dead code.

This makes Full-Codegen and AstGraphBuilder dead, but doesn't remove their
code yet; that will be done in a follow-up CL to keep things reviewable.

BUG=v8:6409

Change-Id: I3901ff17d960b2bb084cef0cb39fa16cb8419881
Reviewed-on: https://chromium-review.googlesource.com/583328
Commit-Queue: Ross McIlroy <rmcilroy@chromium.org>
Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
Cr-Commit-Position: refs/heads/master@{#47277}
parent 45b4522e
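
The central change is in compiler.cc: the ShouldUseFullCodegen() / GetUnoptimizedCompilationJob() dispatch is deleted, so every unoptimized compile now goes straight to Ignition. Below is a condensed before/after sketch of that dispatch, assembled from the compiler.cc hunks further down; it uses the V8-internal types shown in the diff and is not a standalone program.

// Before this CL: compiler.cc chose a backend per function literal.
CompilationJob* GetUnoptimizedCompilationJob(ParseInfo* parse_info,
                                             FunctionLiteral* literal,
                                             Isolate* isolate) {
  if (ShouldUseFullCodegen(literal)) {  // only asm.js functions by default
    return FullCodeGenerator::NewCompilationJob(parse_info, literal, isolate);
  }
  return interpreter::Interpreter::NewCompilationJob(parse_info, literal,
                                                     isolate);
}

// After this CL: the dispatcher is gone and callers construct the Ignition
// bytecode compilation job unconditionally.
std::unique_ptr<CompilationJob> job(
    interpreter::Interpreter::NewCompilationJob(parse_info, literal, isolate));
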
......@@ -24,9 +24,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
properties_(zone),
language_mode_(SLOPPY),
slot_cache_(zone),
disable_fullcodegen_reason_(kNoReason),
dont_optimize_reason_(kNoReason),
dont_self_optimize_(false),
collect_type_profile_(collect_type_profile) {
InitializeAstVisitor(stack_limit);
}
......@@ -57,13 +55,8 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
return tmp;
}
void DisableSelfOptimization() { dont_self_optimize_ = true; }
void DisableOptimization(BailoutReason reason) {
dont_optimize_reason_ = reason;
DisableSelfOptimization();
}
void DisableFullCodegen(BailoutReason reason) {
disable_fullcodegen_reason_ = reason;
}
template <typename Node>
......@@ -98,9 +91,7 @@ class AstNumberingVisitor final : public AstVisitor<AstNumberingVisitor> {
FunctionKind function_kind_;
// The slot cache allows us to reuse certain feedback slots.
FeedbackSlotCache slot_cache_;
BailoutReason disable_fullcodegen_reason_;
BailoutReason dont_optimize_reason_;
bool dont_self_optimize_;
bool collect_type_profile_;
DEFINE_AST_VISITOR_SUBCLASS_MEMBERS();
......@@ -132,7 +123,6 @@ void AstNumberingVisitor::VisitBreakStatement(BreakStatement* node) {
void AstNumberingVisitor::VisitDebuggerStatement(DebuggerStatement* node) {
DisableFullCodegen(kDebuggerStatement);
}
......@@ -159,16 +149,6 @@ void AstNumberingVisitor::VisitRegExpLiteral(RegExpLiteral* node) {
void AstNumberingVisitor::VisitVariableProxyReference(VariableProxy* node) {
switch (node->var()->location()) {
case VariableLocation::LOOKUP:
DisableFullCodegen(kReferenceToAVariableWhichRequiresDynamicLookup);
break;
case VariableLocation::MODULE:
DisableFullCodegen(kReferenceToModuleVariable);
break;
default:
break;
}
}
void AstNumberingVisitor::VisitVariableProxy(VariableProxy* node,
......@@ -188,14 +168,12 @@ void AstNumberingVisitor::VisitThisFunction(ThisFunction* node) {
void AstNumberingVisitor::VisitSuperPropertyReference(
SuperPropertyReference* node) {
DisableFullCodegen(kSuperReference);
Visit(node->this_var());
Visit(node->home_object());
}
void AstNumberingVisitor::VisitSuperCallReference(SuperCallReference* node) {
DisableFullCodegen(kSuperReference);
Visit(node->this_var());
Visit(node->new_target_var());
Visit(node->this_function_var());
......@@ -209,8 +187,6 @@ void AstNumberingVisitor::VisitExpressionStatement(ExpressionStatement* node) {
void AstNumberingVisitor::VisitReturnStatement(ReturnStatement* node) {
Visit(node->expression());
DCHECK(!node->is_async_return() || disable_fullcodegen_reason_ != kNoReason);
}
void AstNumberingVisitor::VisitSuspend(Suspend* node) {
......@@ -278,14 +254,12 @@ void AstNumberingVisitor::VisitCallRuntime(CallRuntime* node) {
void AstNumberingVisitor::VisitWithStatement(WithStatement* node) {
DisableFullCodegen(kWithStatement);
Visit(node->expression());
Visit(node->statement());
}
void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
DisableSelfOptimization();
node->set_osr_id(ReserveId());
node->set_first_suspend_id(suspend_count_);
Visit(node->body());
......@@ -295,7 +269,6 @@ void AstNumberingVisitor::VisitDoWhileStatement(DoWhileStatement* node) {
void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
DisableSelfOptimization();
node->set_osr_id(ReserveId());
node->set_first_suspend_id(suspend_count_);
Visit(node->cond());
......@@ -306,14 +279,12 @@ void AstNumberingVisitor::VisitWhileStatement(WhileStatement* node) {
void AstNumberingVisitor::VisitTryCatchStatement(TryCatchStatement* node) {
DCHECK(node->scope() == nullptr || !node->scope()->HasBeenRemoved());
DisableFullCodegen(kTryCatchStatement);
Visit(node->try_block());
Visit(node->catch_block());
}
void AstNumberingVisitor::VisitTryFinallyStatement(TryFinallyStatement* node) {
DisableFullCodegen(kTryFinallyStatement);
Visit(node->try_block());
Visit(node->finally_block());
}
......@@ -363,8 +334,6 @@ void AstNumberingVisitor::VisitCompareOperation(CompareOperation* node) {
}
void AstNumberingVisitor::VisitSpread(Spread* node) {
// We can only get here from spread calls currently.
DisableFullCodegen(kSpreadCall);
Visit(node->expression());
}
......@@ -373,19 +342,16 @@ void AstNumberingVisitor::VisitEmptyParentheses(EmptyParentheses* node) {
}
void AstNumberingVisitor::VisitGetIterator(GetIterator* node) {
DisableFullCodegen(kGetIterator);
Visit(node->iterable());
ReserveFeedbackSlots(node);
}
void AstNumberingVisitor::VisitImportCallExpression(
ImportCallExpression* node) {
DisableFullCodegen(kDynamicImport);
Visit(node->argument());
}
void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
DisableSelfOptimization();
node->set_osr_id(ReserveId());
Visit(node->enumerable()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
......@@ -397,7 +363,6 @@ void AstNumberingVisitor::VisitForInStatement(ForInStatement* node) {
void AstNumberingVisitor::VisitForOfStatement(ForOfStatement* node) {
DisableFullCodegen(kForOfStatement);
node->set_osr_id(ReserveId());
Visit(node->assign_iterator()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
......@@ -442,7 +407,6 @@ void AstNumberingVisitor::VisitCaseClause(CaseClause* node) {
void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
DisableSelfOptimization();
node->set_osr_id(ReserveId());
if (node->init() != NULL) Visit(node->init()); // Not part of loop.
node->set_first_suspend_id(suspend_count_);
......@@ -454,7 +418,6 @@ void AstNumberingVisitor::VisitForStatement(ForStatement* node) {
void AstNumberingVisitor::VisitClassLiteral(ClassLiteral* node) {
DisableFullCodegen(kClassLiteral);
LanguageModeScope language_mode_scope(this, STRICT);
if (node->extends()) Visit(node->extends());
if (node->constructor()) Visit(node->constructor());
......@@ -481,7 +444,6 @@ void AstNumberingVisitor::VisitObjectLiteral(ObjectLiteral* node) {
}
void AstNumberingVisitor::VisitLiteralProperty(LiteralProperty* node) {
if (node->is_computed_name()) DisableFullCodegen(kComputedPropertyName);
Visit(node->key());
Visit(node->value());
}
......@@ -496,9 +458,6 @@ void AstNumberingVisitor::VisitArrayLiteral(ArrayLiteral* node) {
void AstNumberingVisitor::VisitCall(Call* node) {
if (node->is_possibly_eval()) {
DisableFullCodegen(kFunctionCallsEval);
}
ReserveFeedbackSlots(node);
Visit(node->expression());
VisitArguments(node->arguments());
......@@ -559,29 +518,6 @@ void AstNumberingVisitor::VisitRewritableExpression(
bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
DeclarationScope* scope = node->scope();
DCHECK(!scope->HasBeenRemoved());
if (scope->new_target_var() != nullptr ||
scope->this_function_var() != nullptr) {
DisableFullCodegen(kSuperReference);
}
if (scope->arguments() != nullptr &&
!scope->arguments()->IsStackAllocated()) {
DisableFullCodegen(kContextAllocatedArguments);
}
if (scope->rest_parameter() != nullptr) {
DisableFullCodegen(kRestParameter);
}
if (IsResumableFunction(node->kind())) {
DisableFullCodegen(kGenerator);
}
if (IsClassConstructor(node->kind())) {
DisableFullCodegen(kClassConstructorFunction);
}
function_kind_ = node->kind();
LanguageModeScope language_mode_scope(this, node->language_mode());
......@@ -596,25 +532,6 @@ bool AstNumberingVisitor::Renumber(FunctionLiteral* node) {
node->set_dont_optimize_reason(dont_optimize_reason());
node->set_suspend_count(suspend_count_);
if (dont_self_optimize_) {
node->set_dont_self_optimize();
}
if (disable_fullcodegen_reason_ != kNoReason) {
node->set_must_use_ignition();
if (FLAG_trace_opt && scope->asm_function()) {
// TODO(leszeks): This is a quick'n'dirty fix to allow the debug name of
// the function to be accessed in the below print. This DCHECK will fail
// if we move ast numbering off the main thread, but that won't be before
// we remove FCG, in which case this entire check isn't necessary anyway.
AllowHandleDereference allow_deref;
DCHECK(!node->debug_name().is_null());
PrintF("[enforcing Ignition for %s because: %s\n",
node->debug_name()->ToCString().get(),
GetBailoutReason(disable_fullcodegen_reason_));
}
}
return !HasStackOverflow();
}
......
......@@ -2512,16 +2512,6 @@ class FunctionLiteral final : public Expression {
return ast_properties_.get_spec();
}
bool must_use_ignition() { return MustUseIgnitionField::decode(bit_field_); }
void set_must_use_ignition() {
bit_field_ = MustUseIgnitionField::update(bit_field_, true);
}
bool dont_self_optimize() { return DontSelfOptimize::decode(bit_field_); }
void set_dont_self_optimize() {
bit_field_ = DontSelfOptimize::update(bit_field_, true);
}
bool dont_optimize() { return dont_optimize_reason() != kNoReason; }
BailoutReason dont_optimize_reason() {
return DontOptimizeReasonField::decode(bit_field_);
......@@ -2593,13 +2583,9 @@ class FunctionLiteral final : public Expression {
class HasDuplicateParameters : public BitField<bool, Pretenure::kNext, 1> {};
class ShouldNotBeUsedOnceHintField
: public BitField<bool, HasDuplicateParameters::kNext, 1> {};
class MustUseIgnitionField
: public BitField<bool, ShouldNotBeUsedOnceHintField::kNext, 1> {};
// TODO(6409): Remove when Full-Codegen dies.
class DontSelfOptimize
: public BitField<bool, MustUseIgnitionField::kNext, 1> {};
class DontOptimizeReasonField
: public BitField<BailoutReason, DontSelfOptimize::kNext, 8> {};
: public BitField<BailoutReason, ShouldNotBeUsedOnceHintField::kNext, 8> {
};
int expected_property_count_;
int parameter_count_;
......
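
In ast.h, dropping the MustUseIgnitionField and DontSelfOptimize bits means DontOptimizeReasonField now chains directly off ShouldNotBeUsedOnceHintField::kNext in FunctionLiteral's bit_field_. The following is a minimal standalone sketch of that chained-BitField packing pattern; the tiny BitField template is a simplified stand-in for V8's real helper, the fields that precede Pretenure in the real class are elided (so the shifts start at 0 here), and the BailoutReason values are placeholders.

#include <cstdint>
#include <iostream>

// Simplified stand-in for V8's BitField<T, shift, size> helper.
template <typename T, int shift, int size>
struct BitField {
  static constexpr int kShift = shift;
  static constexpr int kNext = shift + size;  // where the next field starts
  static constexpr uint32_t kMask = ((1u << size) - 1) << shift;
  static uint32_t encode(T value) {
    return (static_cast<uint32_t>(value) << shift) & kMask;
  }
  static T decode(uint32_t bits) { return static_cast<T>((bits & kMask) >> shift); }
  static uint32_t update(uint32_t bits, T value) { return (bits & ~kMask) | encode(value); }
};

enum BailoutReason : uint8_t { kNoReason = 0, kSomePlaceholderReason = 1 };

// Layout after this CL: the reason field follows the hint bit directly,
// keeping the packing dense once the two removed bits are gone.
using Pretenure = BitField<bool, 0, 1>;
using HasDuplicateParameters = BitField<bool, Pretenure::kNext, 1>;
using ShouldNotBeUsedOnceHintField = BitField<bool, HasDuplicateParameters::kNext, 1>;
using DontOptimizeReasonField =
    BitField<BailoutReason, ShouldNotBeUsedOnceHintField::kNext, 8>;

int main() {
  uint32_t bit_field = 0;
  bit_field = DontOptimizeReasonField::update(bit_field, kSomePlaceholderReason);
  std::cout << "reason bits start at " << DontOptimizeReasonField::kShift
            << ", decoded reason = " << DontOptimizeReasonField::decode(bit_field)
            << "\n";
}
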
......@@ -126,11 +126,7 @@ bool CompilationInfo::is_this_defined() const { return !IsStub(); }
// profiler, so they trigger their own optimization when they're called
// for the SharedFunctionInfo::kCallsUntilPrimitiveOptimization-th time.
// TODO(6409) Remove when Full-Codegen dies.
bool CompilationInfo::ShouldSelfOptimize() {
return FLAG_opt && !literal()->dont_self_optimize() &&
!literal()->dont_optimize() &&
literal()->scope()->AllowsLazyCompilation();
}
bool CompilationInfo::ShouldSelfOptimize() { return false; }
void CompilationInfo::set_deferred_handles(
std::shared_ptr<DeferredHandles> deferred_handles) {
......
......@@ -23,7 +23,6 @@
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/globals.h"
#include "src/heap/heap.h"
#include "src/interpreter/interpreter.h"
......@@ -256,22 +255,6 @@ void EnsureFeedbackMetadata(CompilationInfo* compilation_info) {
compilation_info->literal()->feedback_vector_spec()));
}
bool ShouldUseFullCodegen(FunctionLiteral* literal) {
// Code which can't be supported by the old pipeline should use Ignition.
if (literal->must_use_ignition()) return false;
// Resumable functions are not supported by {FullCodeGenerator}, suspended
// activations stored as {JSGeneratorObject} on the heap always assume the
// underlying code to be based on the bytecode array.
DCHECK(!IsResumableFunction(literal->kind()));
// Use full-codegen for asm.js functions.
if (literal->scope()->asm_function()) return true;
// Disabled by default.
return false;
}
bool UseAsmWasm(FunctionLiteral* literal,
Handle<SharedFunctionInfo> shared_info, bool is_debug) {
// Check whether asm.js validation is enabled.
......@@ -291,22 +274,6 @@ bool UseAsmWasm(FunctionLiteral* literal,
return literal->scope()->asm_module();
}
CompilationJob* GetUnoptimizedCompilationJob(ParseInfo* parse_info,
FunctionLiteral* literal,
Isolate* isolate) {
// Function should have been parsed and analyzed before creating a compilation
// job.
DCHECK_NOT_NULL(literal);
DCHECK_NOT_NULL(parse_info->scope());
if (ShouldUseFullCodegen(literal)) {
return FullCodeGenerator::NewCompilationJob(parse_info, literal, isolate);
} else {
return interpreter::Interpreter::NewCompilationJob(parse_info, literal,
isolate);
}
}
void InstallUnoptimizedCode(CompilationInfo* compilation_info) {
Handle<SharedFunctionInfo> shared = compilation_info->shared_info();
DCHECK_EQ(compilation_info->shared_info()->language_mode(),
......@@ -433,7 +400,8 @@ std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJob(
// through to standard unoptimized compile.
}
std::unique_ptr<CompilationJob> job(
GetUnoptimizedCompilationJob(parse_info, literal, isolate));
interpreter::Interpreter::NewCompilationJob(parse_info, literal,
isolate));
if (job->PrepareJob() == CompilationJob::SUCCEEDED &&
job->ExecuteJob() == CompilationJob::SUCCEEDED) {
return job;
......@@ -441,15 +409,6 @@ std::unique_ptr<CompilationJob> PrepareAndExecuteUnoptimizedCompileJob(
return std::unique_ptr<CompilationJob>(); // Compilation failed, return null.
}
bool InnerFunctionShouldUseFullCodegen(
ThreadedList<ThreadedListZoneEntry<FunctionLiteral*>>* literals) {
for (auto it : *literals) {
FunctionLiteral* literal = it->value();
if (ShouldUseFullCodegen(literal)) return true;
}
return false;
}
Handle<SharedFunctionInfo> CompileUnoptimizedCode(
ParseInfo* parse_info, Handle<SharedFunctionInfo> shared_info,
Isolate* isolate) {
......@@ -461,23 +420,6 @@ Handle<SharedFunctionInfo> CompileUnoptimizedCode(
return Handle<SharedFunctionInfo>::null();
}
if (ShouldUseFullCodegen(parse_info->literal()) ||
InnerFunctionShouldUseFullCodegen(&inner_literals)) {
// If we might compile with full-codegen internalize now, otherwise
// we internalize when finalizing compilation.
parse_info->ast_value_factory()->Internalize(isolate);
// Full-codegen needs to access ScopeInfos when compiling, so allocate now.
DeclarationScope::AllocateScopeInfos(parse_info, isolate,
AnalyzeMode::kRegular);
if (parse_info->is_toplevel()) {
// Full-codegen needs to access SFI when compiling, so allocate the array
// now.
EnsureSharedFunctionInfosArrayOnScript(parse_info, isolate);
}
}
// Prepare and execute compilation of the outer-most function.
std::unique_ptr<CompilationJob> outer_job(
PrepareAndExecuteUnoptimizedCompileJob(parse_info, parse_info->literal(),
......@@ -638,21 +580,6 @@ void InsertCodeIntoOptimizedCodeCache(CompilationInfo* compilation_info) {
bool GetOptimizedCodeNow(CompilationJob* job) {
CompilationInfo* compilation_info = job->compilation_info();
Isolate* isolate = compilation_info->isolate();
// Parsing is not required when optimizing from existing bytecode.
if (!compilation_info->is_optimizing_from_bytecode()) {
ParseInfo* parse_info = job->parse_info();
if (!Compiler::ParseAndAnalyze(parse_info, compilation_info->shared_info(),
isolate)) {
return false;
}
compilation_info->set_literal(parse_info->literal());
parse_info->ast_value_factory()->Internalize(isolate);
DeclarationScope::AllocateScopeInfos(parse_info, isolate,
AnalyzeMode::kRegular);
EnsureFeedbackMetadata(compilation_info);
}
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::RecompileSynchronous);
......@@ -702,19 +629,6 @@ bool GetOptimizedCodeLater(CompilationJob* job) {
return false;
}
// Parsing is not required when optimizing from existing bytecode.
if (!compilation_info->is_optimizing_from_bytecode()) {
ParseInfo* parse_info = job->parse_info();
if (!Compiler::ParseAndAnalyze(parse_info, compilation_info->shared_info(),
isolate)) {
return false;
}
compilation_info->set_literal(parse_info->literal());
DeclarationScope::AllocateScopeInfos(parse_info, isolate,
AnalyzeMode::kRegular);
EnsureFeedbackMetadata(compilation_info);
}
TimerEventScope<TimerEventRecompileSynchronous> timer(isolate);
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::RecompileSynchronous);
......@@ -739,11 +653,6 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
Isolate* isolate = function->GetIsolate();
Handle<SharedFunctionInfo> shared(function->shared(), isolate);
bool ignition_osr = osr_frame && osr_frame->is_interpreted();
USE(ignition_osr);
DCHECK_IMPLIES(ignition_osr, !osr_ast_id.IsNone());
DCHECK_IMPLIES(ignition_osr, FLAG_ignition_osr);
// Make sure we clear the optimization marker on the function so that we
// don't try to re-optimize.
if (function->HasOptimizationMarker()) {
......@@ -806,20 +715,8 @@ MaybeHandle<Code> GetOptimizedCode(Handle<JSFunction> function,
RuntimeCallTimerScope runtimeTimer(isolate, &RuntimeCallStats::OptimizeCode);
TRACE_EVENT0(TRACE_DISABLED_BY_DEFAULT("v8.compile"), "V8.OptimizeCode");
// TurboFan can optimize directly from existing bytecode.
if (shared->HasBytecodeArray()) {
compilation_info->MarkAsOptimizeFromBytecode();
}
// Verify that OSR compilations are delegated to the correct graph builder.
// Depending on the underlying frame the semantics of the {BailoutId} differ
// and the various graph builders hard-code a certain semantic:
// - Interpreter : The BailoutId represents a bytecode offset.
// - FullCodegen : The BailoutId represents the id of an AST node.
DCHECK_IMPLIES(compilation_info->is_osr() && ignition_osr,
compilation_info->is_optimizing_from_bytecode());
DCHECK_IMPLIES(compilation_info->is_osr() && !ignition_osr,
!compilation_info->is_optimizing_from_bytecode());
// TODO(rmcilroy): Remove OptimizeFromBytecode flag.
compilation_info->MarkAsOptimizeFromBytecode();
// In case of concurrent recompilation, all handles below this point will be
// allocated in a deferred handle scope that is detached and handed off to
......@@ -1613,7 +1510,8 @@ CompilationJob* Compiler::PrepareUnoptimizedCompilationJob(
ParseInfo* parse_info, Isolate* isolate) {
VMState<COMPILER> state(isolate);
std::unique_ptr<CompilationJob> job(
GetUnoptimizedCompilationJob(parse_info, parse_info->literal(), isolate));
interpreter::Interpreter::NewCompilationJob(
parse_info, parse_info->literal(), isolate));
if (job->PrepareJob() != CompilationJob::SUCCEEDED) {
return nullptr;
}
......
......@@ -276,6 +276,8 @@ AstGraphBuilder::AstGraphBuilder(Zone* local_zone, CompilationInfo* info,
exit_controls_(local_zone),
loop_assignment_analysis_(loop),
state_values_cache_(jsgraph) {
// TODO(6409, rmcilroy): Remove ast graph builder in followup CL.
UNREACHABLE();
InitializeAstVisitor(info->isolate());
}
......
......@@ -311,7 +311,6 @@ DEFINE_BOOL(unbox_double_arrays, true, "automatically unbox arrays of doubles")
DEFINE_BOOL(string_slices, true, "use string slices")
// Flags for Ignition.
DEFINE_BOOL(ignition_osr, true, "enable support for OSR from ignition code")
DEFINE_BOOL(ignition_elide_noneffectful_bytecodes, true,
"elide bytecodes which won't have any external effect")
DEFINE_BOOL(ignition_reo, true, "use ignition register equivalence optimizer")
......
......@@ -73,6 +73,8 @@ FullCodeGenerator::FullCodeGenerator(MacroAssembler* masm,
source_position_table_builder_(info->zone(),
info->SourcePositionRecordingMode()),
ic_total_count_(0) {
// TODO(6409, rmcilroy): Remove fullcodegen in followup CL.
UNREACHABLE();
DCHECK(!info->IsStub());
Initialize(stack_limit);
}
......@@ -89,7 +91,8 @@ bool FullCodeGenerator::MakeCode(ParseInfo* parse_info, CompilationInfo* info,
uintptr_t stack_limit) {
Isolate* isolate = info->isolate();
DCHECK(!info->literal()->must_use_ignition());
// TODO(6409, rmcilroy): Remove fullcodegen in followup CL.
UNREACHABLE();
DCHECK(!FLAG_minimal);
RuntimeCallTimerScope runtimeTimer(isolate,
&RuntimeCallStats::CompileFullCode);
......
......@@ -14148,27 +14148,6 @@ void JSFunction::ClearTypeFeedbackInfo() {
}
}
BailoutId Code::TranslatePcOffsetToBytecodeOffset(uint32_t pc_offset) {
DisallowHeapAllocation no_gc;
DCHECK(kind() == FUNCTION);
BackEdgeTable back_edges(this, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
if (back_edges.pc_offset(i) == pc_offset) return back_edges.ast_id(i);
}
return BailoutId::None();
}
uint32_t Code::TranslateBytecodeOffsetToPcOffset(BailoutId bytecode_offset) {
DisallowHeapAllocation no_gc;
DCHECK(kind() == FUNCTION);
BackEdgeTable back_edges(this, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
if (back_edges.ast_id(i) == bytecode_offset) return back_edges.pc_offset(i);
}
UNREACHABLE(); // We expect to find the back edge.
return 0;
}
void Code::MakeCodeAgeSequenceYoung(byte* sequence, Isolate* isolate) {
PatchPlatformCodeAge(isolate, sequence, kNoAgeCodeAge);
}
......
......@@ -4022,9 +4022,6 @@ class Code: public HeapObject {
void ClearInlineCaches();
BailoutId TranslatePcOffsetToBytecodeOffset(uint32_t pc_offset);
uint32_t TranslateBytecodeOffsetToPcOffset(BailoutId bytecode_offset);
#define DECL_CODE_AGE_ENUM(X) k##X##CodeAge,
enum Age {
kToBeExecutedOnceCodeAge = -3,
......
......@@ -12,7 +12,6 @@
#include "src/compiler.h"
#include "src/execution.h"
#include "src/frames-inl.h"
#include "src/full-codegen/full-codegen.h"
#include "src/global-handles.h"
#include "src/interpreter/interpreter.h"
......@@ -38,32 +37,25 @@ STATIC_ASSERT(kTicksWhenNotEnoughTypeInfo < 256);
// The number of ticks required for optimizing a function increases with
// the size of the bytecode. This is in addition to the
// kProfilerTicksBeforeOptimization required for any function.
static const int kCodeSizeAllowancePerTickIgnition =
static const int kCodeSizeAllowancePerTick =
50 * interpreter::Interpreter::kCodeSizeMultiplier;
// Maximum size in bytes of generated code for a function to allow OSR.
static const int kOSRCodeSizeAllowanceBase =
100 * FullCodeGenerator::kCodeSizeMultiplier;
static const int kOSRCodeSizeAllowanceBaseIgnition =
10 * interpreter::Interpreter::kCodeSizeMultiplier;
static const int kOSRCodeSizeAllowancePerTick =
4 * FullCodeGenerator::kCodeSizeMultiplier;
static const int kOSRCodeSizeAllowancePerTickIgnition =
2 * interpreter::Interpreter::kCodeSizeMultiplier;
// Maximum size in bytes of generated code for a function to be optimized
// the very first time it is seen on the stack.
static const int kMaxSizeEarlyOpt =
5 * FullCodeGenerator::kCodeSizeMultiplier;
static const int kMaxSizeEarlyOptIgnition =
5 * interpreter::Interpreter::kCodeSizeMultiplier;
// Certain functions are simply too big to be worth optimizing.
// We aren't using the code size multiplier here because there is no
// "kMaxSizeOpt" with which we would need to normalize. This constant is
// only for optimization decisions coming into TurboFan from Ignition.
static const int kMaxSizeOptIgnition = 60 * KB;
// "kMaxSizeOpt" with which we would need to normalize.
static const int kMaxSizeOpt = 60 * KB;
#define OPTIMIZATION_REASON_LIST(V) \
V(DoNotOptimize, "do not optimize") \
......@@ -174,40 +166,24 @@ void RuntimeProfiler::AttemptOnStackReplacement(JavaScriptFrame* frame,
// arguments accesses, which is unsound. Don't try OSR.
if (shared->uses_arguments()) return;
// We're using on-stack replacement: modify unoptimized code so that
// certain back edges in any unoptimized frame will trigger on-stack
// replacement for that frame.
// - Ignition: Store new loop nesting level in BytecodeArray header.
// - FullCodegen: Patch back edges up to new level using BackEdgeTable.
// We're using on-stack replacement: Store new loop nesting level in
// BytecodeArray header so that certain back edges in any interpreter frame
// for this bytecode will trigger on-stack replacement for that frame.
if (FLAG_trace_osr) {
PrintF("[OSR - arming back edges in ");
function->PrintName();
PrintF("]\n");
}
if (frame->type() == StackFrame::JAVA_SCRIPT) {
DCHECK(shared->HasBaselineCode());
DCHECK(BackEdgeTable::Verify(shared->GetIsolate(), shared->code()));
for (int i = 0; i < loop_nesting_levels; i++) {
BackEdgeTable::Patch(isolate_, shared->code());
}
} else if (frame->type() == StackFrame::INTERPRETED) {
DCHECK(shared->HasBytecodeArray());
if (!FLAG_ignition_osr) return; // Only use this when enabled.
int level = shared->bytecode_array()->osr_loop_nesting_level();
shared->bytecode_array()->set_osr_loop_nesting_level(
Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
} else {
UNREACHABLE();
}
DCHECK_EQ(StackFrame::INTERPRETED, frame->type());
DCHECK(shared->HasBytecodeArray());
int level = shared->bytecode_array()->osr_loop_nesting_level();
shared->bytecode_array()->set_osr_loop_nesting_level(
Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
}
void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
JavaScriptFrame* frame,
int frame_count) {
SharedFunctionInfo* shared = function->shared();
Code* shared_code = shared->code();
if (shared_code->kind() != Code::FUNCTION) return;
void RuntimeProfiler::MaybeOptimize(JSFunction* function,
JavaScriptFrame* frame) {
if (function->IsInOptimizationQueue()) {
if (FLAG_trace_opt_verbose) {
PrintF("[function ");
......@@ -220,87 +196,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
if (FLAG_always_osr) {
AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
// Fall through and do a normal optimized compile as well.
} else if (!frame->is_optimized() &&
(function->IsMarkedForOptimization() ||
function->IsMarkedForConcurrentOptimization() ||
function->HasOptimizedCode())) {
// Attempt OSR if we are still running unoptimized code even though
// the function has long been marked or even already been optimized.
int ticks = function->feedback_vector()->profiler_ticks();
int64_t allowance =
kOSRCodeSizeAllowanceBase +
static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
if (shared_code->CodeSize() <= allowance) {
AttemptOnStackReplacement(frame);
}
return;
}
// Only record top-level code on top of the execution stack and
// avoid optimizing excessively large scripts since top-level code
// will be executed only once.
const int kMaxToplevelSourceSize = 10 * 1024;
if (shared->is_toplevel() &&
(frame_count > 1 || shared->SourceSize() > kMaxToplevelSourceSize)) {
return;
}
// Do not record non-optimizable functions.
if (shared->optimization_disabled()) {
return;
}
if (frame->is_optimized()) return;
int ticks = function->feedback_vector()->profiler_ticks();
if (ticks >= kProfilerTicksBeforeOptimization) {
int typeinfo, generic, total, type_percentage, generic_percentage;
GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
&generic_percentage);
if (type_percentage >= FLAG_type_info_threshold &&
generic_percentage <= FLAG_generic_ic_threshold) {
// If this particular function hasn't had any ICs patched for enough
// ticks, optimize it now.
Optimize(function, OptimizationReason::kHotAndStable);
} else if (ticks >= kTicksWhenNotEnoughTypeInfo) {
Optimize(function, OptimizationReason::kHotWithoutMuchTypeInfo);
} else {
if (FLAG_trace_opt_verbose) {
PrintF("[not yet optimizing ");
function->PrintName();
PrintF(", not enough type info: %d/%d (%d%%)]\n", typeinfo, total,
type_percentage);
}
}
} else if (!any_ic_changed_ &&
shared_code->instruction_size() < kMaxSizeEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
int typeinfo, generic, total, type_percentage, generic_percentage;
GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
&generic_percentage);
if (type_percentage >= FLAG_type_info_threshold &&
generic_percentage <= FLAG_generic_ic_threshold) {
Optimize(function, OptimizationReason::kSmallFunction);
}
}
}
void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
JavaScriptFrame* frame) {
if (function->IsInOptimizationQueue()) {
if (FLAG_trace_opt_verbose) {
PrintF("[function ");
function->PrintName();
PrintF(" is already in optimization queue]\n");
}
return;
}
if (FLAG_always_osr) {
AttemptOnStackReplacement(frame, AbstractCode::kMaxLoopNestingMarker);
// Fall through and do a normal optimized compile as well.
} else if (MaybeOSRIgnition(function, frame)) {
} else if (MaybeOSR(function, frame)) {
return;
}
......@@ -308,15 +204,14 @@ void RuntimeProfiler::MaybeOptimizeIgnition(JSFunction* function,
if (frame->is_optimized()) return;
OptimizationReason reason = ShouldOptimizeIgnition(function, frame);
OptimizationReason reason = ShouldOptimize(function, frame);
if (reason != OptimizationReason::kDoNotOptimize) {
Optimize(function, reason);
}
}
bool RuntimeProfiler::MaybeOSRIgnition(JSFunction* function,
JavaScriptFrame* frame) {
bool RuntimeProfiler::MaybeOSR(JSFunction* function, JavaScriptFrame* frame) {
SharedFunctionInfo* shared = function->shared();
int ticks = function->feedback_vector()->profiler_ticks();
......@@ -330,8 +225,8 @@ bool RuntimeProfiler::MaybeOSRIgnition(JSFunction* function,
// Attempt OSR if we are still running interpreted code even though
// the function has long been marked or even already been optimized.
int64_t allowance =
kOSRCodeSizeAllowanceBaseIgnition +
static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTickIgnition;
kOSRCodeSizeAllowanceBase +
static_cast<int64_t>(ticks) * kOSRCodeSizeAllowancePerTick;
if (shared->bytecode_array()->Size() <= allowance) {
AttemptOnStackReplacement(frame);
}
......@@ -340,18 +235,18 @@ bool RuntimeProfiler::MaybeOSRIgnition(JSFunction* function,
return false;
}
OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
JSFunction* function, JavaScriptFrame* frame) {
OptimizationReason RuntimeProfiler::ShouldOptimize(JSFunction* function,
JavaScriptFrame* frame) {
SharedFunctionInfo* shared = function->shared();
int ticks = function->feedback_vector()->profiler_ticks();
if (shared->bytecode_array()->Size() > kMaxSizeOptIgnition) {
if (shared->bytecode_array()->Size() > kMaxSizeOpt) {
return OptimizationReason::kDoNotOptimize;
}
int ticks_for_optimization =
kProfilerTicksBeforeOptimization +
(shared->bytecode_array()->Size() / kCodeSizeAllowancePerTickIgnition);
(shared->bytecode_array()->Size() / kCodeSizeAllowancePerTick);
if (ticks >= ticks_for_optimization) {
int typeinfo, generic, total, type_percentage, generic_percentage;
GetICCounts(function, &typeinfo, &generic, &total, &type_percentage,
......@@ -372,7 +267,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
return OptimizationReason::kDoNotOptimize;
}
} else if (!any_ic_changed_ &&
shared->bytecode_array()->Size() < kMaxSizeEarlyOptIgnition) {
shared->bytecode_array()->Size() < kMaxSizeEarlyOpt) {
// If no IC was patched since the last tick and this function is very
// small, optimistically optimize it now.
int typeinfo, generic, total, type_percentage, generic_percentage;
......@@ -399,7 +294,7 @@ OptimizationReason RuntimeProfiler::ShouldOptimizeIgnition(
PrintF("ICs changed]\n");
} else {
PrintF(" too large for small function optimization: %d/%d]\n",
shared->bytecode_array()->Size(), kMaxSizeEarlyOptIgnition);
shared->bytecode_array()->Size(), kMaxSizeEarlyOpt);
}
}
return OptimizationReason::kDoNotOptimize;
......@@ -421,17 +316,13 @@ void RuntimeProfiler::MarkCandidatesForOptimization() {
frame_count++ < frame_count_limit && !it.done();
it.Advance()) {
JavaScriptFrame* frame = it.frame();
if (frame->is_optimized()) {
continue;
}
if (frame->is_optimized()) continue;
JSFunction* function = frame->function();
DCHECK(function->shared()->is_compiled());
if (function->shared()->IsInterpreted()) {
MaybeOptimizeIgnition(function, frame);
} else {
MaybeOptimizeFullCodegen(function, frame, frame_count);
}
if (!function->shared()->IsInterpreted()) continue;
MaybeOptimize(function, frame);
// TODO(leszeks): Move this increment to before the maybe optimize checks,
// and update the tests to assume the increment has already happened.
......
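
With the Full-Codegen path gone, the runtime profiler keeps a single bytecode-size-scaled heuristic (the former *Ignition constants, renamed to kCodeSizeAllowancePerTick, kOSRCodeSizeAllowanceBase, kMaxSizeOpt, and kMaxSizeEarlyOpt). The sketch below mirrors the shape of RuntimeProfiler::ShouldOptimize from the hunk above as a standalone function; only kMaxSizeOpt's 60 KB comes from the diff, the other constant values are placeholders, and the IC type-feedback percentage check is reduced to a boolean for brevity.

#include <cstdio>

// Placeholder values; only kMaxSizeOpt (60 KB) appears in the diff above.
constexpr int KB = 1024;
constexpr int kMaxSizeOpt = 60 * KB;
constexpr int kProfilerTicksBeforeOptimization = 2;  // placeholder
constexpr int kCodeSizeAllowancePerTick = 1200;      // placeholder
constexpr int kMaxSizeEarlyOpt = 120;                // placeholder

enum class OptimizationReason { kDoNotOptimize, kHotAndStable, kSmallFunction };

// Mirrors the control flow after this CL: very large functions are never
// optimized, the tick threshold grows with bytecode size, and small functions
// may be optimized early when no IC has changed since the last tick.
OptimizationReason ShouldOptimize(int bytecode_size, int profiler_ticks,
                                  bool enough_type_info, bool any_ic_changed) {
  if (bytecode_size > kMaxSizeOpt) return OptimizationReason::kDoNotOptimize;
  int ticks_for_optimization =
      kProfilerTicksBeforeOptimization + bytecode_size / kCodeSizeAllowancePerTick;
  if (profiler_ticks >= ticks_for_optimization) {
    return enough_type_info ? OptimizationReason::kHotAndStable
                            : OptimizationReason::kDoNotOptimize;
  }
  if (!any_ic_changed && bytecode_size < kMaxSizeEarlyOpt) {
    return OptimizationReason::kSmallFunction;
  }
  return OptimizationReason::kDoNotOptimize;
}

int main() {
  std::printf("%d\n", static_cast<int>(ShouldOptimize(3000, 5, true, false)));
}
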
......@@ -27,15 +27,12 @@ class RuntimeProfiler {
int nesting_levels = 1);
private:
void MaybeOptimizeFullCodegen(JSFunction* function, JavaScriptFrame* frame,
int frame_count);
void MaybeBaselineIgnition(JSFunction* function, JavaScriptFrame* frame);
void MaybeOptimizeIgnition(JSFunction* function, JavaScriptFrame* frame);
// Potentially attempts OSR from ignition and returns whether no other
void MaybeOptimize(JSFunction* function, JavaScriptFrame* frame);
// Potentially attempts OSR and returns whether no other
// optimization attempts should be made.
bool MaybeOSRIgnition(JSFunction* function, JavaScriptFrame* frame);
OptimizationReason ShouldOptimizeIgnition(JSFunction* function,
JavaScriptFrame* frame);
bool MaybeOSR(JSFunction* function, JavaScriptFrame* frame);
OptimizationReason ShouldOptimize(JSFunction* function,
JavaScriptFrame* frame);
void Optimize(JSFunction* function, OptimizationReason reason);
void Baseline(JSFunction* function, OptimizationReason reason);
......
......@@ -244,30 +244,6 @@ static bool IsSuitableForOnStackReplacement(Isolate* isolate,
namespace {
BailoutId DetermineEntryAndDisarmOSRForBaseline(JavaScriptFrame* frame) {
Handle<Code> caller_code(frame->function()->shared()->code());
// Passing the PC in the JavaScript frame from the caller directly is
// not GC safe, so we walk the stack to get it.
if (!caller_code->contains(frame->pc())) {
// Code on the stack may not be the code object referenced by the shared
// function info. It may have been replaced to include deoptimization data.
caller_code = Handle<Code>(frame->LookupCode());
}
DCHECK_EQ(frame->LookupCode(), *caller_code);
DCHECK_EQ(Code::FUNCTION, caller_code->kind());
DCHECK(caller_code->contains(frame->pc()));
// Revert the patched back edge table, regardless of whether OSR succeeds.
BackEdgeTable::Revert(frame->isolate(), *caller_code);
// Return a BailoutId representing an AST id of the {IterationStatement}.
uint32_t pc_offset =
static_cast<uint32_t>(frame->pc() - caller_code->instruction_start());
return caller_code->TranslatePcOffsetToBytecodeOffset(pc_offset);
}
BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) {
InterpretedFrame* iframe = reinterpret_cast<InterpretedFrame*>(frame);
......@@ -280,7 +256,6 @@ BailoutId DetermineEntryAndDisarmOSRForInterpreter(JavaScriptFrame* frame) {
DCHECK(frame->LookupCode()->is_interpreter_trampoline_builtin());
DCHECK(frame->function()->shared()->HasBytecodeArray());
DCHECK(frame->is_interpreted());
DCHECK(FLAG_ignition_osr);
// Reset the OSR loop nesting depth to disarm back edges.
bytecode->set_osr_loop_nesting_level(0);
......@@ -306,12 +281,11 @@ RUNTIME_FUNCTION(Runtime_CompileForOnStackReplacement) {
JavaScriptFrameIterator it(isolate);
JavaScriptFrame* frame = it.frame();
DCHECK_EQ(frame->function(), *function);
DCHECK(frame->is_interpreted());
// Determine the entry point for which this OSR request has been fired and
// also disarm all back edges in the calling code to stop new requests.
BailoutId ast_id = frame->is_interpreted()
? DetermineEntryAndDisarmOSRForInterpreter(frame)
: DetermineEntryAndDisarmOSRForBaseline(frame);
BailoutId ast_id = DetermineEntryAndDisarmOSRForInterpreter(frame);
DCHECK(!ast_id.IsNone());
MaybeHandle<Code> maybe_result;
......
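
After this CL, OSR arming and disarming operate purely on the BytecodeArray's osr_loop_nesting_level: the profiler raises it (runtime-profiler.cc hunk earlier), and Runtime_CompileForOnStackReplacement resets it to zero once it has read the entry offset from the interpreted frame. Below is a standalone sketch of that counter protocol; BytecodeArrayStub is a hypothetical stand-in for the real heap object and kMaxLoopNestingMarker is a placeholder for AbstractCode's limit.

#include <algorithm>
#include <cstdio>

constexpr int kMaxLoopNestingMarker = 6;  // placeholder value

// Hypothetical stand-in for the osr_loop_nesting_level field on BytecodeArray.
struct BytecodeArrayStub {
  int osr_loop_nesting_level = 0;
};

// Arming (RuntimeProfiler::AttemptOnStackReplacement): raise the level so back
// edges in sufficiently nested loops start triggering OSR for this frame.
void ArmOsr(BytecodeArrayStub* bytecode, int loop_nesting_levels) {
  bytecode->osr_loop_nesting_level =
      std::min(bytecode->osr_loop_nesting_level + loop_nesting_levels,
               kMaxLoopNestingMarker);
}

// Disarming (DetermineEntryAndDisarmOSRForInterpreter): reset the level so no
// further back edges request OSR while the optimized code is being produced.
void DisarmOsr(BytecodeArrayStub* bytecode) {
  bytecode->osr_loop_nesting_level = 0;
}

int main() {
  BytecodeArrayStub bytecode;
  ArmOsr(&bytecode, 1);
  std::printf("armed level: %d\n", bytecode.osr_loop_nesting_level);
  DisarmOsr(&bytecode);
  std::printf("disarmed level: %d\n", bytecode.osr_loop_nesting_level);
}
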
......@@ -233,7 +233,7 @@ RUNTIME_FUNCTION(Runtime_OptimizeFunctionOnNextCall) {
}
// If the function is already optimized, just return.
if (function->IsOptimized()) {
if (function->IsOptimized() || function->shared()->HasAsmWasmData()) {
return isolate->heap()->undefined_value();
}
......
......@@ -10,11 +10,9 @@
#include "src/compiler/linkage.h"
#include "src/compiler/pipeline.h"
#include "src/execution.h"
#include "src/full-codegen/full-codegen.h"
#include "src/handles.h"
#include "src/objects-inl.h"
#include "src/parsing/parse-info.h"
#include "src/parsing/parsing.h"
#include "test/cctest/cctest.h"
namespace v8 {
......@@ -149,13 +147,9 @@ Handle<JSFunction> FunctionTester::Compile(Handle<JSFunction> function) {
}
CHECK(Compiler::Compile(function, Compiler::CLEAR_EXCEPTION));
if (info.shared_info()->HasBytecodeArray()) {
info.MarkAsDeoptimizationEnabled();
info.MarkAsOptimizeFromBytecode();
} else {
CHECK(Compiler::ParseAndAnalyze(&parse_info, shared, info.isolate()));
parse_info.ast_value_factory()->Internalize(info.isolate());
}
CHECK(info.shared_info()->HasBytecodeArray());
info.MarkAsDeoptimizationEnabled();
info.MarkAsOptimizeFromBytecode();
JSFunction::EnsureLiterals(function);
Handle<Code> code = Pipeline::GenerateCodeForTesting(&info);
......@@ -174,9 +168,6 @@ Handle<JSFunction> FunctionTester::CompileGraph(Graph* graph) {
CompilationInfo info(parse_info.zone(), function->GetIsolate(),
parse_info.script(), shared, function);
CHECK(
parsing::ParseFunction(&parse_info, info.shared_info(), info.isolate()));
Handle<Code> code = Pipeline::GenerateCodeForTesting(&info, graph);
CHECK(!code.is_null());
function->ReplaceCode(*code);
......
......@@ -3,11 +3,13 @@
# found in the LICENSE file.
[
[ALWAYS, {
# Issue 6166.
'debugger/asm-js-breakpoint-during-exec': [FAIL],
}], # ALWAYS
##############################################################################
['variant != default', {
# Issue 6166.
'debugger/asm-js-breakpoint-during-exec': [PASS, FAIL],
# Issue 6167.
'debugger/eval-scopes': [PASS, FAIL],
'debugger/scope-skip-variables-with-empty-name': [PASS, FAIL],
......