Commit b54e49ae authored by mstarzinger, committed by Commit bot

[interpreter] Add OSR nesting level to bytecode header.

This adds a new field to the header of every BytecodeArray which stores
the current nesting level up to which loop back edges are armed as OSR
points. The intention is to arm OSR points incrementally from outermost
to innermost until one fires (similar to OSR from FullCodegen).

R=rmcilroy@chromium.org
BUG=v8:4764

Review-Url: https://codereview.chromium.org/2172583002
Cr-Commit-Position: refs/heads/master@{#38017}
parent 0a36b5cd
......@@ -302,6 +302,7 @@ DEFINE_BOOL(ignition_generators, true,
DEFINE_STRING(ignition_filter, "*", "filter for ignition interpreter")
DEFINE_BOOL(ignition_deadcode, true,
"use ignition dead code elimination optimizer")
DEFINE_BOOL(ignition_osr, false, "enable support for OSR from ignition code")
DEFINE_BOOL(ignition_peephole, true, "use ignition peephole optimizer")
DEFINE_BOOL(ignition_reo, true, "use ignition register equivalence optimizer")
DEFINE_BOOL(ignition_filter_expression_positions, true,
......
......@@ -224,7 +224,7 @@ void FullCodeGenerator::RecordBackEdge(BailoutId ast_id) {
// The pc offset does not need to be encoded and packed together with a state.
DCHECK(masm_->pc_offset() > 0);
DCHECK(loop_depth() > 0);
uint8_t depth = Min(loop_depth(), Code::kMaxLoopNestingMarker);
uint8_t depth = Min(loop_depth(), AbstractCode::kMaxLoopNestingMarker);
BackEdgeEntry entry =
{ ast_id, static_cast<unsigned>(masm_->pc_offset()), depth };
back_edges_.Add(entry, zone());
......@@ -1771,7 +1771,7 @@ void BackEdgeTable::Patch(Isolate* isolate, Code* unoptimized) {
// to find the matching loops to patch the interrupt
// call to an unconditional call to the replacement code.
int loop_nesting_level = unoptimized->allow_osr_at_loop_nesting_level() + 1;
if (loop_nesting_level > Code::kMaxLoopNestingMarker) return;
if (loop_nesting_level > AbstractCode::kMaxLoopNestingMarker) return;
BackEdgeTable back_edges(unoptimized, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
......@@ -1818,7 +1818,7 @@ bool BackEdgeTable::Verify(Isolate* isolate, Code* unoptimized) {
BackEdgeTable back_edges(unoptimized, &no_gc);
for (uint32_t i = 0; i < back_edges.length(); i++) {
uint32_t loop_depth = back_edges.loop_depth(i);
CHECK_LE(static_cast<int>(loop_depth), Code::kMaxLoopNestingMarker);
CHECK_LE(static_cast<int>(loop_depth), AbstractCode::kMaxLoopNestingMarker);
// Assert that all back edges for shallower loops (and only those)
// have already been patched.
CHECK_EQ((static_cast<int>(loop_depth) <= loop_nesting_level),
......
......@@ -3038,6 +3038,7 @@ AllocationResult Heap::AllocateBytecodeArray(int length,
instance->set_frame_size(frame_size);
instance->set_parameter_count(parameter_count);
instance->set_interrupt_budget(interpreter::Interpreter::InterruptBudget());
instance->set_osr_loop_nesting_level(0);
instance->set_constant_pool(constant_pool);
instance->set_handler_table(empty_fixed_array());
instance->set_source_position_table(empty_byte_array());
......@@ -3386,6 +3387,7 @@ AllocationResult Heap::CopyBytecodeArray(BytecodeArray* bytecode_array) {
copy->set_handler_table(bytecode_array->handler_table());
copy->set_source_position_table(bytecode_array->source_position_table());
copy->set_interrupt_budget(bytecode_array->interrupt_budget());
copy->set_osr_loop_nesting_level(bytecode_array->osr_loop_nesting_level());
bytecode_array->CopyBytecodesTo(copy);
return copy;
}
......
......@@ -4096,6 +4096,15 @@ void BytecodeArray::set_interrupt_budget(int interrupt_budget) {
WRITE_INT_FIELD(this, kInterruptBudgetOffset, interrupt_budget);
}
// Returns the loop nesting level up to which back edges in this bytecode
// are currently armed as OSR points. Stored in the BytecodeArray header at
// kOSRNestingLevelOffset; initialized to 0 on allocation (see
// Heap::AllocateBytecodeArray) and raised incrementally by the runtime
// profiler from outermost loops to innermost.
int BytecodeArray::osr_loop_nesting_level() const {
return READ_INT_FIELD(this, kOSRNestingLevelOffset);
}
// Sets the loop nesting level up to which back edges are armed as OSR
// points. The value is bounded by AbstractCode::kMaxLoopNestingMarker
// (checked in debug builds only); callers clamp with Min() before storing,
// e.g. RuntimeProfiler::AttemptOnStackReplacement.
void BytecodeArray::set_osr_loop_nesting_level(int depth) {
DCHECK(0 <= depth && depth <= AbstractCode::kMaxLoopNestingMarker);
WRITE_INT_FIELD(this, kOSRNestingLevelOffset, depth);
}
int BytecodeArray::parameter_count() const {
// Parameter count is stored as the size on stack of the parameters to allow
// it to be used directly by generated code.
......@@ -5054,7 +5063,7 @@ int Code::allow_osr_at_loop_nesting_level() {
void Code::set_allow_osr_at_loop_nesting_level(int level) {
DCHECK_EQ(FUNCTION, kind());
DCHECK(level >= 0 && level <= kMaxLoopNestingMarker);
DCHECK(level >= 0 && level <= AbstractCode::kMaxLoopNestingMarker);
int previous = READ_UINT32_FIELD(this, kKindSpecificFlags2Offset);
int updated = AllowOSRAtLoopNestingLevelField::update(previous, level);
WRITE_UINT32_FIELD(this, kKindSpecificFlags2Offset, updated);
......
......@@ -4517,6 +4517,10 @@ class BytecodeArray : public FixedArrayBase {
inline int interrupt_budget() const;
inline void set_interrupt_budget(int interrupt_budget);
// Accessors for OSR loop nesting level.
inline int osr_loop_nesting_level() const;
inline void set_osr_loop_nesting_level(int depth);
// Accessors for the constant pool.
DECL_ACCESSORS(constant_pool, FixedArray)
......@@ -4556,7 +4560,10 @@ class BytecodeArray : public FixedArrayBase {
static const int kFrameSizeOffset = kSourcePositionTableOffset + kPointerSize;
static const int kParameterSizeOffset = kFrameSizeOffset + kIntSize;
static const int kInterruptBudgetOffset = kParameterSizeOffset + kIntSize;
static const int kHeaderSize = kInterruptBudgetOffset + kIntSize;
// TODO(4764): The OSR nesting level is guaranteed to be in [0;6] bounds and
// could potentially be merged with another field (e.g. parameter_size).
static const int kOSRNestingLevelOffset = kInterruptBudgetOffset + kIntSize;
static const int kHeaderSize = kOSRNestingLevelOffset + kIntSize;
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;
......@@ -5358,10 +5365,6 @@ class Code: public HeapObject {
static Handle<WeakCell> WeakCellFor(Handle<Code> code);
WeakCell* CachedWeakCell();
// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
static const int kConstantPoolSize =
FLAG_enable_embedded_constant_pool ? kIntSize : 0;
......@@ -5465,7 +5468,6 @@ class Code: public HeapObject {
kIsCrankshaftedBit + 1, 27> {}; // NOLINT
class AllowOSRAtLoopNestingLevelField: public BitField<int,
kIsCrankshaftedBit + 1 + 27, 4> {}; // NOLINT
STATIC_ASSERT(AllowOSRAtLoopNestingLevelField::kMax >= kMaxLoopNestingMarker);
static const int kArgumentsBits = 16;
static const int kMaxArguments = (1 << kArgumentsBits) - 1;
......@@ -5537,6 +5539,12 @@ class AbstractCode : public HeapObject {
DECLARE_CAST(AbstractCode)
inline Code* GetCode();
inline BytecodeArray* GetBytecodeArray();
// Max loop nesting marker used to postpone OSR. We don't take loop
// nesting that is deeper than 5 levels into account.
static const int kMaxLoopNestingMarker = 6;
STATIC_ASSERT(Code::AllowOSRAtLoopNestingLevelField::kMax >=
kMaxLoopNestingMarker);
};
// Dependent code is a singly linked list of fixed arrays. Each array contains
......
......@@ -138,17 +138,29 @@ void RuntimeProfiler::AttemptOnStackReplacement(JSFunction* function,
// arguments accesses, which is unsound. Don't try OSR.
if (shared->uses_arguments()) return;
// We're using on-stack replacement: patch the unoptimized code so that
// any back edge in any unoptimized frame will trigger on-stack
// We're using on-stack replacement: modify unoptimized code so that
// certain back edges in any unoptimized frame will trigger on-stack
// replacement for that frame.
// - Ignition: Store new loop nesting level in BytecodeArray header.
// - FullCodegen: Patch back edges up to new level using BackEdgeTable.
if (FLAG_trace_osr) {
PrintF("[OSR - patching back edges in ");
PrintF("[OSR - arming back edges in ");
function->PrintName();
PrintF("]\n");
}
for (int i = 0; i < loop_nesting_levels; i++) {
BackEdgeTable::Patch(isolate_, shared->code());
if (shared->code()->kind() == Code::FUNCTION) {
DCHECK(BackEdgeTable::Verify(shared->GetIsolate(), shared->code()));
for (int i = 0; i < loop_nesting_levels; i++) {
BackEdgeTable::Patch(isolate_, shared->code());
}
} else if (shared->HasBytecodeArray()) {
DCHECK(FLAG_ignition_osr); // Should only happen when enabled.
int level = shared->bytecode_array()->osr_loop_nesting_level();
shared->bytecode_array()->set_osr_loop_nesting_level(
Min(level + loop_nesting_levels, AbstractCode::kMaxLoopNestingMarker));
} else {
UNREACHABLE();
}
}
......@@ -161,7 +173,7 @@ void RuntimeProfiler::MaybeOptimizeFullCodegen(JSFunction* function,
if (function->IsInOptimizationQueue()) return;
if (FLAG_always_osr) {
AttemptOnStackReplacement(function, Code::kMaxLoopNestingMarker);
AttemptOnStackReplacement(function, AbstractCode::kMaxLoopNestingMarker);
// Fall through and do a normal optimized compile as well.
} else if (!frame_optimized &&
(function->IsMarkedForOptimization() ||
......
......@@ -175,20 +175,19 @@ RUNTIME_FUNCTION(Runtime_OptimizeOsr) {
return isolate->heap()->undefined_value();
}
// If function is interpreted, just return. OSR is not supported.
// TODO(4764): Remove this check when OSR is enabled in the interpreter.
if (function->shared()->HasBytecodeArray()) {
// If function is interpreted but OSR hasn't been enabled, just return.
if (function->shared()->HasBytecodeArray() && !FLAG_ignition_osr) {
return isolate->heap()->undefined_value();
}
// If the function is already optimized, just return.
if (function->IsOptimized()) return isolate->heap()->undefined_value();
Code* unoptimized = function->shared()->code();
if (unoptimized->kind() == Code::FUNCTION) {
DCHECK(BackEdgeTable::Verify(isolate, unoptimized));
// Make the profiler arm all back edges in unoptimized code.
if (function->shared()->HasBytecodeArray() ||
function->shared()->code()->kind() == Code::FUNCTION) {
isolate->runtime_profiler()->AttemptOnStackReplacement(
*function, Code::kMaxLoopNestingMarker);
*function, AbstractCode::kMaxLoopNestingMarker);
}
return isolate->heap()->undefined_value();
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment