Commit 51b99213 authored by Jakob Gruber, committed by V8 LUCI CQ

[osr] Add an install-by-offset mechanism

.. for concurrent OSR. There, the challenge is to hit the correct
JumpLoop bytecode once compilation completes, since execution has
moved on in the meantime.

This CL adds a new mechanism to request installation at a specific
bytecode offset. We add a new `osr_install_target` field to the
BytecodeArray:

  bitfield struct OSRUrgencyAndInstallTarget extends uint16 {
    osr_urgency: uint32: 3 bit;
    osr_install_target: uint32: 13 bit;
  }

  // [...]
  osr_urgency_and_install_target: OSRUrgencyAndInstallTarget;
  bytecode_age: uint16;  // Only 3 bits used.
  // [...]

Note that urgency and install target are packed into one 16-bit
field; we can thus merge both checks into a single comparison within
JumpLoop.
Note also that these fields are adjacent to the bytecode age; we
still reset both OSR state and age with a single (now 32-bit)
store.

The install target is the lowest 13 bits of the bytecode offset.
While it is set, every JumpLoop that is reached checks `is this my
offset?`, and if so, jumps into the runtime to tier up.
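
To make the packing concrete, here is a minimal standalone C++ sketch
of the 16-bit layout and of the merged fast-path check. The constants
and helper names are illustrative stand-ins for the Torque-generated
OsrUrgencyBits/OsrInstallTargetBits accessors, and the loop-depth
bound relies on kMaxOsrUrgency = 6 (see the header diff below):

  #include <cassert>
  #include <cstdint>

  constexpr unsigned kUrgencyBits = 3;
  constexpr uint16_t kUrgencyMask = (1u << kUrgencyBits) - 1;       // bits 0..2
  constexpr unsigned kInstallTargetShift = kUrgencyBits;            // 3
  constexpr uint16_t kInstallTargetMask = uint16_t(~kUrgencyMask);  // bits 3..15

  constexpr uint16_t Pack(unsigned urgency, unsigned install_target) {
    return static_cast<uint16_t>((urgency & kUrgencyMask) |
                                 (install_target << kInstallTargetShift));
  }

  // JumpLoop's fast path: one unsigned comparison. Because the loop depth is
  // at most kMaxOsrUrgency (6) < 8, `packed <= loop_depth` can only hold when
  // the install target bits are all zero AND urgency <= loop_depth, i.e. when
  // OSR is not armed in either way. Equivalently:
  //   (packed & kInstallTargetMask) == 0 && (packed & kUrgencyMask) <= depth
  constexpr bool OsrNotArmed(uint16_t packed, unsigned loop_depth) {
    return packed <= loop_depth;
  }

  int main() {
    assert(OsrNotArmed(Pack(2, 0), 3));      // low urgency, no target: disarmed
    assert(!OsrNotArmed(Pack(5, 0), 3));     // urgency exceeds loop depth
    assert(!OsrNotArmed(Pack(0, 0x42), 3));  // install target set
  }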

Drive-by: Rename BaselineAssembler::LoadByteField to LoadWord8Field.

Bug: v8:12161
Change-Id: I275d468b19df3a4816392a2fec0713a8d211ef80
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3571812
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79853}
parent a90f1748
......@@ -102,18 +102,28 @@ void BaselineAssembler::JumpTarget() {
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ b(target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance) {
JumpIf(cc, left, Operand(right), target, distance);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
......@@ -351,18 +361,27 @@ void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ ldr(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ ldrh(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ ldrb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
ASM_CODE_COMMENT(masm_);
......@@ -371,6 +390,7 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
__ mov(tmp, Operand(value));
__ str(tmp, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
......@@ -380,6 +400,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
......@@ -432,6 +453,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ add(lhs, lhs, Operand(rhs));
}
void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
__ and_(output, lhs, Operand(rhs));
}
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
......
......@@ -100,23 +100,33 @@ void BaselineAssembler::JumpTarget() { __ JumpTarget(); }
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ B(target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance) {
__ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance) {
__ JumpIfNotSmi(value, target);
}
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance) {
JumpIf(cc, left, Immediate(right), target, distance);
}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
if (masm()->options().short_builtin_calls) {
// Generate pc-relative call.
......@@ -424,18 +434,27 @@ void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ Ldrh(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ Ldrb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
ASM_CODE_COMMENT(masm_);
......@@ -444,6 +463,7 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
__ Mov(tmp, Operand(value));
__ StoreTaggedField(tmp, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
......@@ -452,6 +472,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
__ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
......@@ -509,6 +530,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
}
}
void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
__ And(output, lhs, Immediate(rhs));
}
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
......
......@@ -79,6 +79,9 @@ class BaselineAssembler {
Label::Distance distance = Label::kFar);
inline void JumpIfSmi(Condition cc, Register lhs, Register rhs, Label* target,
Label::Distance distance = Label::kFar);
inline void JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance = Label::kFar);
inline void JumpIfTagged(Condition cc, Register value, MemOperand operand,
Label* target,
Label::Distance distance = Label::kFar);
......@@ -152,7 +155,9 @@ class BaselineAssembler {
inline void LoadTaggedSignedField(Register output, Register source,
int offset);
inline void LoadTaggedAnyField(Register output, Register source, int offset);
inline void LoadByteField(Register output, Register source, int offset);
inline void LoadWord16FieldZeroExtend(Register output, Register source,
int offset);
inline void LoadWord8Field(Register output, Register source, int offset);
inline void StoreTaggedSignedField(Register target, int offset, Smi value);
inline void StoreTaggedFieldWithWriteBarrier(Register target, int offset,
Register value);
......@@ -173,6 +178,8 @@ class BaselineAssembler {
inline void SmiUntag(Register value);
inline void SmiUntag(Register output, Register value);
inline void Word32And(Register output, Register lhs, int rhs);
inline void Switch(Register reg, int case_value_base, Label** labels,
int num_labels);
......
......@@ -1538,7 +1538,7 @@ void BaselineCompiler::VisitTestUndetectable() {
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
Condition::kZero, &not_undetectable, Label::kNear);
......@@ -1665,7 +1665,7 @@ void BaselineCompiler::VisitTestTypeOf() {
// All other undetectable maps are typeof undefined.
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
Condition::kZero, &not_undetectable, Label::kNear);
......@@ -1685,7 +1685,7 @@ void BaselineCompiler::VisitTestTypeOf() {
// Check if the map is callable but not undetectable.
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
__ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsCallableBit::kMask,
Condition::kZero, &not_callable, Label::kNear);
__ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
......@@ -1717,7 +1717,7 @@ void BaselineCompiler::VisitTestTypeOf() {
// If the map is undetectable or callable, return false.
Register map_bit_field = kInterpreterAccumulatorRegister;
__ LoadByteField(map_bit_field, map, Map::kBitFieldOffset);
__ LoadWord8Field(map_bit_field, map, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field,
Map::Bits1::IsUndetectableBit::kMask |
Map::Bits1::IsCallableBit::kMask,
......@@ -1925,21 +1925,50 @@ void BaselineCompiler::VisitCreateRestParameter() {
}
void BaselineCompiler::VisitJumpLoop() {
BaselineAssembler::ScratchRegisterScope scope(&basm_);
Register scratch = scope.AcquireScratch();
Label osr_not_armed;
Label osr_not_armed, osr;
{
BaselineAssembler::ScratchRegisterScope scope(&basm_);
Register osr_urgency_and_install_target = scope.AcquireScratch();
ASM_CODE_COMMENT_STRING(&masm_, "OSR Check Armed");
Register osr_urgency = scratch;
__ LoadRegister(osr_urgency, interpreter::Register::bytecode_array());
__ LoadByteField(osr_urgency, osr_urgency,
BytecodeArray::kOsrUrgencyOffset);
__ LoadRegister(osr_urgency_and_install_target,
interpreter::Register::bytecode_array());
__ LoadWord16FieldZeroExtend(
osr_urgency_and_install_target, osr_urgency_and_install_target,
BytecodeArray::kOsrUrgencyAndInstallTargetOffset);
int loop_depth = iterator().GetImmediateOperand(1);
__ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_urgency, loop_depth,
&osr_not_armed);
CallBuiltin<Builtin::kBaselineOnStackReplacement>();
__ JumpIfImmediate(Condition::kUnsignedLessThanEqual,
osr_urgency_and_install_target, loop_depth,
&osr_not_armed, Label::kNear);
// TODO(jgruber): Move the extended checks into the
// BaselineOnStackReplacement builtin.
// OSR based on urgency, i.e. is the OSR urgency greater than the current
// loop depth?
STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
Register scratch2 = scope.AcquireScratch();
__ Word32And(scratch2, osr_urgency_and_install_target,
BytecodeArray::OsrUrgencyBits::kMask);
__ JumpIfImmediate(Condition::kUnsignedGreaterThan, scratch2, loop_depth,
&osr, Label::kNear);
// OSR based on the install target offset, i.e. does the current bytecode
// offset match the install target offset?
static constexpr int kShift = BytecodeArray::OsrInstallTargetBits::kShift;
static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
const int encoded_current_offset =
BytecodeArray::OsrInstallTargetFor(
BytecodeOffset{iterator().current_offset()})
<< kShift;
__ Word32And(scratch2, osr_urgency_and_install_target, kMask);
__ JumpIfImmediate(Condition::kNotEqual, scratch2, encoded_current_offset,
&osr_not_armed, Label::kNear);
}
__ Bind(&osr);
CallBuiltin<Builtin::kBaselineOnStackReplacement>();
__ Bind(&osr_not_armed);
Label* label = &labels_[iterator().GetJumpTargetOffset()]->unlinked;
int weight = iterator().GetRelativeJumpTargetOffset() -
......@@ -2184,7 +2213,7 @@ void BaselineCompiler::VisitThrowIfNotSuperConstructor() {
LoadRegister(reg, 0);
Register map_bit_field = scratch_scope.AcquireScratch();
__ LoadMap(map_bit_field, reg);
__ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
__ TestAndBranch(map_bit_field, Map::Bits1::IsConstructorBit::kMask,
Condition::kNotZero, &done, Label::kNear);
......
......@@ -103,18 +103,29 @@ void BaselineAssembler::JumpTarget() {
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
__ jmp(target, distance);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
Label* target, Label::Distance distance) {
__ JumpIfRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
Label* target, Label::Distance distance) {
__ JumpIfNotRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
Label::Distance distance) {
__ JumpIfSmi(value, target, distance);
}
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance) {
__ cmp(left, Immediate(right));
__ j(AsMasmCondition(cc), target, distance);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
Label::Distance distance) {
__ JumpIfNotSmi(value, target, distance);
......@@ -323,22 +334,32 @@ void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
int offset) {
__ mov(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
int offset) {
__ mov(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ mov(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ movzx_w(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ mov_b(output, FieldOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
Smi value) {
__ mov(FieldOperand(target, offset), Immediate(value));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
int offset,
Register value) {
......@@ -349,6 +370,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
__ mov(FieldOperand(target, offset), value);
__ RecordWriteField(target, offset, value, scratch, SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
int offset,
Register value) {
......@@ -391,6 +413,11 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
__ add(lhs, Immediate(rhs));
}
void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
Move(output, lhs);
__ and_(output, Immediate(rhs));
}
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
......
......@@ -346,7 +346,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ Ld_d(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ Ld_b(output, FieldMemOperand(source, offset));
}
......
......@@ -356,7 +356,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ Lw(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ lb(output, FieldMemOperand(source, offset));
}
......
......@@ -354,7 +354,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ Ld(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ Lb(output, FieldMemOperand(source, offset));
}
......
......@@ -455,7 +455,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
UNIMPLEMENTED();
}
void BaselineAssembler::LoadByteField(Register output, Register source,
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
UNIMPLEMENTED();
}
......
......@@ -347,7 +347,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ Lb(output, FieldMemOperand(source, offset));
}
......
......@@ -439,7 +439,7 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
}
void BaselineAssembler::LoadByteField(Register output, Register source,
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ LoadU8(output, FieldMemOperand(source, offset));
}
......
......@@ -195,6 +195,14 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
__ SmiCompare(lhs, rhs);
__ j(AsMasmCondition(cc), target, distance);
}
void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
Label* target,
Label::Distance distance) {
__ cmpq(left, Immediate(right));
__ j(AsMasmCondition(cc), target, distance);
}
// cmp_tagged
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
MemOperand operand, Label* target,
......@@ -338,7 +346,11 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
int offset) {
__ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadByteField(Register output, Register source,
void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
Register source, int offset) {
__ movzxwq(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadWord8Field(Register output, Register source,
int offset) {
__ movb(output, FieldOperand(source, offset));
}
......@@ -402,6 +414,11 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
}
}
void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
Move(output, lhs);
__ andq(output, Immediate(rhs));
}
void BaselineAssembler::Switch(Register reg, int case_value_base,
Label** labels, int num_labels) {
ASM_CODE_COMMENT(masm_);
......
......@@ -1065,6 +1065,22 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
}
namespace {
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
Register scratch) {
// Reset the bytecode age and OSR state (optimized to a single write).
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyAndInstallTargetOffset + kUInt16Size);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(scratch, Operand(0));
__ str(scratch,
FieldMemOperand(bytecode_array,
BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
}
} // namespace
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
......@@ -1135,21 +1151,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// the frame, so load it into a register.
Register bytecodeArray = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
{
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
__ strh(scratch,
FieldMemOperand(bytecodeArray, BytecodeArray::kOsrUrgencyOffset));
ResetBytecodeAgeAndOsrState(masm, bytecodeArray, temps.Acquire());
}
__ Push(argc, bytecodeArray);
// Baseline code frames store the feedback vector where interpreter would
......@@ -1289,15 +1294,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
FrameScope frame_scope(masm, StackFrame::MANUAL);
__ PushStandardFrame(closure);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov(r9, Operand(0));
__ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset));
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r9);
// Load the initial bytecode offset.
__ mov(kInterpreterBytecodeOffsetRegister,
......@@ -3659,14 +3656,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Pop(kInterpreterAccumulatorRegister);
if (is_osr) {
// Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't
// disarm Sparkplug here.
UseScratchRegisterScope temps(masm);
Register scratch = temps.Acquire();
__ mov(scratch, Operand(0));
__ strh(scratch, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset));
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister,
temps.Acquire());
Generate_OSREntry(masm, code_obj,
Operand(Code::kHeaderSize - kHeapObjectTag));
} else {
......
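
The new ResetBytecodeAgeAndOsrState helper relies on the field layout
guarded by the STATIC_ASSERTs above: both 16-bit fields reset to zero
and sit next to each other, so one 32-bit store clears both. A hedged
C++ model (the struct and offsets are made up for illustration; the
real code emits `str wzr` / `movl $0` / etc.):

  #include <cassert>
  #include <cstddef>
  #include <cstdint>
  #include <cstring>

  struct BytecodeArrayTail {  // illustrative stand-in for the real header
    uint16_t osr_urgency_and_install_target;  // urgency (3) | target (13)
    uint16_t bytecode_age;                    // kNoAgeBytecodeAge == 0
  };
  static_assert(offsetof(BytecodeArrayTail, bytecode_age) ==
                    offsetof(BytecodeArrayTail,
                             osr_urgency_and_install_target) +
                        sizeof(uint16_t),
                "adjacent fields, mirroring the STATIC_ASSERTs above");

  // Both fields reset to zero, so a single aligned 32-bit store clears them.
  void ResetBytecodeAgeAndOsrState(BytecodeArrayTail* tail) {
    const uint32_t zero = 0;
    std::memcpy(tail, &zero, sizeof(zero));
  }

  int main() {
    BytecodeArrayTail tail{0x1234, 3};
    ResetBytecodeAgeAndOsrState(&tail);
    assert(tail.osr_urgency_and_install_target == 0 && tail.bytecode_age == 0);
  }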
......@@ -1248,6 +1248,21 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
}
namespace {
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
Register bytecode_array) {
// Reset the bytecode age and OSR state (optimized to a single write).
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyAndInstallTargetOffset + kUInt16Size);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Str(wzr,
FieldMemOperand(bytecode_array,
BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
}
} // namespace
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
UseScratchRegisterScope temps(masm);
......@@ -1310,16 +1325,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// the frame, so load it into a register.
Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Strh(wzr,
FieldMemOperand(bytecode_array, BytecodeArray::kOsrUrgencyOffset));
ResetBytecodeAgeAndOsrState(masm, bytecode_array);
__ Push(argc, bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
......@@ -1469,15 +1475,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ mov(fp, sp);
__ Push(cp, closure);
// Reset code age.
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset));
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
// Load the initial bytecode offset.
__ Mov(kInterpreterBytecodeOffsetRegister,
......@@ -4182,11 +4180,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ Pop(kInterpreterAccumulatorRegister, padreg);
if (is_osr) {
// Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset));
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't
// disarm Sparkplug here.
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
} else {
__ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
......
......@@ -1036,6 +1036,21 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
TailCallOptimizedCodeSlot(masm, optimized_code_entry);
}
namespace {
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
Register bytecode_array) {
// Reset the bytecode age and OSR state (optimized to a single write).
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyAndInstallTargetOffset + kUInt16Size);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov_w(FieldOperand(bytecode_array,
BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
Immediate(0));
}
} // namespace
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
......@@ -1124,15 +1139,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
}
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset),
Immediate(0));
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
// Push bytecode array.
__ push(kInterpreterBytecodeArrayRegister);
......@@ -1744,14 +1751,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// the frame, so load it into a register.
Register bytecode_array = scratch;
__ movd(bytecode_array, saved_bytecode_array);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
// are 8-bit fields next to each other, so we could just optimize by writing
// a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ mov_w(FieldOperand(bytecode_array, BytecodeArray::kOsrUrgencyOffset),
Immediate(0));
ResetBytecodeAgeAndOsrState(masm, bytecode_array);
__ Push(bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
......@@ -4275,12 +4275,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ pop(kInterpreterAccumulatorRegister);
if (is_osr) {
// Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset),
Immediate(0));
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't
// disarm Sparkplug here.
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj);
} else {
__ jmp(code_obj);
......
......@@ -1144,6 +1144,21 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
jump_mode);
}
namespace {
void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
Register bytecode_array) {
// Reset the bytecode age and OSR state (optimized to a single write).
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyAndInstallTargetOffset + kUInt16Size);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ movl(FieldOperand(bytecode_array,
BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
Immediate(0));
}
} // namespace
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
......@@ -1219,15 +1234,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
__ Push(kJavaScriptCallTargetRegister); // Callee's JS function.
__ Push(kJavaScriptCallArgCountRegister); // Actual argument count.
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
// 8-bit fields next to each other, so we could just optimize by writing a
// 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset),
Immediate(0));
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
// Load initial bytecode offset.
__ Move(kInterpreterBytecodeOffsetRegister,
......@@ -1733,15 +1740,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
// onto the frame, so load it into a register.
Register bytecode_array = descriptor.GetRegisterParameter(
BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
// Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
// are 8-bit fields next to each other, so we could just optimize by
// writing a 16-bit. These static asserts guard our assumption is valid.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
BytecodeArray::kOsrUrgencyOffset + kCharSize);
STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
__ movw(FieldOperand(bytecode_array, BytecodeArray::kOsrUrgencyOffset),
Immediate(0));
ResetBytecodeAgeAndOsrState(masm, bytecode_array);
__ Push(bytecode_array);
// Baseline code frames store the feedback vector where interpreter would
......@@ -2726,21 +2725,26 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
__ ret(0);
}
void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
enum class OsrSourceTier {
kInterpreter,
kBaseline,
};
void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kCompileOptimizedOSR);
}
Label skip;
Label jump_to_returned_code;
// If the code object is null, just return to the caller.
__ testq(rax, rax);
__ j(not_equal, &skip, Label::kNear);
__ j(not_equal, &jump_to_returned_code, Label::kNear);
__ ret(0);
__ bind(&skip);
__ bind(&jump_to_returned_code);
if (is_interpreter) {
if (source == OsrSourceTier::kInterpreter) {
// Drop the handler frame that is sitting on top of the actual
// JavaScript frame. This is the case when OSR is triggered from bytecode.
__ leave();
......@@ -2768,13 +2772,13 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
} // namespace
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
return OnStackReplacement(masm, true);
OnStackReplacement(masm, OsrSourceTier::kInterpreter);
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
__ movq(kContextRegister,
MemOperand(rbp, BaselineFrameConstants::kContextOffset));
return OnStackReplacement(masm, false);
OnStackReplacement(masm, OsrSourceTier::kBaseline);
}
#if V8_ENABLE_WEBASSEMBLY
......@@ -5124,12 +5128,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
__ popq(kInterpreterAccumulatorRegister);
if (is_osr) {
// Reset the OSR loop nesting depth to disarm back edges.
// TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
// Sparkplug here.
__ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
BytecodeArray::kOsrUrgencyOffset),
Immediate(0));
// TODO(pthier): Separate Sparkplug and Turbofan OSR states.
ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
Generate_OSREntry(masm, code_obj);
} else {
__ jmp(code_obj);
......
......@@ -3363,7 +3363,7 @@ MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
// -- Alright, decided to proceed. --
// Disarm all back edges, i.e. reset the OSR urgency.
// Disarm all back edges, i.e. reset the OSR urgency and install target.
//
// Note that the bytecode array active on the stack might be different from
// the one installed on the function (e.g. patched by debugger). This however
......@@ -3371,7 +3371,7 @@ MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
// BytecodeOffset representing the entry point will be valid for any copy of
// the bytecode.
Handle<BytecodeArray> bytecode(frame->GetBytecodeArray(), isolate);
bytecode->reset_osr_urgency();
bytecode->reset_osr_urgency_and_install_target();
CompilerTracer::TraceOptimizeOSR(isolate, function, osr_offset, mode);
MaybeHandle<CodeT> result = GetOrCompileOptimized(
......@@ -3435,9 +3435,12 @@ bool Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
OptimizedCodeCache::Insert(compilation_info);
CompilerTracer::TraceCompletedJob(isolate, compilation_info);
if (IsOSR(osr_offset)) {
// TODO(jgruber): Implement a targeted install request for the
// specific osr_offset.
shared->GetBytecodeArray(isolate).RequestOsrAtNextOpportunity();
if (FLAG_trace_osr) {
PrintF(CodeTracer::Scope{isolate->GetCodeTracer()}.file(),
"[OSR - requesting install. function: %s, osr offset: %d]\n",
function->DebugNameCStr().get(), osr_offset.ToInt());
}
shared->GetBytecodeArray(isolate).set_osr_install_target(osr_offset);
} else {
function->set_code(*compilation_info->code(), kReleaseStore);
}
......
......@@ -1086,6 +1086,20 @@ class V8_EXPORT_PRIVATE CodeAssembler {
TNode<Word32T> Word32Shr(TNode<Word32T> value, int shift);
TNode<Word32T> Word32Sar(TNode<Word32T> value, int shift);
// Convenience overloads.
TNode<Int32T> Int32Sub(TNode<Int32T> left, int right) {
return Int32Sub(left, Int32Constant(right));
}
TNode<Word32T> Word32And(TNode<Word32T> left, int right) {
return Word32And(left, Int32Constant(right));
}
TNode<Int32T> Word32Shl(TNode<Int32T> left, int right) {
return Word32Shl(left, Int32Constant(right));
}
TNode<BoolT> Word32Equal(TNode<Word32T> left, int right) {
return Word32Equal(left, Int32Constant(right));
}
// Unary
#define DECLARE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
TNode<ResType> name(TNode<ArgType> a);
......
......@@ -247,7 +247,7 @@ Handle<BytecodeArray> FactoryBase<Impl>::NewBytecodeArray(
instance.set_parameter_count(parameter_count);
instance.set_incoming_new_target_or_generator_register(
interpreter::Register::invalid_value());
instance.reset_osr_urgency();
instance.reset_osr_urgency_and_install_target();
instance.set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
instance.set_constant_pool(*constant_pool);
instance.set_handler_table(read_only_roots().empty_byte_array(),
......
......@@ -1307,16 +1307,18 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
// length of the back-edge, so we just have to correct for the non-zero offset
// of the first bytecode.
const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
TNode<Int32T> profiling_weight =
Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
Int32Constant(kFirstBytecodeOffset));
UpdateInterruptBudget(profiling_weight, true);
}
TNode<Int8T> InterpreterAssembler::LoadOsrUrgency() {
return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(),
BytecodeArray::kOsrUrgencyOffset);
TNode<Int16T> InterpreterAssembler::LoadOsrUrgencyAndInstallTarget() {
// We're loading a 16-bit field, mask it.
return UncheckedCast<Int16T>(Word32And(
LoadObjectField<Int16T>(BytecodeArrayTaggedPointer(),
BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
0xFFFF));
}
void InterpreterAssembler::Abort(AbortReason abort_reason) {
......
......@@ -234,8 +234,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Updates the profiler interrupt budget for a return.
void UpdateInterruptBudgetOnReturn();
// Returns the OSR urgency from the bytecode header.
TNode<Int8T> LoadOsrUrgency();
// Returns the OSR urgency and install target from the bytecode header.
TNode<Int16T> LoadOsrUrgencyAndInstallTarget();
// Dispatch to the bytecode.
void Dispatch();
......@@ -266,6 +266,12 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Perform OnStackReplacement.
void OnStackReplacement(TNode<Context> context, TNode<IntPtrT> relative_jump);
// The BytecodeOffset() is the offset from the BytecodeArray pointer; to
// translate into runtime `BytecodeOffset` (defined in utils.h as the offset
// from the start of the bytecode section), this constant has to be applied.
static constexpr int kFirstBytecodeOffset =
BytecodeArray::kHeaderSize - kHeapObjectTag;
// Returns the offset from the BytecodeArrayPointer of the current bytecode.
TNode<IntPtrT> BytecodeOffset();
......
......@@ -2166,26 +2166,50 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
// JumpLoop <imm> <loop_depth>
//
// Jump by the number of bytes represented by the immediate operand |imm|. Also
// performs a loop nesting check, a stack check, and potentially triggers OSR
// in case `loop_depth < osr_urgency`.
// performs a loop nesting check, a stack check, and potentially triggers OSR.
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
TNode<Int32T> loop_depth = BytecodeOperandImm(1);
TNode<Int8T> osr_urgency = LoadOsrUrgency();
TNode<Int16T> osr_urgency_and_install_target =
LoadOsrUrgencyAndInstallTarget();
TNode<Context> context = GetContext();
// Check if OSR points at the given {loop_depth} are armed by comparing it to
// the current {osr_urgency} loaded from the header of the BytecodeArray.
Label ok(this), osr_armed(this, Label::kDeferred);
TNode<BoolT> condition = Int32GreaterThanOrEqual(loop_depth, osr_urgency);
Branch(condition, &ok, &osr_armed);
// OSR requests can be triggered either through urgency (when > the current
// loop depth), or an explicit install target (= the lower bits of the
// targeted bytecode offset).
Label ok(this), maybe_osr(this, Label::kDeferred);
Branch(Int32GreaterThanOrEqual(loop_depth, osr_urgency_and_install_target),
&ok, &maybe_osr);
BIND(&ok);
// The backward jump can trigger a budget interrupt, which can handle stack
// interrupts, so we don't need to explicitly handle them here.
JumpBackward(relative_jump);
BIND(&osr_armed);
BIND(&maybe_osr);
Label osr(this);
// OSR based on urgency, i.e. is the OSR urgency greater than the current
// loop depth?
STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
TNode<Word32T> osr_urgency = Word32And(osr_urgency_and_install_target,
BytecodeArray::OsrUrgencyBits::kMask);
GotoIf(Int32GreaterThan(osr_urgency, loop_depth), &osr);
// OSR based on the install target offset, i.e. does the current bytecode
// offset match the install target offset?
//
// if (((offset << kShift) & kMask) == (target & kMask)) { ... }
static constexpr int kShift = BytecodeArray::OsrInstallTargetBits::kShift;
static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
// Note: We OR in 1 to avoid 0 offsets, see
// BytecodeArray::OsrInstallTargetFor.
TNode<Word32T> actual = Word32Or(
Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()), kFirstBytecodeOffset),
Int32Constant(1));
actual = Word32And(Word32Shl(UncheckedCast<Int32T>(actual), kShift), kMask);
TNode<Word32T> expected = Word32And(osr_urgency_and_install_target, kMask);
Branch(Word32Equal(actual, expected), &osr, &ok);
BIND(&osr);
OnStackReplacement(context, relative_jump);
}
......
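
The install-target comparison in this handler boils down to the
arithmetic below; a hedged standalone sketch, with constants mirroring
the 3/13-bit split and `current_offset` already relative to the
bytecode section (i.e. BytecodeOffset() minus kFirstBytecodeOffset):

  #include <cassert>
  #include <cstdint>

  constexpr int kShift = 3;           // OsrInstallTargetBits::kShift
  constexpr uint32_t kMask = 0xFFF8;  // OsrInstallTargetBits::kMask

  // BytecodeArray::OsrInstallTargetFor: keep the low 13 bits and OR in a 1,
  // so a valid target is never 0 (0 means "no install target set").
  constexpr uint32_t OsrInstallTargetFor(int offset) {
    return static_cast<uint32_t>(offset | 1) & (kMask >> kShift);
  }

  constexpr bool InstallTargetMatches(uint16_t packed, int current_offset) {
    uint32_t actual = (OsrInstallTargetFor(current_offset) << kShift) & kMask;
    return actual == (packed & kMask);
  }

  int main() {
    // Arm the install target for the JumpLoop at (illustrative) offset 0x85.
    uint16_t packed =
        static_cast<uint16_t>(OsrInstallTargetFor(0x85) << kShift);
    assert(InstallTargetMatches(packed, 0x85));   // this JumpLoop triggers OSR
    assert(!InstallTargetMatches(packed, 0x42));  // others fall through
  }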
......@@ -1188,13 +1188,14 @@ void BytecodeArray::set_incoming_new_target_or_generator_register(
}
int BytecodeArray::osr_urgency() const {
return ACQUIRE_READ_INT8_FIELD(*this, kOsrUrgencyOffset);
return OsrUrgencyBits::decode(osr_urgency_and_install_target());
}
void BytecodeArray::set_osr_urgency(int urgency) {
DCHECK(0 <= urgency && urgency <= BytecodeArray::kMaxOsrUrgency);
STATIC_ASSERT(BytecodeArray::kMaxOsrUrgency < kMaxInt8);
RELEASE_WRITE_INT8_FIELD(*this, kOsrUrgencyOffset, urgency);
STATIC_ASSERT(BytecodeArray::kMaxOsrUrgency <= OsrUrgencyBits::kMax);
uint32_t value = osr_urgency_and_install_target();
set_osr_urgency_and_install_target(OsrUrgencyBits::update(value, urgency));
}
BytecodeArray::Age BytecodeArray::bytecode_age() const {
......@@ -1208,6 +1209,27 @@ void BytecodeArray::RequestOsrAtNextOpportunity() {
set_osr_urgency(kMaxOsrUrgency);
}
int BytecodeArray::osr_install_target() {
return OsrInstallTargetBits::decode(osr_urgency_and_install_target());
}
void BytecodeArray::set_osr_install_target(BytecodeOffset jump_loop_offset) {
DCHECK_LE(jump_loop_offset.ToInt(), length());
set_osr_urgency_and_install_target(OsrInstallTargetBits::update(
osr_urgency_and_install_target(), OsrInstallTargetFor(jump_loop_offset)));
}
void BytecodeArray::reset_osr_install_target() {
uint32_t value = osr_urgency_and_install_target();
set_osr_urgency_and_install_target(
OsrInstallTargetBits::update(value, kNoOsrInstallTarget));
}
void BytecodeArray::reset_osr_urgency_and_install_target() {
set_osr_urgency_and_install_target(OsrUrgencyBits::encode(0) |
OsrInstallTargetBits::encode(0));
}
void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
DCHECK_GE(age, kFirstBytecodeAge);
DCHECK_LE(age, kLastBytecodeAge);
......
......@@ -952,6 +952,8 @@ DEFINE_OPERATORS_FOR_FLAGS(DependentCode::DependencyGroups)
class BytecodeArray
: public TorqueGeneratedBytecodeArray<BytecodeArray, FixedArrayBase> {
public:
DEFINE_TORQUE_GENERATED_OSRURGENCY_AND_INSTALL_TARGET()
enum Age {
kNoAgeBytecodeAge = 0,
kQuadragenarianBytecodeAge,
......@@ -994,11 +996,32 @@ class BytecodeArray
// the function becomes hotter. When the current loop depth is less than the
// osr_urgency, JumpLoop calls into runtime to attempt OSR optimization.
static constexpr int kMaxOsrUrgency = 6;
STATIC_ASSERT(kMaxOsrUrgency <= OsrUrgencyBits::kMax);
inline int osr_urgency() const;
inline void set_osr_urgency(int urgency);
inline void reset_osr_urgency();
inline void RequestOsrAtNextOpportunity();
// The [osr_install_target] is used upon finishing concurrent OSR
// compilation; instead of bumping the osr_urgency (which would target all
// JumpLoops of appropriate loop_depth), we target a specific JumpLoop at the
// given bytecode offset.
static constexpr int kNoOsrInstallTarget = 0;
static constexpr int OsrInstallTargetFor(BytecodeOffset offset) {
// Any set `osr_install_target` must be non-zero since zero is the 'unset'
// value and is ignored by generated code. For branchless code (both here
// and in generated code), we simply OR in a 1.
STATIC_ASSERT(kNoOsrInstallTarget == 0);
return (offset.ToInt() | 1) &
(OsrInstallTargetBits::kMask >> OsrInstallTargetBits::kShift);
}
inline int osr_install_target();
inline void set_osr_install_target(BytecodeOffset jump_loop_offset);
inline void reset_osr_install_target();
inline void reset_osr_urgency_and_install_target();
inline Age bytecode_age() const;
inline void set_bytecode_age(Age age);
......@@ -1017,8 +1040,6 @@ class BytecodeArray
inline int BytecodeArraySize();
inline int raw_instruction_size();
// Returns the size of bytecode and its metadata. This includes the size of
// bytecode, constant pool, source position table, and handler table.
inline int SizeIncludingMetadata();
......@@ -1039,9 +1060,9 @@ class BytecodeArray
inline void clear_padding();
// InterpreterEntryTrampoline expects these fields to be next to each other
// and writes a 16-bit value to reset them.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
kOsrUrgencyOffset + kCharSize);
// and writes a 32-bit value to reset them.
STATIC_ASSERT(kBytecodeAgeOffset ==
kOsrUrgencyAndInstallTargetOffset + kUInt16Size);
// Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB;
......
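
Putting the new BytecodeArray interface together, the intended
lifecycle is: the concurrent OSR compile finishes and
FinalizeTurbofanCompilationJob sets the install target for the
requesting JumpLoop's offset; Compiler::CompileOptimizedOSR later
disarms all back edges again. A hedged value-level sketch
(FakeBytecodeArray is a hypothetical stand-in for the inline
accessors shown above):

  #include <cassert>
  #include <cstdint>

  struct FakeBytecodeArray {  // hypothetical stand-in, values only
    uint16_t osr_urgency_and_install_target = 0;

    void set_osr_install_target(int jump_loop_offset) {
      uint16_t target = static_cast<uint16_t>((jump_loop_offset | 1) & 0x1FFF);
      osr_urgency_and_install_target = static_cast<uint16_t>(
          (osr_urgency_and_install_target & 0x7) | (target << 3));
    }
    void reset_osr_urgency_and_install_target() {
      osr_urgency_and_install_target = 0;
    }
  };

  int main() {
    FakeBytecodeArray bytecode;
    // Concurrent OSR compile done: request installation at the JumpLoop
    // bytecode offset (0x85 is illustrative).
    bytecode.set_osr_install_target(0x85);
    assert(bytecode.osr_urgency_and_install_target != 0);  // back edges armed
    // CompileOptimizedOSR: disarm all back edges before proceeding.
    bytecode.reset_osr_urgency_and_install_target();
    assert(bytecode.osr_urgency_and_install_target == 0);
  }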
......@@ -4,6 +4,14 @@
type DependentCode extends WeakFixedArray;
bitfield struct OSRUrgencyAndInstallTarget extends uint16 {
// The layout is chosen s.t. urgency and the install target offset can be
// loaded with a single 16-bit load (i.e. no masking required).
osr_urgency: uint32: 3 bit;
// The 13 LSB of the install target bytecode offset.
osr_install_target: uint32: 13 bit;
}
extern class BytecodeArray extends FixedArrayBase {
// TODO(v8:8983): bytecode array object sizes vary based on their contents.
constant_pool: FixedArray;
......@@ -22,10 +30,8 @@ extern class BytecodeArray extends FixedArrayBase {
frame_size: int32;
parameter_size: int32;
incoming_new_target_or_generator_register: int32;
// TODO(jgruber): We only use 3 bits for the urgency; consider folding
// into other fields.
osr_urgency: int8;
bytecode_age: int8;
osr_urgency_and_install_target: OSRUrgencyAndInstallTarget;
bytecode_age: uint16; // Only 3 bits used.
}
extern class CodeDataContainer extends HeapObject;
......@@ -491,7 +491,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, FeedbackSlot);
class BytecodeOffset {
public:
explicit constexpr BytecodeOffset(int id) : id_(id) {}
int ToInt() const { return id_; }
constexpr int ToInt() const { return id_; }
static constexpr BytecodeOffset None() { return BytecodeOffset(kNoneId); }
......