Commit 51b99213 authored by Jakob Gruber, committed by V8 LUCI CQ

[osr] Add an install-by-offset mechanism

.. for concurrent OSR. There, the challenge is to hit the correct
JumpLoop bytecode once compilation completes, since execution has
moved on in the meantime.

This CL adds a new mechanism to request installation at a specific
bytecode offset. We add a new `osr_install_target` field to the
BytecodeArray:

  bitfield struct OSRUrgencyAndInstallTarget extends uint16 {
    osr_urgency: uint32: 3 bit;
    osr_install_target: uint32: 13 bit;
  }

  // [...]
  osr_urgency_and_install_target: OSRUrgencyAndInstallTarget;
  bytecode_age: uint16;  // Only 3 bits used.
  // [...]

Note that urgency and install target are packed into one 16-bit
field, so we can merge both checks into a single comparison within
JumpLoop. Note also that these fields are adjacent to the bytecode
age; we can still reset both the OSR state and the age with a
single (now 32-bit) store.
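
To make the layout concrete, here is a small illustrative C++ model
(not V8 code; the struct and helper below are ours) of the state
word and the combined reset performed by the ResetBytecodeAgeAndOsrState
helpers further down in this CL:

  #include <cstdint>
  #include <cstring>

  // Illustrative model: bits 0..2 hold osr_urgency, bits 3..15 hold
  // osr_install_target; the bytecode age lives in the adjacent 16 bits.
  struct OsrStateModel {
    uint16_t osr_urgency_and_install_target;
    uint16_t bytecode_age;  // Only 3 bits used.
  };

  // Clearing both fields is one aligned 32-bit store.
  void Reset(OsrStateModel* state) {
    uint32_t zero = 0;
    std::memcpy(state, &zero, sizeof(zero));
  }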

The install target is the lowest 13 bits of the bytecode offset.
When it is set, every JumpLoop that executes checks `is this my
offset?` and, if so, jumps into the runtime to tier up.
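
For intuition, the per-JumpLoop check sketched as plain C++,
continuing the model above (a simplified sketch, not V8 code: the
helper name is ours, and the encoding of "no install target" is
glossed over):

  // Assumes the bit layout above: urgency in bits 0..2, install
  // target in bits 3..15 (the low 13 bits of the target offset).
  bool ShouldOsr(uint16_t urgency_and_target, int loop_depth,
                 int current_offset) {
    // Fast path: word <= loop_depth means this loop is not armed.
    if (urgency_and_target <= loop_depth) return false;
    // Urgency-based OSR: urgency exceeds the loop's nesting depth.
    if ((urgency_and_target & 0x7) > loop_depth) return true;
    // Install-target OSR: does this JumpLoop's offset match?
    return (urgency_and_target >> 3) == (current_offset & 0x1FFF);
  }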

Drive-by: Rename BaselineAssembler::LoadByteField to LoadWord8Field.

Bug: v8:12161
Change-Id: I275d468b19df3a4816392a2fec0713a8d211ef80
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3571812
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Commit-Queue: Jakob Linke <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/main@{#79853}
parent a90f1748
@@ -102,18 +102,28 @@ void BaselineAssembler::JumpTarget() {
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ b(target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance) {
  __ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance) {
  __ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance) {
  __ JumpIfSmi(value, target);
}
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+                                        Label* target,
+                                        Label::Distance distance) {
+  JumpIf(cc, left, Operand(right), target, distance);
+}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance) {
  __ JumpIfNotSmi(value, target);
@@ -351,18 +361,27 @@ void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
                                               int offset) {
  __ ldr(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  __ ldr(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ ldr(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
-                                      int offset) {
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+                                                  Register source, int offset) {
+  __ ldrh(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+                                       int offset) {
  __ ldrb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Smi value) {
  ASM_CODE_COMMENT(masm_);
@@ -371,6 +390,7 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
  __ mov(tmp, Operand(value));
  __ str(tmp, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
@@ -380,6 +400,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
  __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
                      SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
@@ -432,6 +453,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  __ add(lhs, lhs, Operand(rhs));
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+  __ and_(output, lhs, Operand(rhs));
+}
void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ASM_CODE_COMMENT(masm_);
......
@@ -100,23 +100,33 @@ void BaselineAssembler::JumpTarget() { __ JumpTarget(); }
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ B(target);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance) {
  __ JumpIfRoot(value, index, target);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance) {
  __ JumpIfNotRoot(value, index, target);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance) {
  __ JumpIfSmi(value, target);
}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance) {
  __ JumpIfNotSmi(value, target);
}
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+                                        Label* target,
+                                        Label::Distance distance) {
+  JumpIf(cc, left, Immediate(right), target, distance);
+}
void BaselineAssembler::CallBuiltin(Builtin builtin) {
  if (masm()->options().short_builtin_calls) {
    // Generate pc-relative call.
@@ -424,18 +434,27 @@ void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
                                               int offset) {
  __ LoadTaggedPointerField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  __ LoadTaggedSignedField(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
-                                      int offset) {
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+                                                  Register source, int offset) {
+  __ Ldrh(output, FieldMemOperand(source, offset));
+}
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+                                       int offset) {
  __ Ldrb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Smi value) {
  ASM_CODE_COMMENT(masm_);
@@ -444,6 +463,7 @@ void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
  __ Mov(tmp, Operand(value));
  __ StoreTaggedField(tmp, FieldMemOperand(target, offset));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
@@ -452,6 +472,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
  __ RecordWriteField(target, offset, value, kLRHasNotBeenSaved,
                      SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
@@ -509,6 +530,10 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  }
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+  __ And(output, lhs, Immediate(rhs));
+}
void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ASM_CODE_COMMENT(masm_);
......
@@ -79,6 +79,9 @@ class BaselineAssembler {
                        Label::Distance distance = Label::kFar);
  inline void JumpIfSmi(Condition cc, Register lhs, Register rhs, Label* target,
                        Label::Distance distance = Label::kFar);
+  inline void JumpIfImmediate(Condition cc, Register left, int right,
+                              Label* target,
+                              Label::Distance distance = Label::kFar);
  inline void JumpIfTagged(Condition cc, Register value, MemOperand operand,
                           Label* target,
                           Label::Distance distance = Label::kFar);
@@ -152,7 +155,9 @@ class BaselineAssembler {
  inline void LoadTaggedSignedField(Register output, Register source,
                                    int offset);
  inline void LoadTaggedAnyField(Register output, Register source, int offset);
-  inline void LoadByteField(Register output, Register source, int offset);
+  inline void LoadWord16FieldZeroExtend(Register output, Register source,
+                                        int offset);
+  inline void LoadWord8Field(Register output, Register source, int offset);
  inline void StoreTaggedSignedField(Register target, int offset, Smi value);
  inline void StoreTaggedFieldWithWriteBarrier(Register target, int offset,
                                               Register value);
@@ -173,6 +178,8 @@ class BaselineAssembler {
  inline void SmiUntag(Register value);
  inline void SmiUntag(Register output, Register value);
+  inline void Word32And(Register output, Register lhs, int rhs);
  inline void Switch(Register reg, int case_value_base, Label** labels,
                     int num_labels);
......
@@ -1538,7 +1538,7 @@ void BaselineCompiler::VisitTestUndetectable() {
  Register map_bit_field = kInterpreterAccumulatorRegister;
  __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
-  __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+  __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
  __ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
                   Condition::kZero, &not_undetectable, Label::kNear);
@@ -1665,7 +1665,7 @@ void BaselineCompiler::VisitTestTypeOf() {
  // All other undetectable maps are typeof undefined.
  Register map_bit_field = kInterpreterAccumulatorRegister;
  __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
-  __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+  __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
  __ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
                   Condition::kZero, &not_undetectable, Label::kNear);
@@ -1685,7 +1685,7 @@ void BaselineCompiler::VisitTestTypeOf() {
  // Check if the map is callable but not undetectable.
  Register map_bit_field = kInterpreterAccumulatorRegister;
  __ LoadMap(map_bit_field, kInterpreterAccumulatorRegister);
-  __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+  __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
  __ TestAndBranch(map_bit_field, Map::Bits1::IsCallableBit::kMask,
                   Condition::kZero, &not_callable, Label::kNear);
  __ TestAndBranch(map_bit_field, Map::Bits1::IsUndetectableBit::kMask,
@@ -1717,7 +1717,7 @@ void BaselineCompiler::VisitTestTypeOf() {
  // If the map is undetectable or callable, return false.
  Register map_bit_field = kInterpreterAccumulatorRegister;
-  __ LoadByteField(map_bit_field, map, Map::kBitFieldOffset);
+  __ LoadWord8Field(map_bit_field, map, Map::kBitFieldOffset);
  __ TestAndBranch(map_bit_field,
                   Map::Bits1::IsUndetectableBit::kMask |
                       Map::Bits1::IsCallableBit::kMask,
@@ -1925,21 +1925,50 @@ void BaselineCompiler::VisitCreateRestParameter() {
}
void BaselineCompiler::VisitJumpLoop() {
-  BaselineAssembler::ScratchRegisterScope scope(&basm_);
-  Register scratch = scope.AcquireScratch();
-  Label osr_not_armed;
+  Label osr_not_armed, osr;
  {
+    BaselineAssembler::ScratchRegisterScope scope(&basm_);
+    Register osr_urgency_and_install_target = scope.AcquireScratch();
    ASM_CODE_COMMENT_STRING(&masm_, "OSR Check Armed");
-    Register osr_urgency = scratch;
-    __ LoadRegister(osr_urgency, interpreter::Register::bytecode_array());
-    __ LoadByteField(osr_urgency, osr_urgency,
-                     BytecodeArray::kOsrUrgencyOffset);
+    __ LoadRegister(osr_urgency_and_install_target,
+                    interpreter::Register::bytecode_array());
+    __ LoadWord16FieldZeroExtend(
+        osr_urgency_and_install_target, osr_urgency_and_install_target,
+        BytecodeArray::kOsrUrgencyAndInstallTargetOffset);
    int loop_depth = iterator().GetImmediateOperand(1);
-    __ JumpIfByte(Condition::kUnsignedLessThanEqual, osr_urgency, loop_depth,
-                  &osr_not_armed);
-    CallBuiltin<Builtin::kBaselineOnStackReplacement>();
+    __ JumpIfImmediate(Condition::kUnsignedLessThanEqual,
+                       osr_urgency_and_install_target, loop_depth,
+                       &osr_not_armed, Label::kNear);
+    // TODO(jgruber): Move the extended checks into the
+    // BaselineOnStackReplacement builtin.
+    // OSR based on urgency, i.e. is the OSR urgency greater than the current
+    // loop depth?
+    STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
+    Register scratch2 = scope.AcquireScratch();
+    __ Word32And(scratch2, osr_urgency_and_install_target,
+                 BytecodeArray::OsrUrgencyBits::kMask);
+    __ JumpIfImmediate(Condition::kUnsignedGreaterThan, scratch2, loop_depth,
+                       &osr, Label::kNear);
+    // OSR based on the install target offset, i.e. does the current bytecode
+    // offset match the install target offset?
+    static constexpr int kShift = BytecodeArray::OsrInstallTargetBits::kShift;
+    static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
+    const int encoded_current_offset =
+        BytecodeArray::OsrInstallTargetFor(
+            BytecodeOffset{iterator().current_offset()})
+        << kShift;
+    __ Word32And(scratch2, osr_urgency_and_install_target, kMask);
+    __ JumpIfImmediate(Condition::kNotEqual, scratch2, encoded_current_offset,
+                       &osr_not_armed, Label::kNear);
  }
+  __ Bind(&osr);
+  CallBuiltin<Builtin::kBaselineOnStackReplacement>();
  __ Bind(&osr_not_armed);
  Label* label = &labels_[iterator().GetJumpTargetOffset()]->unlinked;
  int weight = iterator().GetRelativeJumpTargetOffset() -
@@ -2184,7 +2213,7 @@ void BaselineCompiler::VisitThrowIfNotSuperConstructor() {
  LoadRegister(reg, 0);
  Register map_bit_field = scratch_scope.AcquireScratch();
  __ LoadMap(map_bit_field, reg);
-  __ LoadByteField(map_bit_field, map_bit_field, Map::kBitFieldOffset);
+  __ LoadWord8Field(map_bit_field, map_bit_field, Map::kBitFieldOffset);
  __ TestAndBranch(map_bit_field, Map::Bits1::IsConstructorBit::kMask,
                   Condition::kNotZero, &done, Label::kNear);
......
@@ -103,18 +103,29 @@ void BaselineAssembler::JumpTarget() {
void BaselineAssembler::Jump(Label* target, Label::Distance distance) {
  __ jmp(target, distance);
}
void BaselineAssembler::JumpIfRoot(Register value, RootIndex index,
                                   Label* target, Label::Distance distance) {
  __ JumpIfRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfNotRoot(Register value, RootIndex index,
                                      Label* target, Label::Distance distance) {
  __ JumpIfNotRoot(value, index, target, distance);
}
void BaselineAssembler::JumpIfSmi(Register value, Label* target,
                                  Label::Distance distance) {
  __ JumpIfSmi(value, target, distance);
}
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+                                        Label* target,
+                                        Label::Distance distance) {
+  __ cmp(left, Immediate(right));
+  __ j(AsMasmCondition(cc), target, distance);
+}
void BaselineAssembler::JumpIfNotSmi(Register value, Label* target,
                                     Label::Distance distance) {
  __ JumpIfNotSmi(value, target, distance);
@@ -323,22 +334,32 @@ void BaselineAssembler::LoadTaggedPointerField(Register output, Register source,
                                               int offset) {
  __ mov(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedSignedField(Register output, Register source,
                                              int offset) {
  __ mov(output, FieldOperand(source, offset));
}
void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ mov(output, FieldOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
-                                      int offset) {
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+                                                  Register source, int offset) {
+  __ movzx_w(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+                                       int offset) {
  __ mov_b(output, FieldOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
                                               Smi value) {
  __ mov(FieldOperand(target, offset), Immediate(value));
}
void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
                                                         int offset,
                                                         Register value) {
@@ -349,6 +370,7 @@ void BaselineAssembler::StoreTaggedFieldWithWriteBarrier(Register target,
  __ mov(FieldOperand(target, offset), value);
  __ RecordWriteField(target, offset, value, scratch, SaveFPRegsMode::kIgnore);
}
void BaselineAssembler::StoreTaggedFieldNoWriteBarrier(Register target,
                                                       int offset,
                                                       Register value) {
@@ -391,6 +413,11 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  __ add(lhs, Immediate(rhs));
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+  Move(output, lhs);
+  __ and_(output, Immediate(rhs));
+}
void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ASM_CODE_COMMENT(masm_);
......
@@ -346,8 +346,8 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ Ld_d(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
-                                      int offset) {
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+                                       int offset) {
  __ Ld_b(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
......
@@ -356,8 +356,8 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ Lw(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
-                                      int offset) {
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+                                       int offset) {
  __ lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
......
@@ -354,8 +354,8 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ Ld(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
-                                      int offset) {
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+                                       int offset) {
  __ Lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
......
@@ -455,8 +455,8 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  UNIMPLEMENTED();
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
-                                      int offset) {
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+                                       int offset) {
  UNIMPLEMENTED();
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
......
@@ -347,8 +347,8 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
-                                      int offset) {
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+                                       int offset) {
  __ Lb(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
......
@@ -439,8 +439,8 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ LoadAnyTaggedField(output, FieldMemOperand(source, offset), r0);
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
-                                      int offset) {
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+                                       int offset) {
  __ LoadU8(output, FieldMemOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
......
@@ -195,6 +195,14 @@ void BaselineAssembler::JumpIfSmi(Condition cc, Register lhs, Register rhs,
  __ SmiCompare(lhs, rhs);
  __ j(AsMasmCondition(cc), target, distance);
}
+void BaselineAssembler::JumpIfImmediate(Condition cc, Register left, int right,
+                                        Label* target,
+                                        Label::Distance distance) {
+  __ cmpq(left, Immediate(right));
+  __ j(AsMasmCondition(cc), target, distance);
+}
// cmp_tagged
void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
                                     MemOperand operand, Label* target,
@@ -338,8 +346,12 @@ void BaselineAssembler::LoadTaggedAnyField(Register output, Register source,
                                           int offset) {
  __ LoadAnyTaggedField(output, FieldOperand(source, offset));
}
-void BaselineAssembler::LoadByteField(Register output, Register source,
-                                      int offset) {
+void BaselineAssembler::LoadWord16FieldZeroExtend(Register output,
+                                                  Register source, int offset) {
+  __ movzxwq(output, FieldOperand(source, offset));
+}
+void BaselineAssembler::LoadWord8Field(Register output, Register source,
+                                       int offset) {
  __ movb(output, FieldOperand(source, offset));
}
void BaselineAssembler::StoreTaggedSignedField(Register target, int offset,
@@ -402,6 +414,11 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
  }
}
+void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
+  Move(output, lhs);
+  __ andq(output, Immediate(rhs));
+}
void BaselineAssembler::Switch(Register reg, int case_value_base,
                               Label** labels, int num_labels) {
  ASM_CODE_COMMENT(masm_);
......
@@ -1065,6 +1065,22 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, r6);
}
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm, Register bytecode_array,
+                                 Register scratch) {
+  // Reset the bytecode age and OSR state (optimized to a single write).
+  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+                BytecodeArray::kOsrUrgencyAndInstallTargetOffset + kUInt16Size);
+  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+  __ mov(scratch, Operand(0));
+  __ str(scratch,
+         FieldMemOperand(bytecode_array,
+                         BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+}  // namespace
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
@@ -1135,21 +1151,10 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  // the frame, so load it into a register.
  Register bytecodeArray = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
-  // are 8-bit fields next to each other, so we could just optimize by writing
-  // a 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
  {
    UseScratchRegisterScope temps(masm);
-    Register scratch = temps.Acquire();
-    __ mov(scratch, Operand(0));
-    __ strh(scratch,
-            FieldMemOperand(bytecodeArray, BytecodeArray::kOsrUrgencyOffset));
+    ResetBytecodeAgeAndOsrState(masm, bytecodeArray, temps.Acquire());
  }
  __ Push(argc, bytecodeArray);
  // Baseline code frames store the feedback vector where interpreter would
@@ -1289,15 +1294,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  FrameScope frame_scope(masm, StackFrame::MANUAL);
  __ PushStandardFrame(closure);
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
-  // 8-bit fields next to each other, so we could just optimize by writing a
-  // 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ mov(r9, Operand(0));
-  __ strh(r9, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                              BytecodeArray::kOsrUrgencyOffset));
+  ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister, r9);
  // Load the initial bytecode offset.
  __ mov(kInterpreterBytecodeOffsetRegister,
@@ -3659,14 +3656,11 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  __ Pop(kInterpreterAccumulatorRegister);
  if (is_osr) {
-    // Reset the OSR loop nesting depth to disarm back edges.
-    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
-    // Sparkplug here.
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
+    // disarm Sparkplug here.
    UseScratchRegisterScope temps(masm);
-    Register scratch = temps.Acquire();
-    __ mov(scratch, Operand(0));
-    __ strh(scratch, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                     BytecodeArray::kOsrUrgencyOffset));
+    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister,
+                                temps.Acquire());
    Generate_OSREntry(masm, code_obj,
                      Operand(Code::kHeaderSize - kHeapObjectTag));
  } else {
......
@@ -1248,6 +1248,21 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
  TailCallOptimizedCodeSlot(masm, optimized_code_entry, x4);
}
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+                                 Register bytecode_array) {
+  // Reset the bytecode age and OSR state (optimized to a single write).
+  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+                BytecodeArray::kOsrUrgencyAndInstallTargetOffset + kUInt16Size);
+  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+  __ Str(wzr,
+         FieldMemOperand(bytecode_array,
+                         BytecodeArray::kOsrUrgencyAndInstallTargetOffset));
+}
+}  // namespace
// static
void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  UseScratchRegisterScope temps(masm);
@@ -1310,16 +1325,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  // the frame, so load it into a register.
  Register bytecode_array = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
-  // are 8-bit fields next to each other, so we could just optimize by writing
-  // a 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ Strh(wzr,
-          FieldMemOperand(bytecode_array, BytecodeArray::kOsrUrgencyOffset));
+  ResetBytecodeAgeAndOsrState(masm, bytecode_array);
  __ Push(argc, bytecode_array);
  // Baseline code frames store the feedback vector where interpreter would
@@ -1469,15 +1475,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ mov(fp, sp);
  __ Push(cp, closure);
-  // Reset code age.
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
-  // 8-bit fields next to each other, so we could just optimize by writing a
-  // 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                               BytecodeArray::kOsrUrgencyOffset));
+  ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
  // Load the initial bytecode offset.
  __ Mov(kInterpreterBytecodeOffsetRegister,
@@ -4182,11 +4180,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  __ Pop(kInterpreterAccumulatorRegister, padreg);
  if (is_osr) {
-    // Reset the OSR loop nesting depth to disarm back edges.
-    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
-    // Sparkplug here.
-    __ Strh(wzr, FieldMemOperand(kInterpreterBytecodeArrayRegister,
-                                 BytecodeArray::kOsrUrgencyOffset));
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
+    // disarm Sparkplug here.
+    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
    Generate_OSREntry(masm, code_obj, Code::kHeaderSize - kHeapObjectTag);
  } else {
    __ Add(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
......
@@ -1036,6 +1036,21 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
  TailCallOptimizedCodeSlot(masm, optimized_code_entry);
}
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+                                 Register bytecode_array) {
+  // Reset the bytecode age and OSR state (optimized to a single write).
+  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+                BytecodeArray::kOsrUrgencyAndInstallTargetOffset + kUInt16Size);
+  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+  __ mov_w(FieldOperand(bytecode_array,
+                        BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
+           Immediate(0));
+}
+}  // namespace
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1124,15 +1139,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
        AbortReason::kFunctionDataShouldBeBytecodeArrayOnInterpreterEntry);
  }
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
-  // 8-bit fields next to each other, so we could just optimize by writing a
-  // 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
-                        BytecodeArray::kOsrUrgencyOffset),
-           Immediate(0));
+  ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
  // Push bytecode array.
  __ push(kInterpreterBytecodeArrayRegister);
@@ -1744,14 +1751,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  // the frame, so load it into a register.
  Register bytecode_array = scratch;
  __ movd(bytecode_array, saved_bytecode_array);
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
-  // are 8-bit fields next to each other, so we could just optimize by writing
-  // a 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ mov_w(FieldOperand(bytecode_array, BytecodeArray::kOsrUrgencyOffset),
-           Immediate(0));
+  ResetBytecodeAgeAndOsrState(masm, bytecode_array);
  __ Push(bytecode_array);
  // Baseline code frames store the feedback vector where interpreter would
@@ -4275,12 +4275,9 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  __ pop(kInterpreterAccumulatorRegister);
  if (is_osr) {
-    // Reset the OSR loop nesting depth to disarm back edges.
-    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
-    // Sparkplug here.
-    __ mov_w(FieldOperand(kInterpreterBytecodeArrayRegister,
-                          BytecodeArray::kOsrUrgencyOffset),
-             Immediate(0));
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't
+    // disarm Sparkplug here.
+    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
    Generate_OSREntry(masm, code_obj);
  } else {
    __ jmp(code_obj);
......
@@ -1144,6 +1144,21 @@ static void MaybeOptimizeCodeOrTailCallOptimizedCodeSlot(
                           jump_mode);
}
+namespace {
+void ResetBytecodeAgeAndOsrState(MacroAssembler* masm,
+                                 Register bytecode_array) {
+  // Reset the bytecode age and OSR state (optimized to a single write).
+  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
+                BytecodeArray::kOsrUrgencyAndInstallTargetOffset + kUInt16Size);
+  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
+  __ movl(FieldOperand(bytecode_array,
+                       BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
+          Immediate(0));
+}
+}  // namespace
// Generate code for entering a JS function with the interpreter.
// On entry to the function the receiver and arguments have been pushed on the
// stack left to right.
@@ -1219,15 +1234,7 @@ void Builtins::Generate_InterpreterEntryTrampoline(MacroAssembler* masm) {
  __ Push(kJavaScriptCallTargetRegister);    // Callee's JS function.
  __ Push(kJavaScriptCallArgCountRegister);  // Actual argument count.
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset are
-  // 8-bit fields next to each other, so we could just optimize by writing a
-  // 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
-                       BytecodeArray::kOsrUrgencyOffset),
-          Immediate(0));
+  ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
  // Load initial bytecode offset.
  __ Move(kInterpreterBytecodeOffsetRegister,
@@ -1733,15 +1740,7 @@ void Builtins::Generate_BaselineOutOfLinePrologue(MacroAssembler* masm) {
  // onto the frame, so load it into a register.
  Register bytecode_array = descriptor.GetRegisterParameter(
      BaselineOutOfLinePrologueDescriptor::kInterpreterBytecodeArray);
-  // Reset code age and the OSR arming. The OSR field and BytecodeAgeOffset
-  // are 8-bit fields next to each other, so we could just optimize by
-  // writing a 16-bit. These static asserts guard our assumption is valid.
-  STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset ==
-                BytecodeArray::kOsrUrgencyOffset + kCharSize);
-  STATIC_ASSERT(BytecodeArray::kNoAgeBytecodeAge == 0);
-  __ movw(FieldOperand(bytecode_array, BytecodeArray::kOsrUrgencyOffset),
-          Immediate(0));
+  ResetBytecodeAgeAndOsrState(masm, bytecode_array);
  __ Push(bytecode_array);
  // Baseline code frames store the feedback vector where interpreter would
@@ -2726,21 +2725,26 @@ void Generate_OSREntry(MacroAssembler* masm, Register entry_address) {
  __ ret(0);
}
-void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
+enum class OsrSourceTier {
+  kInterpreter,
+  kBaseline,
+};
+void OnStackReplacement(MacroAssembler* masm, OsrSourceTier source) {
  {
    FrameScope scope(masm, StackFrame::INTERNAL);
    __ CallRuntime(Runtime::kCompileOptimizedOSR);
  }
-  Label skip;
+  Label jump_to_returned_code;
  // If the code object is null, just return to the caller.
  __ testq(rax, rax);
-  __ j(not_equal, &skip, Label::kNear);
+  __ j(not_equal, &jump_to_returned_code, Label::kNear);
  __ ret(0);
-  __ bind(&skip);
+  __ bind(&jump_to_returned_code);
-  if (is_interpreter) {
+  if (source == OsrSourceTier::kInterpreter) {
    // Drop the handler frame that is sitting on top of the actual
    // JavaScript frame. This is the case when OSR is triggered from bytecode.
    __ leave();
@@ -2768,13 +2772,13 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
}  // namespace
void Builtins::Generate_InterpreterOnStackReplacement(MacroAssembler* masm) {
-  return OnStackReplacement(masm, true);
+  OnStackReplacement(masm, OsrSourceTier::kInterpreter);
}
void Builtins::Generate_BaselineOnStackReplacement(MacroAssembler* masm) {
  __ movq(kContextRegister,
          MemOperand(rbp, BaselineFrameConstants::kContextOffset));
-  return OnStackReplacement(masm, false);
+  OnStackReplacement(masm, OsrSourceTier::kBaseline);
}
#if V8_ENABLE_WEBASSEMBLY
@@ -5124,12 +5128,8 @@ void Generate_BaselineOrInterpreterEntry(MacroAssembler* masm,
  __ popq(kInterpreterAccumulatorRegister);
  if (is_osr) {
-    // Reset the OSR loop nesting depth to disarm back edges.
-    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
-    // Sparkplug here.
-    __ movw(FieldOperand(kInterpreterBytecodeArrayRegister,
-                         BytecodeArray::kOsrUrgencyOffset),
-            Immediate(0));
+    // TODO(pthier): Separate Sparkplug and Turbofan OSR states.
+    ResetBytecodeAgeAndOsrState(masm, kInterpreterBytecodeArrayRegister);
    Generate_OSREntry(masm, code_obj);
  } else {
    __ jmp(code_obj);
......
@@ -3363,7 +3363,7 @@ MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
  // -- Alright, decided to proceed. --
-  // Disarm all back edges, i.e. reset the OSR urgency.
+  // Disarm all back edges, i.e. reset the OSR urgency and install target.
  //
  // Note that the bytecode array active on the stack might be different from
  // the one installed on the function (e.g. patched by debugger). This however
@@ -3371,7 +3371,7 @@ MaybeHandle<CodeT> Compiler::CompileOptimizedOSR(Isolate* isolate,
  // BytecodeOffset representing the entry point will be valid for any copy of
  // the bytecode.
  Handle<BytecodeArray> bytecode(frame->GetBytecodeArray(), isolate);
-  bytecode->reset_osr_urgency();
+  bytecode->reset_osr_urgency_and_install_target();
  CompilerTracer::TraceOptimizeOSR(isolate, function, osr_offset, mode);
  MaybeHandle<CodeT> result = GetOrCompileOptimized(
@@ -3435,9 +3435,12 @@ bool Compiler::FinalizeTurbofanCompilationJob(TurbofanCompilationJob* job,
    OptimizedCodeCache::Insert(compilation_info);
    CompilerTracer::TraceCompletedJob(isolate, compilation_info);
    if (IsOSR(osr_offset)) {
-      // TODO(jgruber): Implement a targeted install request for the
-      // specific osr_offset.
-      shared->GetBytecodeArray(isolate).RequestOsrAtNextOpportunity();
+      if (FLAG_trace_osr) {
+        PrintF(CodeTracer::Scope{isolate->GetCodeTracer()}.file(),
+               "[OSR - requesting install. function: %s, osr offset: %d]\n",
+               function->DebugNameCStr().get(), osr_offset.ToInt());
+      }
+      shared->GetBytecodeArray(isolate).set_osr_install_target(osr_offset);
    } else {
      function->set_code(*compilation_info->code(), kReleaseStore);
    }
......
@@ -1086,6 +1086,20 @@ class V8_EXPORT_PRIVATE CodeAssembler {
  TNode<Word32T> Word32Shr(TNode<Word32T> value, int shift);
  TNode<Word32T> Word32Sar(TNode<Word32T> value, int shift);
+  // Convenience overloads.
+  TNode<Int32T> Int32Sub(TNode<Int32T> left, int right) {
+    return Int32Sub(left, Int32Constant(right));
+  }
+  TNode<Word32T> Word32And(TNode<Word32T> left, int right) {
+    return Word32And(left, Int32Constant(right));
+  }
+  TNode<Int32T> Word32Shl(TNode<Int32T> left, int right) {
+    return Word32Shl(left, Int32Constant(right));
+  }
+  TNode<BoolT> Word32Equal(TNode<Word32T> left, int right) {
+    return Word32Equal(left, Int32Constant(right));
+  }
  // Unary
#define DECLARE_CODE_ASSEMBLER_UNARY_OP(name, ResType, ArgType) \
  TNode<ResType> name(TNode<ArgType> a);
......
@@ -247,7 +247,7 @@ Handle<BytecodeArray> FactoryBase<Impl>::NewBytecodeArray(
  instance.set_parameter_count(parameter_count);
  instance.set_incoming_new_target_or_generator_register(
      interpreter::Register::invalid_value());
-  instance.reset_osr_urgency();
+  instance.reset_osr_urgency_and_install_target();
  instance.set_bytecode_age(BytecodeArray::kNoAgeBytecodeAge);
  instance.set_constant_pool(*constant_pool);
  instance.set_handler_table(read_only_roots().empty_byte_array(),
......
@@ -1307,16 +1307,18 @@ void InterpreterAssembler::UpdateInterruptBudgetOnReturn() {
  // length of the back-edge, so we just have to correct for the non-zero offset
  // of the first bytecode.
-  const int kFirstBytecodeOffset = BytecodeArray::kHeaderSize - kHeapObjectTag;
  TNode<Int32T> profiling_weight =
      Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()),
               Int32Constant(kFirstBytecodeOffset));
  UpdateInterruptBudget(profiling_weight, true);
}
-TNode<Int8T> InterpreterAssembler::LoadOsrUrgency() {
-  return LoadObjectField<Int8T>(BytecodeArrayTaggedPointer(),
-                                BytecodeArray::kOsrUrgencyOffset);
+TNode<Int16T> InterpreterAssembler::LoadOsrUrgencyAndInstallTarget() {
+  // We're loading a 16-bit field, mask it.
+  return UncheckedCast<Int16T>(Word32And(
+      LoadObjectField<Int16T>(BytecodeArrayTaggedPointer(),
+                              BytecodeArray::kOsrUrgencyAndInstallTargetOffset),
+      0xFFFF));
}
void InterpreterAssembler::Abort(AbortReason abort_reason) {
......
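The explicit 0xFFFF mask guards the 16-bit load against sign extension when it is widened to a 32-bit word. A standalone C++ model of the hazard (a sketch, not V8 code; two's complement assumed):

#include <cassert>
#include <cstdint>

// Model: widening an int16_t with the high bit set sign-extends, so the
// low 16 bits must be masked out to recover the packed field value.
uint32_t LoadPackedField(int16_t raw) {
  int32_t widened = raw;  // e.g. 0x8001 widens to 0xFFFF8001
  return static_cast<uint32_t>(widened) & 0xFFFF;
}

int main() {
  assert(LoadPackedField(static_cast<int16_t>(0x8001)) == 0x8001u);
  return 0;
}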
...@@ -234,8 +234,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { ...@@ -234,8 +234,8 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Updates the profiler interrupt budget for a return. // Updates the profiler interrupt budget for a return.
void UpdateInterruptBudgetOnReturn(); void UpdateInterruptBudgetOnReturn();
// Returns the OSR urgency from the bytecode header. // Returns the OSR urgency and install target from the bytecode header.
TNode<Int8T> LoadOsrUrgency(); TNode<Int16T> LoadOsrUrgencyAndInstallTarget();
// Dispatch to the bytecode. // Dispatch to the bytecode.
void Dispatch(); void Dispatch();
...@@ -266,6 +266,12 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler { ...@@ -266,6 +266,12 @@ class V8_EXPORT_PRIVATE InterpreterAssembler : public CodeStubAssembler {
// Perform OnStackReplacement. // Perform OnStackReplacement.
void OnStackReplacement(TNode<Context> context, TNode<IntPtrT> relative_jump); void OnStackReplacement(TNode<Context> context, TNode<IntPtrT> relative_jump);
// The BytecodeOffset() is the offset from the tagged BytecodeArray pointer; to
// translate it into a runtime `BytecodeOffset` (defined in utils.h as the
// offset from the start of the bytecode section), subtract this constant.
static constexpr int kFirstBytecodeOffset =
BytecodeArray::kHeaderSize - kHeapObjectTag;
// Returns the offset from the BytecodeArrayPointer of the current bytecode. // Returns the offset from the BytecodeArrayPointer of the current bytecode.
TNode<IntPtrT> BytecodeOffset(); TNode<IntPtrT> BytecodeOffset();
......
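For concreteness, the translation that the JumpLoop handler below performs with this constant (a sketch; function name hypothetical):

// BytecodeOffset() is measured from the tagged BytecodeArray pointer, so the
// first bytecode sits at kHeaderSize - kHeapObjectTag. Subtracting that
// constant yields the 0-based offset that BytecodeOffset (utils.h) uses.
int ToRuntimeBytecodeOffset(int raw_offset_from_pointer) {
  return raw_offset_from_pointer - kFirstBytecodeOffset;
}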
...@@ -2166,26 +2166,50 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) { ...@@ -2166,26 +2166,50 @@ IGNITION_HANDLER(JumpIfJSReceiverConstant, InterpreterAssembler) {
// JumpLoop <imm> <loop_depth> // JumpLoop <imm> <loop_depth>
// //
// Jump by the number of bytes represented by the immediate operand |imm|. Also // Jump by the number of bytes represented by the immediate operand |imm|. Also
// performs a loop nesting check, a stack check, and potentially triggers OSR // performs a loop nesting check, a stack check, and potentially triggers OSR.
// in case `loop_depth < osr_urgency`.
IGNITION_HANDLER(JumpLoop, InterpreterAssembler) { IGNITION_HANDLER(JumpLoop, InterpreterAssembler) {
TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0)); TNode<IntPtrT> relative_jump = Signed(BytecodeOperandUImmWord(0));
TNode<Int32T> loop_depth = BytecodeOperandImm(1); TNode<Int32T> loop_depth = BytecodeOperandImm(1);
TNode<Int8T> osr_urgency = LoadOsrUrgency(); TNode<Int16T> osr_urgency_and_install_target =
LoadOsrUrgencyAndInstallTarget();
TNode<Context> context = GetContext(); TNode<Context> context = GetContext();
// Check if OSR points at the given {loop_depth} are armed by comparing it to // OSR requests can be triggered either through urgency (when > the current
// the current {osr_urgency} loaded from the header of the BytecodeArray. // loop depth), or an explicit install target (= the lower bits of the
Label ok(this), osr_armed(this, Label::kDeferred); // targeted bytecode offset).
TNode<BoolT> condition = Int32GreaterThanOrEqual(loop_depth, osr_urgency); Label ok(this), maybe_osr(this, Label::kDeferred);
Branch(condition, &ok, &osr_armed); Branch(Int32GreaterThanOrEqual(loop_depth, osr_urgency_and_install_target),
&ok, &maybe_osr);
BIND(&ok); BIND(&ok);
// The backward jump can trigger a budget interrupt, which can handle stack // The backward jump can trigger a budget interrupt, which can handle stack
// interrupts, so we don't need to explicitly handle them here. // interrupts, so we don't need to explicitly handle them here.
JumpBackward(relative_jump); JumpBackward(relative_jump);
BIND(&osr_armed); BIND(&maybe_osr);
Label osr(this);
// OSR based on urgency, i.e. is the OSR urgency greater than the current
// loop depth?
STATIC_ASSERT(BytecodeArray::OsrUrgencyBits::kShift == 0);
TNode<Word32T> osr_urgency = Word32And(osr_urgency_and_install_target,
BytecodeArray::OsrUrgencyBits::kMask);
GotoIf(Int32GreaterThan(osr_urgency, loop_depth), &osr);
// OSR based on the install target offset, i.e. does the current bytecode
// offset match the install target offset?
//
// if (((offset << kShift) & kMask) == (target & kMask)) { ... }
static constexpr int kShift = BytecodeArray::OsrInstallTargetBits::kShift;
static constexpr int kMask = BytecodeArray::OsrInstallTargetBits::kMask;
// Note: We OR in 1 to avoid 0 offsets, see BytecodeArray::OsrInstallTargetFor.
TNode<Word32T> actual = Word32Or(
Int32Sub(TruncateIntPtrToInt32(BytecodeOffset()), kFirstBytecodeOffset),
Int32Constant(1));
actual = Word32And(Word32Shl(UncheckedCast<Int32T>(actual), kShift), kMask);
TNode<Word32T> expected = Word32And(osr_urgency_and_install_target, kMask);
Branch(Word32Equal(actual, expected), &osr, &ok);
BIND(&osr);
OnStackReplacement(context, relative_jump); OnStackReplacement(context, relative_jump);
} }
......
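To make the merged check concrete, here is a standalone C++ model of the JumpLoop decision (a sketch under this CL's bit layout, not V8 code; the mask/shift values are spelled out as plain constants rather than the generated OsrUrgencyBits/OsrInstallTargetBits definitions):

#include <cassert>
#include <cstdint>

// Bit layout per the Torque struct added below: urgency in bits 0..2,
// install target (13 LSB of the bytecode offset, OR'd with 1) in bits 3..15.
constexpr uint32_t kUrgencyMask = 0x7;           // OsrUrgencyBits::kMask
constexpr int kInstallTargetShift = 3;           // OsrInstallTargetBits::kShift
constexpr uint32_t kInstallTargetMask = 0xFFF8;  // OsrInstallTargetBits::kMask

constexpr uint32_t OsrInstallTargetFor(int offset) {
  return static_cast<uint32_t>(offset | 1) &
         (kInstallTargetMask >> kInstallTargetShift);
}

bool ShouldTryOsr(uint32_t packed, int loop_depth, int bytecode_offset) {
  // Fast path, mirroring the single merged comparison in the handler: if
  // neither urgency nor an install target is set, packed <= loop_depth.
  if (loop_depth >= static_cast<int>(packed)) return false;
  // Check 1: urgency exceeds the current loop depth.
  if (static_cast<int>(packed & kUrgencyMask) > loop_depth) return true;
  // Check 2: install target matches this JumpLoop's offset. Both sides OR
  // in 1, so even offsets still compare equal (and a set target is never 0).
  uint32_t actual =
      (static_cast<uint32_t>(bytecode_offset | 1) << kInstallTargetShift) &
      kInstallTargetMask;
  return actual == (packed & kInstallTargetMask);
}

int main() {
  // Urgency 3 arms all loops of depth 0..2.
  assert(ShouldTryOsr(/*packed=*/3, /*loop_depth=*/2, /*bytecode_offset=*/40));
  assert(!ShouldTryOsr(3, 5, 40));
  // An install target set for offset 40 triggers only at offset 40.
  uint32_t packed = OsrInstallTargetFor(40) << kInstallTargetShift;
  assert(ShouldTryOsr(packed, 5, 40));
  assert(!ShouldTryOsr(packed, 5, 20));
  return 0;
}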
...@@ -1188,13 +1188,14 @@ void BytecodeArray::set_incoming_new_target_or_generator_register( ...@@ -1188,13 +1188,14 @@ void BytecodeArray::set_incoming_new_target_or_generator_register(
} }
int BytecodeArray::osr_urgency() const { int BytecodeArray::osr_urgency() const {
return ACQUIRE_READ_INT8_FIELD(*this, kOsrUrgencyOffset); return OsrUrgencyBits::decode(osr_urgency_and_install_target());
} }
void BytecodeArray::set_osr_urgency(int urgency) { void BytecodeArray::set_osr_urgency(int urgency) {
DCHECK(0 <= urgency && urgency <= BytecodeArray::kMaxOsrUrgency); DCHECK(0 <= urgency && urgency <= BytecodeArray::kMaxOsrUrgency);
STATIC_ASSERT(BytecodeArray::kMaxOsrUrgency < kMaxInt8); STATIC_ASSERT(BytecodeArray::kMaxOsrUrgency <= OsrUrgencyBits::kMax);
RELEASE_WRITE_INT8_FIELD(*this, kOsrUrgencyOffset, urgency); uint32_t value = osr_urgency_and_install_target();
set_osr_urgency_and_install_target(OsrUrgencyBits::update(value, urgency));
} }
BytecodeArray::Age BytecodeArray::bytecode_age() const { BytecodeArray::Age BytecodeArray::bytecode_age() const {
...@@ -1208,6 +1209,27 @@ void BytecodeArray::RequestOsrAtNextOpportunity() { ...@@ -1208,6 +1209,27 @@ void BytecodeArray::RequestOsrAtNextOpportunity() {
set_osr_urgency(kMaxOsrUrgency); set_osr_urgency(kMaxOsrUrgency);
} }
int BytecodeArray::osr_install_target() {
return OsrInstallTargetBits::decode(osr_urgency_and_install_target());
}
void BytecodeArray::set_osr_install_target(BytecodeOffset jump_loop_offset) {
DCHECK_LE(jump_loop_offset.ToInt(), length());
set_osr_urgency_and_install_target(OsrInstallTargetBits::update(
osr_urgency_and_install_target(), OsrInstallTargetFor(jump_loop_offset)));
}
void BytecodeArray::reset_osr_install_target() {
uint32_t value = osr_urgency_and_install_target();
set_osr_urgency_and_install_target(
OsrInstallTargetBits::update(value, kNoOsrInstallTarget));
}
void BytecodeArray::reset_osr_urgency_and_install_target() {
set_osr_urgency_and_install_target(OsrUrgencyBits::encode(0) |
OsrInstallTargetBits::encode(0));
}
void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) { void BytecodeArray::set_bytecode_age(BytecodeArray::Age age) {
DCHECK_GE(age, kFirstBytecodeAge); DCHECK_GE(age, kFirstBytecodeAge);
DCHECK_LE(age, kLastBytecodeAge); DCHECK_LE(age, kLastBytecodeAge);
......
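Note that set_osr_urgency and set_osr_install_target above are read-modify-write operations on the shared 16-bit field: updating one subfield preserves the other. A minimal model of the BitField-style update they rely on (sketch, plain constants):

constexpr uint32_t UpdateField(uint32_t packed, uint32_t value, uint32_t mask,
                               int shift) {
  // Clear the subfield's bits, then OR in the new value shifted into place.
  return (packed & ~mask) | ((value << shift) & mask);
}
// Setting urgency 3 leaves an existing install target (bits 3..15) intact.
static_assert(UpdateField(/*packed=*/0x0148, /*urgency=*/3, 0x7, 0) == 0x014B);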
...@@ -952,6 +952,8 @@ DEFINE_OPERATORS_FOR_FLAGS(DependentCode::DependencyGroups) ...@@ -952,6 +952,8 @@ DEFINE_OPERATORS_FOR_FLAGS(DependentCode::DependencyGroups)
class BytecodeArray class BytecodeArray
: public TorqueGeneratedBytecodeArray<BytecodeArray, FixedArrayBase> { : public TorqueGeneratedBytecodeArray<BytecodeArray, FixedArrayBase> {
public: public:
DEFINE_TORQUE_GENERATED_OSRURGENCY_AND_INSTALL_TARGET()
enum Age { enum Age {
kNoAgeBytecodeAge = 0, kNoAgeBytecodeAge = 0,
kQuadragenarianBytecodeAge, kQuadragenarianBytecodeAge,
...@@ -994,11 +996,32 @@ class BytecodeArray ...@@ -994,11 +996,32 @@ class BytecodeArray
// the function becomes hotter. When the current loop depth is less than the // the function becomes hotter. When the current loop depth is less than the
// osr_urgency, JumpLoop calls into runtime to attempt OSR optimization. // osr_urgency, JumpLoop calls into runtime to attempt OSR optimization.
static constexpr int kMaxOsrUrgency = 6; static constexpr int kMaxOsrUrgency = 6;
STATIC_ASSERT(kMaxOsrUrgency <= OsrUrgencyBits::kMax);
inline int osr_urgency() const; inline int osr_urgency() const;
inline void set_osr_urgency(int urgency); inline void set_osr_urgency(int urgency);
inline void reset_osr_urgency(); inline void reset_osr_urgency();
inline void RequestOsrAtNextOpportunity(); inline void RequestOsrAtNextOpportunity();
// The [osr_install_target] is used upon finishing concurrent OSR
// compilation; instead of bumping the osr_urgency (which would target all
// JumpLoops of appropriate loop_depth), we target a specific JumpLoop at the
// given bytecode offset.
static constexpr int kNoOsrInstallTarget = 0;
static constexpr int OsrInstallTargetFor(BytecodeOffset offset) {
// Any set `osr_install_target` must be non-zero since zero is the 'unset'
// value and is ignored by generated code. For branchless code (both here
// and in generated code), we simply OR in a 1.
STATIC_ASSERT(kNoOsrInstallTarget == 0);
return (offset.ToInt() | 1) &
(OsrInstallTargetBits::kMask >> OsrInstallTargetBits::kShift);
}
inline int osr_install_target();
inline void set_osr_install_target(BytecodeOffset jump_loop_offset);
inline void reset_osr_install_target();
inline void reset_osr_urgency_and_install_target();
inline Age bytecode_age() const; inline Age bytecode_age() const;
inline void set_bytecode_age(Age age); inline void set_bytecode_age(Age age);
...@@ -1017,8 +1040,6 @@ class BytecodeArray ...@@ -1017,8 +1040,6 @@ class BytecodeArray
inline int BytecodeArraySize(); inline int BytecodeArraySize();
inline int raw_instruction_size();
// Returns the size of bytecode and its metadata. This includes the size of // Returns the size of bytecode and its metadata. This includes the size of
// bytecode, constant pool, source position table, and handler table. // bytecode, constant pool, source position table, and handler table.
inline int SizeIncludingMetadata(); inline int SizeIncludingMetadata();
...@@ -1039,9 +1060,9 @@ class BytecodeArray ...@@ -1039,9 +1060,9 @@ class BytecodeArray
inline void clear_padding(); inline void clear_padding();
// InterpreterEntryTrampoline expects these fields to be next to each other // InterpreterEntryTrampoline expects these fields to be next to each other
// and writes a 16-bit value to reset them. // and writes a 32-bit value to reset them.
STATIC_ASSERT(BytecodeArray::kBytecodeAgeOffset == STATIC_ASSERT(kBytecodeAgeOffset ==
kOsrUrgencyOffset + kCharSize); kOsrUrgencyAndInstallTargetOffset + kUInt16Size);
// Maximal memory consumption for a single BytecodeArray. // Maximal memory consumption for a single BytecodeArray.
static const int kMaxSize = 512 * MB; static const int kMaxSize = 512 * MB;
......
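Since only the 13 LSB of the offset are stored, OsrInstallTargetFor aliases offsets that agree modulo 2^13; for very large functions a JumpLoop can therefore match a target meant for another loop. A compile-time check of the definition above (standalone sketch):

constexpr int OsrInstallTargetFor(int offset) {
  return (offset | 1) & 0x1FFF;  // kMask >> kShift == 0x1FFF (13 bits)
}
static_assert(OsrInstallTargetFor(5) == 5);
static_assert(OsrInstallTargetFor(8197) == 5);  // 8197 == 8192 + 5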
...@@ -4,6 +4,14 @@ ...@@ -4,6 +4,14 @@
type DependentCode extends WeakFixedArray; type DependentCode extends WeakFixedArray;
bitfield struct OSRUrgencyAndInstallTarget extends uint16 {
// The layout is chosen s.t. urgency and the install target offset can be
// loaded with a single 16-bit load (i.e. no masking required).
osr_urgency: uint32: 3 bit;
// The 13 LSB of the install target bytecode offset.
osr_install_target: uint32: 13 bit;
}
extern class BytecodeArray extends FixedArrayBase { extern class BytecodeArray extends FixedArrayBase {
// TODO(v8:8983): bytecode array object sizes vary based on their contents. // TODO(v8:8983): bytecode array object sizes vary based on their contents.
constant_pool: FixedArray; constant_pool: FixedArray;
...@@ -22,10 +30,8 @@ extern class BytecodeArray extends FixedArrayBase { ...@@ -22,10 +30,8 @@ extern class BytecodeArray extends FixedArrayBase {
frame_size: int32; frame_size: int32;
parameter_size: int32; parameter_size: int32;
incoming_new_target_or_generator_register: int32; incoming_new_target_or_generator_register: int32;
// TODO(jgruber): We only use 3 bits for the urgency; consider folding osr_urgency_and_install_target: OSRUrgencyAndInstallTarget;
// into other fields. bytecode_age: uint16; // Only 3 bits used.
osr_urgency: int8;
bytecode_age: int8;
} }
extern class CodeDataContainer extends HeapObject; extern class CodeDataContainer extends HeapObject;
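Taken together with the adjacent bytecode_age field (see the STATIC_ASSERT in objects.h above), the OSR state occupies one 32-bit region that can be reset with a single store. A layout sketch of that region:

// bits  0..2   osr_urgency          (3 bits)
// bits  3..15  osr_install_target   (13 LSB of the JumpLoop offset, OR'd
//                                    with 1; 0 means "unset")
// bits 16..31  bytecode_age         (uint16; only 3 bits used)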
...@@ -491,7 +491,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, FeedbackSlot); ...@@ -491,7 +491,7 @@ V8_EXPORT_PRIVATE std::ostream& operator<<(std::ostream& os, FeedbackSlot);
class BytecodeOffset { class BytecodeOffset {
public: public:
explicit constexpr BytecodeOffset(int id) : id_(id) {} explicit constexpr BytecodeOffset(int id) : id_(id) {}
int ToInt() const { return id_; } constexpr int ToInt() const { return id_; }
static constexpr BytecodeOffset None() { return BytecodeOffset(kNoneId); } static constexpr BytecodeOffset None() { return BytecodeOffset(kNoneId); }
......
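Making ToInt() constexpr is what lets the new constexpr BytecodeArray::OsrInstallTargetFor above be evaluated at compile time; e.g. (hypothetical compile-time use):

static_assert(BytecodeArray::OsrInstallTargetFor(BytecodeOffset(5)) == 5);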