Commit 151ecb57 authored by Yahan Lu, committed by Commit Bot

[riscv64] Implement pc-relative builtin-to-builtin calls

Port pc-relative builtin-to-builtin calls to riscv64.

Port: ccc068d5
Change-Id: I1d11dd1e77ca578f7714864e4e090493fa8bca0a
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2814722
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Brice Dobry <brice.dobry@futurewei.com>
Cr-Commit-Position: refs/heads/master@{#73894}
parent 1c6d5369
@@ -83,6 +83,8 @@ MemOperand BaselineAssembler::FeedbackVectorOperand() {
 void BaselineAssembler::Bind(Label* label) { __ bind(label); }
 
+void BaselineAssembler::BindWithoutJumpTarget(Label* label) { __ bind(label); }
+
 void BaselineAssembler::JumpTarget() {
   // Nop
 }
......
@@ -1770,6 +1770,16 @@ void Builtins::Generate_TailCallOptimizedCodeSlot(MacroAssembler* masm) {
   TailCallOptimizedCodeSlot(masm, optimized_code_entry, t4, t0);
 }
 
 namespace {
+
+void Generate_OSREntry(MacroAssembler* masm, Register entry_address,
+                       Operand offset = Operand(int64_t(0))) {
+  // Pop the return address to this function's caller from the return stack
+  // buffer, since we'll never return to it.
+  __ Add64(ra, entry_address, offset);
+  // And "return" to the OSR entry point of the function.
+  __ Ret();
+}
+
 void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   {
     FrameScope scope(masm, StackFrame::INTERNAL);
@@ -1796,9 +1806,7 @@ void OnStackReplacement(MacroAssembler* masm, bool is_interpreter) {
   // Compute the target address = code_obj + header_size + osr_offset
   // <entry_addr> = <code_obj> + #header_size + <osr_offset>
   __ Add64(a0, a0, a1);
-  __ Add64(ra, a0, Code::kHeaderSize - kHeapObjectTag);
-  // And "return" to the OSR entry point of the function.
-  __ Ret();
+  Generate_OSREntry(masm, a0, Operand(Code::kHeaderSize - kHeapObjectTag));
 }
 
 }  // namespace
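Generate_OSREntry never returns to its caller: it plants the OSR entry point in ra and then executes Ret(), which on RISC-V is just `jalr zero, ra, 0`, so the "return" lands in the newly compiled code. A rough plain-C++ analogy (hypothetical names, not V8 code), with ra modeled as an explicit continuation:

#include <cstdio>

// ra modeled as an explicit continuation instead of a real register.
using Continuation = void (*)();

void BaselineOsrEntry() { std::puts("resumed at the OSR entry point"); }

// Stand-in for Generate_OSREntry: overwrite the return address, then "return".
void OsrStub(Continuation& ra) { ra = &BaselineOsrEntry; }

int main() {
  Continuation ra = nullptr;
  OsrStub(ra);
  ra();  // Ret(): control lands in baseline code, never back in the old caller.
  return 0;
}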
@@ -3489,15 +3497,16 @@ void Builtins::Generate_DeoptimizationEntry_Lazy(MacroAssembler* masm) {
   Generate_DeoptimizationEntry(masm, DeoptimizeKind::kLazy);
 }
 
+namespace {
+
 // Converts an interpreter frame into a baseline frame and continues execution
 // in baseline code (baseline code has to exist on the shared function info),
 // either at the start or the end of the current bytecode.
-void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
-  // Get bytecode array and bytecode offset from the stack frame.
-  __ Ld(kInterpreterBytecodeArrayRegister,
-        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
-  __ SmiUntag(kInterpreterBytecodeOffsetRegister,
-              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode,
+                            bool is_osr = false) {
+  __ Push(zero_reg, kInterpreterAccumulatorRegister);
+  Label start;
+  __ bind(&start);
 
   // Get function from the frame.
   Register closure = a1;
@@ -3508,12 +3517,15 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
   __ Ld(feedback_vector,
         FieldMemOperand(closure, JSFunction::kFeedbackCellOffset));
   __ Ld(feedback_vector, FieldMemOperand(feedback_vector, Cell::kValueOffset));
-  if (__ emit_debug_code()) {
-    Register scratch = t0;
-    __ GetObjectType(feedback_vector, scratch, scratch);
-    __ Assert(eq, AbortReason::kExpectedFeedbackVector, scratch,
-              Operand(FEEDBACK_VECTOR_TYPE));
-  }
+  Label install_baseline_code;
+  // Check if feedback vector is valid. If not, call prepare for baseline to
+  // allocate it.
+  __ GetObjectType(feedback_vector, t0, t0);
+  __ Branch(&install_baseline_code, ne, t0, Operand(FEEDBACK_VECTOR_TYPE));
+
+  // Save BytecodeOffset from the stack frame.
+  __ SmiUntag(kInterpreterBytecodeOffsetRegister,
+              MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
+  // Replace BytecodeOffset with the feedback vector.
   __ Sd(feedback_vector,
         MemOperand(fp, InterpreterFrameConstants::kBytecodeOffsetFromFp));
   feedback_vector = no_reg;
@@ -3526,14 +3538,17 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
   __ Ld(code_obj,
         FieldMemOperand(code_obj, SharedFunctionInfo::kFunctionDataOffset));
   __ Ld(code_obj, FieldMemOperand(code_obj, BaselineData::kBaselineCodeOffset));
-  closure = no_reg;
 
   // Compute baseline pc for bytecode offset.
-  __ Push(zero_reg, kInterpreterAccumulatorRegister);
-  ExternalReference get_baseline_pc_extref =
-      next_bytecode
-          ? ExternalReference::baseline_end_pc_for_bytecode_offset()
-          : ExternalReference::baseline_start_pc_for_bytecode_offset();
+  ExternalReference get_baseline_pc_extref;
+  if (next_bytecode || is_osr) {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_next_executed_bytecode();
+  } else {
+    get_baseline_pc_extref =
+        ExternalReference::baseline_pc_for_bytecode_offset();
+  }
   Register get_baseline_pc = a3;
   __ li(get_baseline_pc, get_baseline_pc_extref);
@@ -3544,41 +3559,71 @@ void Generate_BaselineEntry(MacroAssembler* masm, bool next_bytecode) {
   // TODO(pthier): Investigate if it is feasible to handle this special case
   // in TurboFan instead of here.
   Label valid_bytecode_offset, function_entry_bytecode;
-  __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
-            Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
-                    kFunctionEntryBytecodeOffset));
+  if (!is_osr) {
+    __ Branch(&function_entry_bytecode, eq, kInterpreterBytecodeOffsetRegister,
+              Operand(BytecodeArray::kHeaderSize - kHeapObjectTag +
+                      kFunctionEntryBytecodeOffset));
+  }
 
   __ Sub64(kInterpreterBytecodeOffsetRegister,
            kInterpreterBytecodeOffsetRegister,
            (BytecodeArray::kHeaderSize - kHeapObjectTag));
 
   __ bind(&valid_bytecode_offset);
+  // Get bytecode array from the stack frame.
+  __ Ld(kInterpreterBytecodeArrayRegister,
+        MemOperand(fp, InterpreterFrameConstants::kBytecodeArrayFromFp));
   {
+    FrameScope scope(masm, StackFrame::INTERNAL);
     Register arg_reg_1 = a0;
     Register arg_reg_2 = a1;
     Register arg_reg_3 = a2;
     __ Move(arg_reg_1, code_obj);
     __ Move(arg_reg_2, kInterpreterBytecodeOffsetRegister);
     __ Move(arg_reg_3, kInterpreterBytecodeArrayRegister);
-    FrameScope scope(masm, StackFrame::INTERNAL);
     __ CallCFunction(get_baseline_pc, 3, 0);
   }
-  __ Add64(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
   __ Add64(code_obj, code_obj, kReturnRegister0);
   __ Pop(kInterpreterAccumulatorRegister, zero_reg);
 
-  __ Jump(code_obj);
+  if (is_osr) {
+    // Reset the OSR loop nesting depth to disarm back edges.
+    // TODO(pthier): Separate baseline Sparkplug from TF arming and don't disarm
+    // Sparkplug here.
+    __ Sd(zero_reg, FieldMemOperand(kInterpreterBytecodeArrayRegister,
+                                    BytecodeArray::kOsrNestingLevelOffset));
+    Generate_OSREntry(masm, code_obj,
+                      Operand(Code::kHeaderSize - kHeapObjectTag));
+  } else {
+    __ Add64(code_obj, code_obj, Code::kHeaderSize - kHeapObjectTag);
+    __ Jump(code_obj);
+  }
   __ Trap();  // Unreachable.
 
-  __ bind(&function_entry_bytecode);
-  // If the bytecode offset is kFunctionEntryOffset, get the start address of
-  // the first bytecode.
-  __ li(kInterpreterBytecodeOffsetRegister, Operand(int64_t(0)));
-  __ li(get_baseline_pc,
-        ExternalReference::baseline_start_pc_for_bytecode_offset());
-  __ Branch(&valid_bytecode_offset);
+  if (!is_osr) {
+    __ bind(&function_entry_bytecode);
+    // If the bytecode offset is kFunctionEntryOffset, get the start address of
+    // the first bytecode.
+    __ li(kInterpreterBytecodeOffsetRegister, Operand(int64_t(0)));
+    if (next_bytecode) {
+      __ li(get_baseline_pc,
+            ExternalReference::baseline_pc_for_bytecode_offset());
+    }
+    __ Branch(&valid_bytecode_offset);
+  }
+
+  __ bind(&install_baseline_code);
+  {
+    FrameScope scope(masm, StackFrame::INTERNAL);
+    __ Push(closure);
+    __ CallRuntime(Runtime::kInstallBaselineCode, 1);
+  }
+  // Retry from the start after installing baseline code.
+  __ Branch(&start);
 }
+
+}  // namespace
 
 void Builtins::Generate_BaselineEnterAtBytecode(MacroAssembler* masm) {
   Generate_BaselineEntry(masm, false);
 }
@@ -3587,6 +3632,11 @@ void Builtins::Generate_BaselineEnterAtNextBytecode(MacroAssembler* masm) {
   Generate_BaselineEntry(masm, true);
 }
 
+void Builtins::Generate_InterpreterOnStackReplacement_ToBaseline(
+    MacroAssembler* masm) {
+  Generate_BaselineEntry(masm, false, true);
+}
+
 void Builtins::Generate_DynamicCheckMapsTrampoline(MacroAssembler* masm) {
   FrameScope scope(masm, StackFrame::MANUAL);
   __ EnterFrame(StackFrame::INTERNAL);
......
@@ -63,11 +63,15 @@ void RelocInfo::apply(intptr_t delta) {
   if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
     // Absolute code pointer inside code object moves with the code object.
     Assembler::RelocateInternalReference(rmode_, pc_, delta);
+  } else {
+    DCHECK(IsRelativeCodeTarget(rmode_));
+    Assembler::RelocateRelativeReference(rmode_, pc_, delta);
   }
 }
 
 Address RelocInfo::target_address() {
-  DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
+  DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
+         IsWasmCall(rmode_));
   return Assembler::target_address_at(pc_, constant_pool_);
 }
@@ -133,9 +137,13 @@ HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
 }
 
 Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
-  DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
-  return Handle<HeapObject>(reinterpret_cast<Address*>(
-      Assembler::target_address_at(pc_, constant_pool_)));
+  if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
+    return Handle<HeapObject>(reinterpret_cast<Address*>(
+        Assembler::target_address_at(pc_, constant_pool_)));
+  } else {
+    DCHECK(IsRelativeCodeTarget(rmode_));
+    return origin->relative_code_target_object_handle_at(pc_);
+  }
 }
 void RelocInfo::set_target_object(Heap* heap, HeapObject target,
@@ -163,11 +171,11 @@ void RelocInfo::set_target_external_reference(
 }
 
 Address RelocInfo::target_internal_reference() {
-  if (rmode_ == INTERNAL_REFERENCE) {
+  if (IsInternalReference(rmode_)) {
     return Memory<Address>(pc_);
   } else {
     // Encoded internal references are j/jal instructions.
-    DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
+    DCHECK(IsInternalReferenceEncoded(rmode_));
     DCHECK(Assembler::IsLui(Assembler::instr_at(pc_ + 0 * kInstrSize)));
     Address address = Assembler::target_address_at(pc_);
     return address;
@@ -175,10 +183,20 @@ Address RelocInfo::target_internal_reference() {
 }
 
 Address RelocInfo::target_internal_reference_address() {
-  DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
+  DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
   return pc_;
 }
 
+Handle<Code> Assembler::relative_code_target_object_handle_at(
+    Address pc) const {
+  Instr instr1 = Assembler::instr_at(pc);
+  Instr instr2 = Assembler::instr_at(pc + kInstrSize);
+  DCHECK(IsAuipc(instr1));
+  DCHECK(IsJalr(instr2));
+  int32_t code_target_index = BrachlongOffset(instr1, instr2);
+  return GetCodeTarget(code_target_index);
+}
+
 Address RelocInfo::target_runtime_entry(Assembler* origin) {
   DCHECK(IsRuntimeEntry(rmode_));
   return target_address();
......
@@ -128,7 +128,8 @@ Register ToRegister(int num) {
 
 const int RelocInfo::kApplyMask =
     RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE) |
-    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED);
+    RelocInfo::ModeMask(RelocInfo::INTERNAL_REFERENCE_ENCODED) |
+    RelocInfo::ModeMask(RelocInfo::RELATIVE_CODE_TARGET);
 
 bool RelocInfo::IsCodedSpecially() {
   // The deserializer needs to know whether a pointer is specially coded. Being
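Adding RELATIVE_CODE_TARGET to kApplyMask means RelocInfo::apply() now runs for pc-relative code targets whenever a code object moves. A compile-time sketch of the mask arithmetic, assuming V8's ModeMask(mode) is 1 << mode (the mode numbers below are illustrative, not V8's actual values):

#include <cstdint>

// Illustrative mode numbers only; V8 defines the real RelocInfo::Mode enum.
enum Mode : int {
  INTERNAL_REFERENCE = 1,
  INTERNAL_REFERENCE_ENCODED = 2,
  RELATIVE_CODE_TARGET = 3,
};
constexpr int ModeMask(Mode mode) { return 1 << mode; }

constexpr int kApplyMask = ModeMask(INTERNAL_REFERENCE) |
                           ModeMask(INTERNAL_REFERENCE_ENCODED) |
                           ModeMask(RELATIVE_CODE_TARGET);
// RelocInfo::apply() is invoked for a mode iff its bit is set in kApplyMask.
static_assert(kApplyMask & ModeMask(RELATIVE_CODE_TARGET),
              "relative code targets are fixed up when code moves");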
@@ -454,6 +455,16 @@ static inline Instr SetAuipcOffset(int32_t offset, Instr instr) {
   return instr;
 }
 
+static inline Instr SetJalrOffset(int32_t offset, Instr instr) {
+  DCHECK(Assembler::IsJalr(instr));
+  DCHECK(is_int12(offset));
+  instr &= ~kImm12Mask;
+  int32_t imm12 = offset << kImm12Shift;
+  DCHECK(Assembler::IsJalr(instr | (imm12 & kImm12Mask)));
+  DCHECK(Assembler::JalrOffset(instr | (imm12 & kImm12Mask)) == offset);
+  return instr | (imm12 & kImm12Mask);
+}
+
 static inline Instr SetJalOffset(int32_t pos, int32_t target_pos, Instr instr) {
   DCHECK(Assembler::IsJal(instr));
   int32_t imm = target_pos - pos;
@@ -689,17 +700,36 @@ int Assembler::CJumpOffset(Instr instr) {
 
 int Assembler::BrachlongOffset(Instr auipc, Instr instr_I) {
   DCHECK(reinterpret_cast<Instruction*>(&instr_I)->InstructionType() ==
          InstructionBase::kIType);
-  const int kImm19_0Mask = ((1 << 20) - 1);
-  int32_t imm_auipc = auipc & (kImm19_0Mask << 12);
-  int32_t imm_12 = instr_I >> 20;
-  int32_t offset = imm_12 + imm_auipc;
+  DCHECK(IsAuipc(auipc));
+  int32_t imm_auipc = AuipcOffset(auipc);
+  int32_t imm12 = (instr_I & kImm12Mask) >> 20;
+  int32_t offset = imm12 + imm_auipc;
   return offset;
 }
 
+int Assembler::PatchBranchlongOffset(Address pc, Instr instr_auipc,
+                                     Instr instr_jalr, int32_t offset) {
+  DCHECK(IsAuipc(instr_auipc));
+  DCHECK(IsJalr(instr_jalr));
+  int32_t Hi20 = (((int32_t)offset + 0x800) >> 12);
+  int32_t Lo12 = (int32_t)offset << 20 >> 20;
+  CHECK(is_int32(offset));
+  instr_at_put(pc, SetAuipcOffset(Hi20, instr_auipc));
+  instr_at_put(pc + 4, SetJalrOffset(Lo12, instr_jalr));
+  DCHECK(offset ==
+         BrachlongOffset(Assembler::instr_at(pc), Assembler::instr_at(pc + 4)));
+  return 2;
+}
+
 int Assembler::LdOffset(Instr instr) {
   DCHECK(IsLd(instr));
   int32_t imm12 = (instr & kImm12Mask) >> 20;
-  imm12 = imm12 << 12 >> 12;
+  return imm12;
+}
+
+int Assembler::JalrOffset(Instr instr) {
+  DCHECK(IsJalr(instr));
+  int32_t imm12 = (instr & kImm12Mask) >> 20;
   return imm12;
 }
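PatchBranchlongOffset splits a signed 32-bit offset into the auipc's Hi20 and the jalr's Lo12. Because jalr sign-extends its 12-bit immediate, adding 0x800 before the shift bumps Hi20 by one whenever Lo12 comes out negative; the same sign-extension property is why LdOffset no longer needs the explicit `<< 12 >> 12` step (the arithmetic right shift by 20 already drags the sign bit down). A standalone sketch of the split and its round trip (plain C++, not V8 code):

#include <cassert>
#include <cstdint>

int32_t Hi20(int32_t offset) { return (offset + 0x800) >> 12; }

int32_t Lo12(int32_t offset) {
  int32_t lo = offset & 0xfff;
  return lo < 0x800 ? lo : lo - 0x1000;  // value jalr's sign-extension yields
}

int main() {
  for (int32_t off : {0, 0x7ff, 0x800, -0x800, 0x1234, -0x1234, 0x12345678}) {
    // auipc contributes Hi20 << 12; jalr adds the sign-extended Lo12.
    assert((Hi20(off) << 12) + Lo12(off) == off);
  }
  return 0;
}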
@@ -2596,6 +2626,22 @@ int Assembler::RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
   }
 }
 
+void Assembler::RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+                                          intptr_t pc_delta) {
+  Instr instr = instr_at(pc);
+  Instr instr1 = instr_at(pc + 1 * kInstrSize);
+  DCHECK(RelocInfo::IsRelativeCodeTarget(rmode));
+  if (IsAuipc(instr) && IsJalr(instr1)) {
+    int32_t imm;
+    imm = BrachlongOffset(instr, instr1);
+    imm -= pc_delta;
+    PatchBranchlongOffset(pc, instr, instr1, imm);
+    return;
+  } else {
+    UNREACHABLE();
+  }
+}
+
 void Assembler::GrowBuffer() {
   DEBUG_PRINTF("GrowBuffer: %p -> ", buffer_start_);
   // Compute new buffer size.
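The `imm -= pc_delta` in RelocateRelativeReference is the whole relocation story: if the instruction pair moves by pc_delta while its target stays put, the stored pc-relative displacement must shrink by exactly that delta. A minimal sketch of the invariant (plain C++, hypothetical helper name):

#include <cassert>
#include <cstdint>

// new displacement = old displacement - pc_delta (the `imm -= pc_delta` above).
int64_t Retarget(int64_t old_offset, int64_t pc_delta) {
  return old_offset - pc_delta;
}

int main() {
  const int64_t target = 0x5000, old_pc = 0x1000, pc_delta = 0x300;
  const int64_t old_offset = target - old_pc;
  // From the moved instruction, the adjusted offset still reaches the target.
  assert(old_pc + pc_delta + Retarget(old_offset, pc_delta) == target);
  return 0;
}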
@@ -2766,12 +2812,23 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
                                       ICacheFlushMode icache_flush_mode) {
   Instr* instr = reinterpret_cast<Instr*>(pc);
   if (IsAuipc(*instr)) {
-    DCHECK(IsLd(*reinterpret_cast<Instr*>(pc + 4)));
-    int32_t Hi20 = AuipcOffset(*instr);
-    int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
-    Memory<Address>(pc + Hi20 + Lo12) = target;
-    if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
-      FlushInstructionCache(pc + Hi20 + Lo12, 2 * kInstrSize);
+    if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
+      int32_t Hi20 = AuipcOffset(*instr);
+      int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
+      Memory<Address>(pc + Hi20 + Lo12) = target;
+      if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+        FlushInstructionCache(pc + Hi20 + Lo12, 2 * kInstrSize);
+      }
+    } else {
+      DCHECK(IsJalr(*reinterpret_cast<Instr*>(pc + 4)));
+      int64_t imm = (int64_t)target - (int64_t)pc;
+      Instr instr = instr_at(pc);
+      Instr instr1 = instr_at(pc + 1 * kInstrSize);
+      DCHECK(is_int32(imm));
+      int num = PatchBranchlongOffset(pc, instr, instr1, (int32_t)imm);
+      if (icache_flush_mode != SKIP_ICACHE_FLUSH) {
+        FlushInstructionCache(pc, num * kInstrSize);
+      }
     }
   } else {
     set_target_address_at(pc, target, icache_flush_mode);
@@ -2781,10 +2838,17 @@ void Assembler::set_target_address_at(Address pc, Address constant_pool,
 
 Address Assembler::target_address_at(Address pc, Address constant_pool) {
   Instr* instr = reinterpret_cast<Instr*>(pc);
   if (IsAuipc(*instr)) {
-    DCHECK(IsLd(*reinterpret_cast<Instr*>(pc + 4)));
-    int32_t Hi20 = AuipcOffset(*instr);
-    int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
-    return Memory<Address>(pc + Hi20 + Lo12);
+    if (IsLd(*reinterpret_cast<Instr*>(pc + 4))) {
+      int32_t Hi20 = AuipcOffset(*instr);
+      int32_t Lo12 = LdOffset(*reinterpret_cast<Instr*>(pc + 4));
+      return Memory<Address>(pc + Hi20 + Lo12);
+    } else {
+      DCHECK(IsJalr(*reinterpret_cast<Instr*>(pc + 4)));
+      int32_t Hi20 = AuipcOffset(*instr);
+      int32_t Lo12 = JalrOffset(*reinterpret_cast<Instr*>(pc + 4));
+      return pc + Hi20 + Lo12;
+    }
   } else {
     return target_address_at(pc);
   }
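Both readers above dispatch on the instruction that follows the auipc: an ld means the pair computes the address of a constant-pool slot holding the target (so the slot is read or patched), while a jalr means the pair itself is the pc-relative transfer (so the instruction immediates are read or patched). A standalone classifier sketch using the base RV64I encodings (helper and enum names are made up):

#include <cstdint>

enum class AuipcPairKind { kConstantPoolLoad, kPcRelativeCallOrJump, kUnknown };

// Classify the instruction word following an auipc (RV64I encodings).
AuipcPairKind ClassifyAuipcSuccessor(uint32_t next_instr) {
  const uint32_t opcode = next_instr & 0x7f;
  const uint32_t funct3 = (next_instr >> 12) & 0x7;
  if (opcode == 0x03 && funct3 == 0x3)  // ld: auipc+ld constant-pool load
    return AuipcPairKind::kConstantPoolLoad;
  if (opcode == 0x67 && funct3 == 0x0)  // jalr: auipc+jalr pc-relative branch
    return AuipcPairKind::kPcRelativeCallOrJump;
  return AuipcPairKind::kUnknown;
}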
......
@@ -208,11 +208,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
 
   // Get offset from instr.
   int BranchOffset(Instr instr);
-  int BrachlongOffset(Instr auipc, Instr jalr);
+  static int BrachlongOffset(Instr auipc, Instr jalr);
+  static int PatchBranchlongOffset(Address pc, Instr auipc, Instr instr_I,
+                                   int32_t offset);
   int JumpOffset(Instr instr);
   int CJumpOffset(Instr instr);
   static int LdOffset(Instr instr);
   static int AuipcOffset(Instr instr);
+  static int JalrOffset(Instr instr);
 
   // Returns the branch offset to the given label from the current code
   // position. Links the label to the current position if it is still unbound.
@@ -800,6 +803,8 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
                                        intptr_t pc_delta);
+  static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
+                                        intptr_t pc_delta);
 
   // Writes a single byte or word of data in the code stream. Used for
   // inline tables, e.g., jump-tables.
@@ -862,6 +867,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
   static bool IsLd(Instr instr);
   void CheckTrampolinePool();
 
+  // Get the code target object for a pc-relative call or jump.
+  V8_INLINE Handle<Code> relative_code_target_object_handle_at(
+      Address pc_) const;
+
   inline int UnboundLabelsCount() { return unbound_labels_count_; }
 
  protected:
......
@@ -56,7 +56,7 @@ namespace v8 {
 namespace internal {
 
 // TODO(sigurds): Change this value once we use relative jumps.
-constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
+constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
 
 // -----------------------------------------------------------------------------
 // Registers and FPURegisters.
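A rough sanity check on the new constant (the precise interpretation of kMaxPCRelativeCodeRangeInMB is V8-internal, so this is only a sketch): an auipc+jalr pair encodes a full signed 32-bit pc-relative offset, giving a reachable window of 2^32 bytes, i.e. 4096 MB:

#include <cstdint>

constexpr int64_t kReachableBytes = int64_t{1} << 32;  // signed 32-bit offset
static_assert(kReachableBytes / (1024 * 1024) == 4096,
              "an auipc+jalr pair spans a 4096 MB window around pc");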
......
@@ -2952,9 +2952,20 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
   bool target_is_isolate_independent_builtin =
       isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
       Builtins::IsIsolateIndependent(builtin_index);
-
-  if (root_array_available_ && options().isolate_independent_code &&
-      target_is_isolate_independent_builtin) {
+  if (target_is_isolate_independent_builtin &&
+      options().use_pc_relative_calls_and_jumps) {
+    int32_t code_target_index = AddCodeTarget(code);
+    Label skip;
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    if (cond != al) {
+      Branch(&skip, NegateCondition(cond), rs, rt);
+    }
+    RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+    GenPCRelativeJump(t6, code_target_index);
+    bind(&skip);
+    return;
+  } else if (root_array_available_ && options().isolate_independent_code &&
+             target_is_isolate_independent_builtin) {
     int offset = code->builtin_index() * kSystemPointerSize +
                  IsolateData::builtin_entry_table_offset();
     Ld(t6, MemOperand(kRootRegister, offset));
@@ -3020,8 +3031,22 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
   bool target_is_isolate_independent_builtin =
       isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
       Builtins::IsIsolateIndependent(builtin_index);
-  if (root_array_available_ && options().isolate_independent_code &&
-      target_is_isolate_independent_builtin) {
+  if (target_is_isolate_independent_builtin &&
+      options().use_pc_relative_calls_and_jumps) {
+    int32_t code_target_index = AddCodeTarget(code);
+    Label skip;
+    BlockTrampolinePoolScope block_trampoline_pool(this);
+    RecordCommentForOffHeapTrampoline(builtin_index);
+    if (cond != al) {
+      Branch(&skip, NegateCondition(cond), rs, rt);
+    }
+    RecordRelocInfo(RelocInfo::RELATIVE_CODE_TARGET);
+    GenPCRelativeJumpAndLink(t6, code_target_index);
+    bind(&skip);
+    RecordComment("]");
+    return;
+  } else if (root_array_available_ && options().isolate_independent_code &&
+             target_is_isolate_independent_builtin) {
     int offset = code->builtin_index() * kSystemPointerSize +
                  IsolateData::builtin_entry_table_offset();
     LoadRootRelative(t6, offset);
@@ -3158,16 +3183,28 @@ void TurboAssembler::Ret(Condition cond, Register rs, const Operand& rt) {
   }
 }
 
+void TurboAssembler::GenPCRelativeJump(Register rd, int64_t imm32) {
+  DCHECK(is_int32(imm32));
+  int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
+  int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
+  auipc(rd, Hi20);  // Read PC + Hi20 into scratch.
+  jr(rd, Lo12);     // Jump PC + Hi20 + Lo12.
+}
+
+void TurboAssembler::GenPCRelativeJumpAndLink(Register rd, int64_t imm32) {
+  DCHECK(is_int32(imm32));
+  int32_t Hi20 = (((int32_t)imm32 + 0x800) >> 12);
+  int32_t Lo12 = (int32_t)imm32 << 20 >> 20;
+  auipc(rd, Hi20);  // Read PC + Hi20 into scratch.
+  jalr(rd, Lo12);   // Jump PC + Hi20 + Lo12 and write PC + 4 to ra.
+}
+
 void TurboAssembler::BranchLong(Label* L) {
   // Generate position independent long branch.
   BlockTrampolinePoolScope block_trampoline_pool(this);
   int64_t imm64;
   imm64 = branch_long_offset(L);
-  DCHECK(is_int32(imm64));
-  int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
-  int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
-  auipc(t6, Hi20);  // Read PC + Hi20 into scratch.
-  jr(t6, Lo12);     // jump PC + Hi20 + Lo12
+  GenPCRelativeJump(t6, imm64);
   EmitConstPoolWithJumpIfNeeded();
 }
 
@@ -3176,11 +3213,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L) {
   BlockTrampolinePoolScope block_trampoline_pool(this);
   int64_t imm64;
   imm64 = branch_long_offset(L);
-  DCHECK(is_int32(imm64));
-  int32_t Hi20 = (((int32_t)imm64 + 0x800) >> 12);
-  int32_t Lo12 = (int32_t)imm64 << 20 >> 20;
-  auipc(t6, Hi20);  // Read PC + Hi20 into scratch.
-  jalr(t6, Lo12);   // jump PC + Hi20 + Lo12 and read PC + 4 to ra
+  GenPCRelativeJumpAndLink(t6, imm64);
 }
 
 void TurboAssembler::DropAndRet(int drop) {
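For reference, the two machine words GenPCRelativeJumpAndLink(t6, imm) emits, encoded by hand with the standard RV64I formats (a sketch, not the V8 assembler API; x31 is t6 and x1 is ra, and GenPCRelativeJump's jr is the same jalr encoding with rd = x0, so no link is written):

#include <cassert>
#include <cstdint>

// U-type: imm[31:12] | rd | opcode(0x17).
uint32_t EncodeAuipc(uint32_t rd, int32_t hi20) {
  return ((static_cast<uint32_t>(hi20) & 0xfffff) << 12) | (rd << 7) | 0x17;
}
// I-type: imm[11:0] | rs1 | funct3(000) | rd | opcode(0x67).
uint32_t EncodeJalr(uint32_t rd, uint32_t rs1, int32_t lo12) {
  return ((static_cast<uint32_t>(lo12) & 0xfff) << 20) | (rs1 << 15) |
         (rd << 7) | 0x67;
}

int main() {
  // auipc t6, 1 ; jalr ra, t6, -8  => jump to pc + 0x1000 - 8, link in ra.
  assert(EncodeAuipc(31, 1) == 0x00001f97u);
  assert(EncodeJalr(1, 31, -8) == 0xff8f80e7u);
  return 0;
}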
......
@@ -200,6 +200,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
   void LoadRootRegisterOffset(Register destination, intptr_t offset) override;
   void LoadRootRelative(Register destination, int32_t offset) override;
 
+  inline void GenPCRelativeJump(Register rd, int64_t imm32);
+  inline void GenPCRelativeJumpAndLink(Register rd, int64_t imm32);
+
   // Jump, Call, and Ret pseudo instructions implementing inter-working.
 #define COND_ARGS                              \
   Condition cond = al, Register rs = zero_reg, \
......
@@ -1108,7 +1108,7 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
   }
   CHECK(!failed);
 #if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
-    defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_RISCV64)
+    defined(V8_TARGET_ARCH_MIPS64)
   // Isolate-independent builtin calls and jumps do not emit reloc infos
   // on PPC. We try to avoid using PC relative code due to performance
   // issue with especially older hardwares.
......
@@ -122,6 +122,8 @@ class Decoder {
   // Printing of instruction name.
   void PrintInstructionName(Instruction* instr);
 
+  void PrintTarget(Instruction* instr);
+
   // Handle formatting of instructions and their options.
   int FormatRegister(Instruction* instr, const char* option);
   int FormatFPURegisterOrRoundMode(Instruction* instr, const char* option);
@@ -213,6 +215,21 @@ void Decoder::PrintImm12(Instruction* instr) {
   out_buffer_pos_ += SNPrintF(out_buffer_ + out_buffer_pos_, "%d", imm);
 }
 
+void Decoder::PrintTarget(Instruction* instr) {
+  if (Assembler::IsJalr(instr->InstructionBits())) {
+    if (Assembler::IsAuipc((instr - 4)->InstructionBits()) &&
+        (instr - 4)->RdValue() == instr->Rs1Value()) {
+      int32_t imm = Assembler::BrachlongOffset((instr - 4)->InstructionBits(),
+                                               instr->InstructionBits());
+      const char* target =
+          converter_.NameOfAddress(reinterpret_cast<byte*>(instr - 4) + imm);
+      out_buffer_pos_ +=
+          SNPrintF(out_buffer_ + out_buffer_pos_, " -> %s", target);
+      return;
+    }
+  }
+}
+
 void Decoder::PrintBranchOffset(Instruction* instr) {
   int32_t imm = instr->BranchOffset();
   const char* target =
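PrintTarget only fuses a jalr with the preceding word when that word is an auipc and the jalr consumes the register the auipc wrote (the RdValue() == Rs1Value() test above); otherwise the jalr is printed without a resolved target. The same check, sketched over raw instruction words (plain C++, not V8 code):

#include <cstdint>

// True iff `cur` (a jalr) consumes the register written by `prev` (an auipc).
bool IsAuipcJalrPair(uint32_t prev, uint32_t cur) {
  const uint32_t prev_rd = (prev >> 7) & 0x1f;  // auipc destination
  const uint32_t cur_rs1 = (cur >> 15) & 0x1f;  // jalr base register
  const bool prev_is_auipc = (prev & 0x7f) == 0x17;
  const bool cur_is_jalr = (cur & 0x7f) == 0x67 && ((cur >> 12) & 0x7) == 0;
  return prev_is_auipc && cur_is_jalr && prev_rd == cur_rs1;
}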
@@ -699,6 +716,11 @@ int Decoder::FormatOption(Instruction* instr, const char* format) {
       PrintVs1(instr);
       return 3;
     }
+    case 't': {  // 'target: target of branch instructions'
+      DCHECK(STRING_STARTS_WITH(format, "target"));
+      PrintTarget(instr);
+      return 6;
+    }
   }
   UNREACHABLE();
 }
@@ -1280,7 +1302,7 @@ void Decoder::DecodeIType(Instruction* instr) {
       else if (instr->RdValue() == ra.code() && instr->Imm12Value() == 0)
         Format(instr, "jalr 'rs1");
       else
-        Format(instr, "jalr 'rd, 'imm12('rs1)");
+        Format(instr, "jalr 'rd, 'imm12('rs1)'target");
       break;
     case RO_LB:
       Format(instr, "lb 'rd, 'imm12('rs1)");
......
@@ -299,11 +299,12 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
                     RelocInfo::ModeMask(RelocInfo::WASM_STUB_CALL)));
 
 #if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
-    defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_RISCV64)
+    defined(V8_TARGET_ARCH_MIPS64)
   return RelocIterator(*this, kModeMask).done();
 #elif defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
     defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) ||    \
-    defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32)
+    defined(V8_TARGET_ARCH_S390) || defined(V8_TARGET_ARCH_IA32) ||   \
+    defined(V8_TARGET_ARCH_RISCV64)
   for (RelocIterator it(*this, kModeMask); !it.done(); it.next()) {
     // On these platforms we emit relative builtin-to-builtin
     // jumps for isolate independent builtins in the snapshot. They are later
......
@@ -216,7 +216,8 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
 #if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
     defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS) ||  \
-    defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390)
+    defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_S390) || \
+    defined(V8_TARGET_ARCH_RISCV64)
   // On these platforms we emit relative builtin-to-builtin
   // jumps for isolate independent builtins in the snapshot. This fixes up the
   // relative jumps to the right offsets in the snapshot.
......
@@ -844,12 +844,10 @@
   'regress/wasm/regress-1187831': [SKIP],
   'regress/regress-1172797': [SKIP],
   'regress/wasm/regress-1179025': [SKIP],
-  'wasm/simd-errors': [SKIP],
-  'wasm/simd-globals': [SKIP],
   'wasm/multi-value-simd': [SKIP],
-  'wasm/simd-call': [SKIP],
   'wasm/liftoff-simd-params': [SKIP],
   'wasm/exceptions-simd': [SKIP],
+  'wasm/simd-*': [SKIP],
 }],  # 'arch == riscv64'
......