Commit a1974d49 authored by Predrag Rudic, committed by Commit Bot

MIPS: Port [turbofan] Use relative calls/jumps on arm for builtins

Port commit 23dbb81d.

Original CL message:

>This CL uses pc-relative jumps and calls (B/BL) for calls from embedded
>builtins to embedded builtins. To make this work, the code range size is
>limited to 32MB on arm during mksnapshot, which ensures that all builtin
>to builtin offsets for jumps/calls fit into the B/BL immediate. At code
>generation time, we put a placeholder into the instruction offset which
>we resolve to the right code object when the code is copied to the heap.
>We use a new relocation mode RELATIVE_CODE_TARGET for these relative jumps.
>The relocation mode RELATIVE_CODE_TARGET should never appear after
>generating the snapshot.
>
>We modify the target_address/set_target_address methods of RelocInfo
>such that they return the absolute target addresses for pc-relative B/BL
>instructions. This ensures that the GC can treat RELATIVE_CODE_TARGET in
>the same way as code targets. This, however, only matters during
>snapshot creation time, and production code never contains
>RELATIVE_CODE_TARGET relocations.
>
>Bug: v8:6666
>Cq-Include-Trybots: luci.chromium.try:linux_chromium_rel_ng
>Change-Id: If7eab83ad588859ca87c654a5ddc3e37caea884c
>Reviewed-on: https://chromium-review.googlesource.com/1117181
>Reviewed-by: Michael Starzinger <mstarzinger@chromium.org>
>Reviewed-by: Ulan Degenbaev <ulan@chromium.org>
>Reviewed-by: Jakob Gruber <jgruber@chromium.org>
>Commit-Queue: Sigurd Schneider <sigurds@chromium.org>
>Cr-Commit-Position: refs/heads/master@{#54320}

This CL completes Miran's draft, without the MIPS64 port.
(https://chromium-review.googlesource.com/c/v8/v8/+/1136640/3).

Change-Id: I979378ac445548641755968d890f7f4a82dc7986
Reviewed-on: https://chromium-review.googlesource.com/c/1221313
Commit-Queue: Ivica Bogosavljevic <ibogosavljevic@wavecomp.com>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Ivica Bogosavljevic <ibogosavljevic@wavecomp.com>
Reviewed-by: Sigurd Schneider <sigurds@chromium.org>
Cr-Commit-Position: refs/heads/master@{#56640}
parent fefd9230
......@@ -70,12 +70,15 @@ void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
Assembler::RelocateInternalReference(rmode_, pc_, delta);
} else if (IsRelativeCodeTarget(rmode_)) {
Assembler::RelocateRelativeReference(rmode_, pc_, delta);
}
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
......@@ -158,9 +161,8 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | jic_offset_u);
} else {
// Encoded internal references are lui/ori load of 32-bit absolute address.
Assembler::instr_at_put(pc + 0 * kInstrSize,
instr1 | ((imm >> kLuiShift) & kImm16Mask));
Assembler::instr_at_put(pc + 1 * kInstrSize, instr2 | (imm & kImm16Mask));
PatchLuiOriImmediate(pc, imm, instr1, 0 * kInstrSize, instr2,
1 * kInstrSize);
}
// Currently used only by deserializer, and all code will be flushed
......@@ -169,31 +171,34 @@ void Assembler::set_target_internal_reference_encoded_at(Address pc,
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
if (mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
DCHECK(IsLui(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
} else {
DCHECK(mode == RelocInfo::INTERNAL_REFERENCE);
DCHECK(RelocInfo::IsInternalReference(mode));
Memory<Address>(pc) = target;
}
}
HeapObject* RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObject(rmode_));
return HeapObject::cast(reinterpret_cast<Object*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
Assembler::target_address_at(pc_, constant_pool_)));
if (IsCodeTarget(rmode_) || IsEmbeddedObject(rmode_)) {
return Handle<HeapObject>(reinterpret_cast<HeapObject**>(
Assembler::target_address_at(pc_, constant_pool_)));
}
DCHECK(IsRelativeCodeTarget(rmode_));
return origin->relative_code_target_object_handle_at(pc_);
}
void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || rmode_ == EMBEDDED_OBJECT);
DCHECK(IsCodeTarget(rmode_) || IsEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_,
reinterpret_cast<Address>(target),
icache_flush_mode);
......@@ -204,24 +209,24 @@ void RelocInfo::set_target_object(Heap* heap, HeapObject* target,
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
DCHECK(IsExternalReference(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::set_target_external_reference(
Address target, ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
DCHECK(IsExternalReference(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
}
Address RelocInfo::target_internal_reference() {
if (rmode_ == INTERNAL_REFERENCE) {
if (IsInternalReference(rmode_)) {
return Memory<Address>(pc_);
} else {
// Encoded internal references are lui/ori or lui/jic load of 32-bit
// absolute address.
DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
DCHECK(IsInternalReferenceEncoded(rmode_));
Instr instr1 = Assembler::instr_at(pc_ + 0 * kInstrSize);
Instr instr2 = Assembler::instr_at(pc_ + 1 * kInstrSize);
DCHECK(Assembler::IsLui(instr1));
......@@ -230,15 +235,13 @@ Address RelocInfo::target_internal_reference() {
return static_cast<Address>(
Assembler::CreateTargetAddress(instr1, instr2));
}
int32_t imm = (instr1 & static_cast<int32_t>(kImm16Mask)) << kLuiShift;
imm |= (instr2 & static_cast<int32_t>(kImm16Mask));
return static_cast<Address>(imm);
return static_cast<Address>(Assembler::GetLuiOriImmediate(instr1, instr2));
}
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
DCHECK(IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_));
return pc_;
}
......@@ -274,21 +277,34 @@ void RelocInfo::WipeOut() {
}
}
Handle<Code> Assembler::relative_code_target_object_handle_at(
    Address pc) const {
  // Recover the code-target index embedded at `pc` for a pc-relative
  // builtin call/jump, and map it back to its Code handle.
  Instr first = instr_at(pc);
  Instr second = instr_at(pc + kInstrSize);
  // A nal may have been scheduled between the lui and the ori; if so, the
  // ori lives one instruction further on.
  if (IsNal(second)) second = instr_at(pc + 2 * kInstrSize);
  // The pair must be the two instructions generated by li (lui/ori).
  DCHECK(IsLui(first));
  DCHECK(IsOri(second));
  const int index = GetLuiOriImmediate(first, second);
  return GetCodeTarget(index);
}
template <typename ObjectVisitor>
void RelocInfo::Visit(ObjectVisitor* visitor) {
RelocInfo::Mode mode = rmode();
if (mode == RelocInfo::EMBEDDED_OBJECT) {
Mode mode = rmode();
if (IsEmbeddedObject(mode)) {
visitor->VisitEmbeddedPointer(host(), this);
} else if (RelocInfo::IsCodeTargetMode(mode)) {
} else if (IsCodeTargetMode(mode)) {
visitor->VisitCodeTarget(host(), this);
} else if (mode == RelocInfo::EXTERNAL_REFERENCE) {
} else if (IsExternalReference(mode)) {
visitor->VisitExternalReference(host(), this);
} else if (mode == RelocInfo::INTERNAL_REFERENCE ||
mode == RelocInfo::INTERNAL_REFERENCE_ENCODED) {
} else if (IsInternalReference(mode) || IsInternalReferenceEncoded(mode)) {
visitor->VisitInternalReference(host(), this);
} else if (RelocInfo::IsRuntimeEntry(mode)) {
} else if (IsRuntimeEntry(mode)) {
visitor->VisitRuntimeEntry(host(), this);
} else if (RelocInfo::IsOffHeapTarget(mode)) {
} else if (IsOffHeapTarget(mode)) {
visitor->VisitOffHeapTarget(host(), this);
}
}
......
This diff is collapsed.
......@@ -380,6 +380,9 @@ constexpr MSAControlRegister no_msacreg = {kInvalidMSAControlRegister};
constexpr MSAControlRegister MSAIR = {kMSAIRRegister};
constexpr MSAControlRegister MSACSR = {kMSACSRRegister};
// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
enum BranchDelaySlot { USE_DELAY_SLOT, PROTECT };
// -----------------------------------------------------------------------------
// Machine instruction Operands.
......@@ -612,14 +615,22 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Difference between address of current opcode and target address offset,
// when we are generating a sequence of instructions for long relative PC
// branches
// branches. It is distance between address of the first instruction in
// the jump sequence, and the value that ra gets after calling nal().
static constexpr int kLongBranchPCOffset = 3 * kInstrSize;
// Adjust ra register in branch delay slot of bal instruction so to skip
// Adjust ra register in branch delay slot of bal instruction in order to skip
// instructions not needed after optimization of PIC in
// TurboAssembler::BranchAndLink method.
static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 3 * kInstrSize;
static constexpr int kOptimizedBranchAndLinkLongReturnOffset = 4 * kInstrSize;
// Offset of target relative address in calls/jumps for builtins. It is
// distance between instruction that is placed just after calling
// RecordRelocInfo, and the value that ra gets after calling nal().
static constexpr int kRelativeJumpForBuiltinsOffset = 1 * kInstrSize;
// Relative target address of jumps for builtins when we use lui, ori, dsll,
// ori sequence when loading address that cannot fit into 32 bits.
static constexpr int kRelativeCallForBuiltinsOffset = 3 * kInstrSize;
// Here we are patching the address in the LUI/ORI instruction pair.
// These values are used in the serialization process and must be zero for
......@@ -644,10 +655,6 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static constexpr int kCallTargetAddressOffset = 4 * kInstrSize;
#endif
// Difference between address of current opcode and value read from pc
// register.
static constexpr int kPcLoadDelta = 4;
// Max offset for instructions with 16-bit offset field
static constexpr int kMaxBranchOffset = (1 << (18 - 1)) - 1;
......@@ -1733,6 +1740,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static int RelocateInternalReference(RelocInfo::Mode rmode, Address pc,
intptr_t pc_delta);
static void RelocateRelativeReference(RelocInfo::Mode rmode, Address pc,
intptr_t pc_delta);
// Writes a single byte or word of data in the code stream. Used for
// inline tables, e.g., jump-tables.
void db(uint8_t data);
......@@ -1782,6 +1792,7 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
static bool IsJ(Instr instr);
static bool IsLui(Instr instr);
static bool IsOri(Instr instr);
static bool IsAddu(Instr instr, Register rd, Register rs, Register rt);
static bool IsJal(Instr instr);
static bool IsJr(Instr instr);
......@@ -1848,6 +1859,10 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
return IsMipsArchVariant(kMips32r6);
}
// Get the code target object for a pc-relative call or jump.
V8_INLINE Handle<Code> relative_code_target_object_handle_at(
Address pc_) const;
inline int UnboundLabelsCount() { return unbound_labels_count_; }
protected:
......@@ -1881,6 +1896,9 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
// Record reloc info for current pc_.
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
// Read 32-bit immediate from lui, ori pair that is used to load immediate.
static int32_t GetLuiOriImmediate(Instr instr1, Instr instr2);
// Block the emission of the trampoline pool before pc_offset.
void BlockTrampolinePoolBefore(int pc_offset) {
if (no_trampoline_pool_before_ < pc_offset)
......@@ -1941,6 +1959,12 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
RegList scratch_register_list_;
// Generate common instruction sequence.
void GenPCRelativeJump(Register tf, Register ts, int32_t imm32,
RelocInfo::Mode rmode, BranchDelaySlot bdslot);
void GenPCRelativeJumpAndLink(Register t, int32_t imm32,
RelocInfo::Mode rmode, BranchDelaySlot bdslot);
private:
// Avoid overflows for displacements etc.
static const int kMaximalBufferSize = 512 * MB;
......@@ -2129,6 +2153,14 @@ class V8_EXPORT_PRIVATE Assembler : public AssemblerBase {
void bind_to(Label* L, int pos);
void next(Label* L, bool is_internal);
// Patching lui/ori pair which is commonly used for loading constants.
static void PatchLuiOriImmediate(Address pc, int32_t imm, Instr instr1,
Address offset_lui, Instr instr2,
Address offset_ori);
void PatchLuiOriImmediate(int pc, int32_t imm, Instr instr1,
Address offset_lui, Instr instr2,
Address offset_ori);
// One trampoline consists of:
// - space for trampoline slots,
// - space for labels.
......
......@@ -145,8 +145,7 @@ const uint32_t kLeastSignificantByteInInt32Offset = 3;
namespace v8 {
namespace internal {
// TODO(sigurds): Change this value once we use relative jumps.
constexpr size_t kMaxPCRelativeCodeRangeInMB = 0;
constexpr size_t kMaxPCRelativeCodeRangeInMB = 4096;
// -----------------------------------------------------------------------------
// Registers and FPURegisters.
......
......@@ -1748,7 +1748,11 @@ void Decoder::DecodeTypeImmediate(Instruction* instr) {
Format(instr, "bltz 'rs, 'imm16u -> 'imm16p4s2");
break;
case BLTZAL:
Format(instr, "bltzal 'rs, 'imm16u -> 'imm16p4s2");
if (instr->RsValue() == 0) {
Format(instr, "nal");
} else {
Format(instr, "bltzal 'rs, 'imm16u -> 'imm16p4s2");
}
break;
case BGEZ:
Format(instr, "bgez 'rs, 'imm16u -> 'imm16p4s2");
......
......@@ -3794,23 +3794,38 @@ void TurboAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
DCHECK(RelocInfo::IsCodeTarget(rmode));
BlockTrampolinePoolScope block_trampoline_pool(this);
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
int builtin_index = Builtins::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
Label skip;
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cond != cc_always) {
// By using delay slot, we always execute first instruction of
// GenPcRelativeJump (which is or_(t8, ra, zero_reg)).
Branch(USE_DELAY_SLOT, &skip, NegateCondition(cond), rs, rt);
}
GenPCRelativeJump(t8, t9, code_target_index,
RelocInfo::RELATIVE_CODE_TARGET, bd);
bind(&skip);
return;
} else if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadConstant(t9, code);
Jump(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(t9, 0, cond, rs, rt, bd);
return;
}
} else if (target_is_isolate_independent_builtin &&
options().inline_offheap_trampolines) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Jump(t9, 0, cond, rs, rt, bd);
return;
}
}
Jump(static_cast<intptr_t>(code.address()), rmode, cond, rs, rt, bd);
......@@ -3901,23 +3916,36 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
BranchDelaySlot bd) {
BlockTrampolinePoolScope block_trampoline_pool(this);
if (FLAG_embedded_builtins) {
if (root_array_available_ && options().isolate_independent_code) {
int builtin_index = Builtins::kNoBuiltinId;
bool target_is_isolate_independent_builtin =
isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index);
if (target_is_isolate_independent_builtin &&
options().use_pc_relative_calls_and_jumps) {
int32_t code_target_index = AddCodeTarget(code);
Label skip;
BlockTrampolinePoolScope block_trampoline_pool(this);
if (cond != cc_always) {
Branch(PROTECT, &skip, NegateCondition(cond), rs, rt);
}
GenPCRelativeJumpAndLink(t8, code_target_index,
RelocInfo::RELATIVE_CODE_TARGET, bd);
bind(&skip);
return;
} else if (root_array_available_ && options().isolate_independent_code) {
IndirectLoadConstant(t9, code);
Call(t9, Code::kHeaderSize - kHeapObjectTag, cond, rs, rt, bd);
return;
} else if (options().inline_offheap_trampolines) {
int builtin_index = Builtins::kNoBuiltinId;
if (isolate()->builtins()->IsBuiltinHandle(code, &builtin_index) &&
Builtins::IsIsolateIndependent(builtin_index)) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(t9, 0, cond, rs, rt, bd);
return;
}
} else if (target_is_isolate_independent_builtin &&
options().inline_offheap_trampolines) {
// Inline the trampoline.
RecordCommentForOffHeapTrampoline(builtin_index);
CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
EmbeddedData d = EmbeddedData::FromBlob();
Address entry = d.InstructionStartOfBuiltin(builtin_index);
li(t9, Operand(entry, RelocInfo::OFF_HEAP_TARGET));
Call(t9, 0, cond, rs, rt, bd);
return;
}
}
DCHECK(RelocInfo::IsCodeTarget(rmode));
......@@ -3939,17 +3967,7 @@ void TurboAssembler::BranchLong(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm32;
imm32 = branch_long_offset(L);
or_(t8, ra, zero_reg);
nal(); // Read PC into ra register.
lui(t9, (imm32 & kHiMask) >> kLuiShift); // Branch delay slot.
ori(t9, t9, (imm32 & kImm16Mask));
addu(t9, ra, t9);
if (bdslot == USE_DELAY_SLOT) {
or_(ra, t8, zero_reg);
}
jr(t9);
// Emit a or_ in the branch delay slot if it's protected.
if (bdslot == PROTECT) or_(ra, t8, zero_reg);
GenPCRelativeJump(t8, t9, imm32, RelocInfo::NONE, bdslot);
}
}
......@@ -3962,13 +3980,7 @@ void TurboAssembler::BranchAndLinkLong(Label* L, BranchDelaySlot bdslot) {
BlockTrampolinePoolScope block_trampoline_pool(this);
int32_t imm32;
imm32 = branch_long_offset(L);
lui(t8, (imm32 & kHiMask) >> kLuiShift);
nal(); // Read PC into ra register.
ori(t8, t8, (imm32 & kImm16Mask)); // Branch delay slot.
addu(t8, ra, t8);
jalr(t8);
// Emit a nop in the branch delay slot if required.
if (bdslot == PROTECT) nop();
GenPCRelativeJumpAndLink(t8, imm32, RelocInfo::NONE, bdslot);
}
}
......
......@@ -61,12 +61,6 @@ enum LeaveExitFrameMode {
NO_EMIT_RETURN = false
};
// Allow programmer to use Branch Delay Slot of Branches, Jumps, Calls.
enum BranchDelaySlot {
USE_DELAY_SLOT,
PROTECT
};
// Flags used for the li macro-assembler function.
enum LiFlags {
// If the constant value can be represented in just 16 bits, then
......
......@@ -14561,12 +14561,11 @@ bool Code::IsIsolateIndependent(Isolate* isolate) {
bool is_process_independent = true;
for (RelocIterator it(this, mode_mask); !it.done(); it.next()) {
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM)
// On X64, ARM, ARM64 we emit relative builtin-to-builtin jumps for isolate
// independent builtins in the snapshot. They are later rewritten as
// pc-relative jumps to the off-heap instruction stream and are thus
// process-independent.
// See also: FinalizeEmbeddedCodeTargets.
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
// On X64, ARM, ARM64, MIPS we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. They are later
// rewritten as pc-relative jumps to the off-heap instruction stream and are
// thus process-independent. See also: FinalizeEmbeddedCodeTargets.
if (RelocInfo::IsCodeTargetMode(it.rinfo()->rmode())) {
Address target_address = it.rinfo()->target_address();
if (InstructionStream::PcIsOffHeap(isolate, target_address)) continue;
......
......@@ -329,10 +329,10 @@ void FinalizeEmbeddedCodeTargets(Isolate* isolate, EmbeddedData* blob) {
RelocIterator off_heap_it(blob, code, kRelocMask);
#if defined(V8_TARGET_ARCH_X64) || defined(V8_TARGET_ARCH_ARM64) || \
defined(V8_TARGET_ARCH_ARM)
// On X64, ARM, ARM64 we emit relative builtin-to-builtin jumps for isolate
// independent builtins in the snapshot. This fixes up the relative jumps
// to the right offsets in the snapshot.
defined(V8_TARGET_ARCH_ARM) || defined(V8_TARGET_ARCH_MIPS)
// On X64, ARM, ARM64, MIPS we emit relative builtin-to-builtin
// jumps for isolate independent builtins in the snapshot. This fixes up the
// relative jumps to the right offsets in the snapshot.
// See also: Code::IsIsolateIndependent.
while (!on_heap_it.done()) {
DCHECK(!off_heap_it.done());
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment