Commit d5036361 authored by Liu Yu, committed by V8 LUCI CQ

[mips][sparkplug] Port Sparkplug to mips and mips64

Bug: v8:11421

Change-Id: I1d3f8d3211d06d3e47ffd530715c1dbfbaf79fe8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2954905
Auto-Submit: Liu yu <liuyu@loongson.cn>
Reviewed-by: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Commit-Queue: Zhao Jiazhong <zhaojiazhong-hf@loongson.cn>
Cr-Commit-Position: refs/heads/master@{#75093}
parent d9726d1a
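For orientation: the diffs below add the per-architecture pieces Sparkplug needs on mips/mips64 (file names taken from the include lines and include guards in this CL):

// New baseline files:
//   src/baseline/mips/baseline-assembler-mips-inl.h      (collapsed below)
//   src/baseline/mips/baseline-compiler-mips-inl.h
//   src/baseline/mips64/baseline-assembler-mips64-inl.h  (collapsed below)
//   src/baseline/mips64/baseline-compiler-mips64-inl.h
// Plus mips/mips64 TurboAssembler support: CallBuiltin, LoadCodeObjectEntry,
// CallCodeObject, JumpCodeObject, and a simplified EnterFrame.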
......@@ -7,8 +7,9 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
+    V8_TARGET_ARCH_MIPS
#include <type_traits>
#include <unordered_map>
......@@ -30,6 +31,10 @@
#include "src/baseline/arm/baseline-assembler-arm-inl.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/baseline/riscv64/baseline-assembler-riscv64-inl.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/baseline/mips64/baseline-assembler-mips64-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/baseline/mips/baseline-assembler-mips-inl.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -7,8 +7,9 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
+    V8_TARGET_ARCH_MIPS
#include "src/codegen/macro-assembler.h"
#include "src/objects/tagged-index.h"
......
......@@ -6,8 +6,9 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
+    V8_TARGET_ARCH_MIPS
#include "src/baseline/baseline-compiler.h"
#include "src/codegen/compiler.h"
......
......@@ -5,8 +5,9 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
#include "src/base/bits.h"
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
+    V8_TARGET_ARCH_MIPS
#include <algorithm>
#include <type_traits>
......@@ -43,6 +44,10 @@
#include "src/baseline/arm/baseline-compiler-arm-inl.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/baseline/riscv64/baseline-compiler-riscv64-inl.h"
+#elif V8_TARGET_ARCH_MIPS64
+#include "src/baseline/mips64/baseline-compiler-mips64-inl.h"
+#elif V8_TARGET_ARCH_MIPS
+#include "src/baseline/mips/baseline-compiler-mips-inl.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -7,8 +7,9 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
+    V8_TARGET_ARCH_MIPS
#include "src/base/logging.h"
#include "src/base/threaded-list.h"
......
......@@ -8,8 +8,9 @@
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
+    V8_TARGET_ARCH_MIPS
#include "src/baseline/baseline-assembler-inl.h"
#include "src/baseline/baseline-compiler.h"
......
(Collapsed diff not shown: src/baseline/mips/baseline-assembler-mips-inl.h, the assembler file included above.)
New file: src/baseline/mips/baseline-compiler-mips-inl.h
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_
#define V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_

#include "src/base/logging.h"
#include "src/baseline/baseline-compiler.h"

namespace v8 {
namespace internal {
namespace baseline {

#define __ basm_.

void BaselineCompiler::Prologue() {
  __ masm()->EnterFrame(StackFrame::BASELINE);
  DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
  int max_frame_size = bytecode_->frame_size() + max_call_args_;
  CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
      kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
      max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
  PrologueFillFrame();
}

void BaselineCompiler::PrologueFillFrame() {
  __ RecordComment("[ Fill frame");
  // Inlined register frame fill
  interpreter::Register new_target_or_generator_register =
      bytecode_->incoming_new_target_or_generator_register();
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  int register_count = bytecode_->register_count();
  // Magic value
  const int kLoopUnrollSize = 8;
  const int new_target_index = new_target_or_generator_register.index();
  const bool has_new_target = new_target_index != kMaxInt;
  if (has_new_target) {
    DCHECK_LE(new_target_index, register_count);
    __ masm()->Addu(sp, sp, Operand(-(kPointerSize * new_target_index)));
    for (int i = 0; i < new_target_index; i++) {
      __ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4));
    }
    // Push new_target_or_generator.
    __ Push(kJavaScriptCallNewTargetRegister);
    register_count -= new_target_index + 1;
  }
  if (register_count < 2 * kLoopUnrollSize) {
    // If the frame is small enough, just unroll the frame fill completely.
    __ masm()->Addu(sp, sp, Operand(-(kPointerSize * register_count)));
    for (int i = 0; i < register_count; ++i) {
      __ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4));
    }
  } else {
    __ masm()->Addu(sp, sp, Operand(-(kPointerSize * register_count)));
    for (int i = 0; i < register_count; ++i) {
      __ masm()->Sw(kInterpreterAccumulatorRegister, MemOperand(sp, i * 4));
    }
  }
  __ RecordComment("]");
}
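// A worked example of the fill logic above, with illustrative numbers only:
// suppose register_count = 10 and new_target_index = 2 (kPointerSize is 4 on
// mips):
//   sp -= 2 * 4;  store Undefined into slots 0..1 (below new_target)
//   Push(kJavaScriptCallNewTargetRegister)        -> fills slot 2
//   register_count = 10 - (2 + 1) = 7
//   7 < 2 * kLoopUnrollSize (16), so the remaining 7 stores are emitted
//   unrolled: sp -= 7 * 4; store Undefined into slots 0..6.
// Note that both branches of the if/else above emit the same fully unrolled
// sequence; this port does not emit a loop for large frames.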
void BaselineCompiler::VerifyFrameSize() {
  __ masm()->Addu(kScratchReg, sp,
                  Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
                          bytecode_->frame_size()));
  __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
                    Operand(fp));
}

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_MIPS_BASELINE_COMPILER_MIPS_INL_H_
(Collapsed diff not shown: src/baseline/mips64/baseline-assembler-mips64-inl.h, the assembler file included above.)
New file: src/baseline/mips64/baseline-compiler-mips64-inl.h
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_BASELINE_MIPS64_BASELINE_COMPILER_MIPS64_INL_H_
#define V8_BASELINE_MIPS64_BASELINE_COMPILER_MIPS64_INL_H_

#include "src/base/logging.h"
#include "src/baseline/baseline-compiler.h"

namespace v8 {
namespace internal {
namespace baseline {

#define __ basm_.

void BaselineCompiler::Prologue() {
  __ masm()->EnterFrame(StackFrame::BASELINE);
  DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
  int max_frame_size = bytecode_->frame_size() + max_call_args_;
  CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
      kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
      max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
  PrologueFillFrame();
}

void BaselineCompiler::PrologueFillFrame() {
  __ RecordComment("[ Fill frame");
  // Inlined register frame fill
  interpreter::Register new_target_or_generator_register =
      bytecode_->incoming_new_target_or_generator_register();
  __ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
  int register_count = bytecode_->register_count();
  // Magic value
  const int kLoopUnrollSize = 8;
  const int new_target_index = new_target_or_generator_register.index();
  const bool has_new_target = new_target_index != kMaxInt;
  if (has_new_target) {
    DCHECK_LE(new_target_index, register_count);
    __ masm()->Daddu(sp, sp, Operand(-(kPointerSize * new_target_index)));
    for (int i = 0; i < new_target_index; i++) {
      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
    }
    // Push new_target_or_generator.
    __ Push(kJavaScriptCallNewTargetRegister);
    register_count -= new_target_index + 1;
  }
  if (register_count < 2 * kLoopUnrollSize) {
    // If the frame is small enough, just unroll the frame fill completely.
    __ masm()->Daddu(sp, sp, Operand(-(kPointerSize * register_count)));
    for (int i = 0; i < register_count; ++i) {
      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
    }
  } else {
    __ masm()->Daddu(sp, sp, Operand(-(kPointerSize * register_count)));
    for (int i = 0; i < register_count; ++i) {
      __ masm()->Sd(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
    }
  }
  __ RecordComment("]");
}

void BaselineCompiler::VerifyFrameSize() {
  __ masm()->Daddu(kScratchReg, sp,
                   Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
                           bytecode_->frame_size()));
  __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
                    Operand(fp));
}

}  // namespace baseline
}  // namespace internal
}  // namespace v8

#endif  // V8_BASELINE_MIPS64_BASELINE_COMPILER_MIPS64_INL_H_
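The mips64 file mirrors the 32-bit version; only the width-dependent instructions and slot sizes differ. A summary of the correspondences used in the two files above:

// mips (32-bit)                      mips64 (64-bit)
// Addu(sp, sp, ...)                  Daddu(sp, sp, ...)
// Sw(acc, MemOperand(sp, i * 4))     Sd(acc, MemOperand(sp, i * 8))
// kPointerSize == 4                  kPointerSize == 8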
......@@ -988,8 +988,9 @@ void Builtins::Generate_MemMove(MacroAssembler* masm) {
// TODO(v8:11421): Remove #if once baseline compiler is ported to other
// architectures.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
+    V8_TARGET_ARCH_MIPS
void Builtins::Generate_BaselineLeaveFrame(MacroAssembler* masm) {
EmitReturnBaseline(masm);
}
......
(Two further collapsed diffs are not shown here.)
......@@ -313,7 +313,7 @@ constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
// TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
-    V8_TARGET_ARCH_RISCV64
+    V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
return RegisterArray(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister,
......@@ -331,9 +331,10 @@ constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
// static
constexpr auto BaselineLeaveFrameDescriptor::registers() {
// TODO(v8:11421): Implement on other platforms.
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
-    V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
+    V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
+    V8_TARGET_ARCH_MIPS
return RegisterArray(ParamsSizeRegister(), WeightRegister());
#else
return DefaultRegisterArray();
......
......@@ -1918,6 +1918,17 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
Register Acquire();
bool hasAvailable() const;
+  void Include(const RegList& list) { *available_ |= list; }
+  void Exclude(const RegList& list) { *available_ &= ~list; }
+  void Include(const Register& reg1, const Register& reg2 = no_reg) {
+    RegList list(reg1.bit() | reg2.bit());
+    Include(list);
+  }
+  void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
+    RegList list(reg1.bit() | reg2.bit());
+    Exclude(list);
+  }
private:
RegList* available_;
RegList old_available_;
......
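The new Include/Exclude helpers make scratch-register management explicit. A minimal usage sketch (hypothetical call site; the register choices are illustrative, and the destructor restores old_available_ on scope exit):

UseScratchRegisterScope temps(this);
temps.Include(t4, t5);           // make t4/t5 acquirable in this scope
Register tmp = temps.Acquire();  // removes one register from *available_
temps.Exclude(t5);               // t5 may no longer be handed out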
......@@ -73,14 +73,13 @@ constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
// static
constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
-  // TODO(v8:11421): Implement on this platform.
-  return a3;
+  return a2;
}
// static
constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
-  // TODO(v8:11421): Implement on this platform.
-  return t0;
+  return a3;
}
// static
......@@ -188,6 +187,9 @@ constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
// static
constexpr auto Compare_BaselineDescriptor::registers() {
+  // a1: left operand
+  // a0: right operand
+  // a2: feedback slot
return RegisterArray(a1, a0, a2);
}
......
......@@ -3952,7 +3952,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, rs, rt, bd);
}
-void TurboAssembler::LoadEntryFromBuiltin(Register builtin_index) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 4);
STATIC_ASSERT(kSmiShiftSize == 0);
STATIC_ASSERT(kSmiTagSize == 1);
......@@ -3964,11 +3964,29 @@ void TurboAssembler::LoadEntryFromBuiltin(Register builtin_index) {
lw(builtin_index,
MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
}
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin_index,
+                                          Register destination) {
+  Lw(destination, EntryFromBuiltinAsOperand(builtin_index));
+}
+
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin_index) {
+  DCHECK(root_array_available());
+  return MemOperand(kRootRegister,
+                    IsolateData::builtin_entry_slot_offset(builtin_index));
+}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
-  LoadEntryFromBuiltin(builtin_index);
+  LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
+void TurboAssembler::CallBuiltin(int builtin_index) {
+  DCHECK(Builtins::IsBuiltinId(builtin_index));
+  RecordCommentForOffHeapTrampoline(builtin_index);
+  CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+  EmbeddedData d = EmbeddedData::FromBlob(isolate());
+  Address entry = d.InstructionStartOfBuiltin(builtin_index);
+  Call(entry, RelocInfo::OFF_HEAP_TARGET);
+  if (FLAG_code_comments) RecordComment("]");
+}
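For CallBuiltin above, the target address comes straight from the embedded blob, so no on-heap Code object is involved. A hypothetical call site, expanded per the overloads declared in the header diff further below:

CallBuiltin(Builtin::kBaselineOutOfLinePrologue);
// -> CallBuiltin(static_cast<int>(builtin))
// -> Call(EmbeddedData::FromBlob(isolate()).InstructionStartOfBuiltin(index),
//         RelocInfo::OFF_HEAP_TARGET);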
void TurboAssembler::PatchAndJump(Address target) {
if (kArchVariant != kMips32r6) {
......@@ -4819,19 +4837,12 @@ void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
BlockTrampolinePoolScope block_trampoline_pool(this);
-  int stack_offset = -3 * kPointerSize;
-  const int fp_offset = 1 * kPointerSize;
-  addiu(sp, sp, stack_offset);
-  stack_offset = -stack_offset - kPointerSize;
-  sw(ra, MemOperand(sp, stack_offset));
-  stack_offset -= kPointerSize;
-  sw(fp, MemOperand(sp, stack_offset));
-  stack_offset -= kPointerSize;
-  li(t9, Operand(StackFrame::TypeToMarker(type)));
-  sw(t9, MemOperand(sp, stack_offset));
-  // Adjust FP to point to saved FP.
-  DCHECK_EQ(stack_offset, 0);
-  Addu(fp, sp, Operand(fp_offset));
+  Push(ra, fp);
+  Move(fp, sp);
+  if (!StackFrame::IsJavaScript(type)) {
+    li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
+    Push(kScratchReg);
+  }
}
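The rewritten EnterFrame emits the standard two-slot frame link. A sketch of the resulting layout, on my reading of the code above (BASELINE counts as a JavaScript frame type, so it gets no marker slot here):

// After EnterFrame(type):
//   [ saved ra ]  <- fp + kPointerSize
//   [ saved fp ]  <- fp == sp for JavaScript frame types
//   [ marker   ]  <- sp, pushed only when !StackFrame::IsJavaScript(type)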
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
......@@ -5560,6 +5571,62 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
}
}
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+                                         Register code_object) {
+  // Code objects are called differently depending on whether we are generating
+  // builtin code (which will later be embedded into the binary) or compiling
+  // user JS code at runtime.
+  // * Builtin code runs in --jitless mode and thus must not call into on-heap
+  //   Code targets. Instead, we dispatch through the builtins entry table.
+  // * Codegen at runtime does not have this restriction and we can use the
+  //   shorter, branchless instruction sequence. The assumption here is that
+  //   targets are usually generated code and not builtin Code objects.
+  if (options().isolate_independent_code) {
+    DCHECK(root_array_available());
+    Label if_code_is_off_heap, out;
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    DCHECK(!AreAliased(destination, scratch));
+    DCHECK(!AreAliased(code_object, scratch));
+    // Check whether the Code object is an off-heap trampoline. If so, call its
+    // (off-heap) entry point directly without going through the (on-heap)
+    // trampoline. Otherwise, just call the Code object as always.
+    Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+    And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+    Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
+    // Not an off-heap trampoline object, the entry point is at
+    // Code::raw_instruction_start().
+    Addu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+    Branch(&out);
+    // An off-heap trampoline, the entry point is loaded from the builtin entry
+    // table.
+    bind(&if_code_is_off_heap);
+    Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+    Lsa(destination, kRootRegister, scratch, kSystemPointerSizeLog2);
+    Lw(destination,
+       MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+    bind(&out);
+  } else {
+    Addu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+  }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+  LoadCodeObjectEntry(code_object, code_object);
+  Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+  DCHECK_EQ(JumpMode::kJump, jump_mode);
+  LoadCodeObjectEntry(code_object, code_object);
+  Jump(code_object);
+}
} // namespace internal
} // namespace v8
......
......@@ -13,6 +13,7 @@
#include "src/codegen/mips/assembler-mips.h"
#include "src/common/globals.h"
#include "src/objects/contexts.h"
#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
......@@ -192,6 +193,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
void LoadRootRelative(Register destination, int32_t offset) final;
inline void Move(Register output, MemOperand operand) { Lw(output, operand); }
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS \
Condition cond = al, Register rs = zero_reg, \
......@@ -219,22 +222,22 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
-  void LoadEntryFromBuiltin(Register builtin_index);
-  void CallBuiltinByIndex(Register builtin_index);
+  void LoadEntryFromBuiltinIndex(Register builtin_index);
+  void LoadEntryFromBuiltin(Builtin builtin_index, Register destination);
+  MemOperand EntryFromBuiltinAsOperand(Builtin builtin_index);
-  void LoadCodeObjectEntry(Register destination, Register code_object) {
-    // TODO(mips): Implement.
-    UNIMPLEMENTED();
-  }
-  void CallCodeObject(Register code_object) {
-    // TODO(mips): Implement.
-    UNIMPLEMENTED();
-  }
+  void CallBuiltinByIndex(Register builtin_index);
+  void CallBuiltin(Builtin builtin) {
+    // TODO(11527): drop the int overload in favour of the Builtin one.
+    return CallBuiltin(static_cast<int>(builtin));
+  }
+  void CallBuiltin(int builtin_index);
+  void LoadCodeObjectEntry(Register destination, Register code_object);
+  void CallCodeObject(Register code_object);
  void JumpCodeObject(Register code_object,
-                      JumpMode jump_mode = JumpMode::kJump) {
-    // TODO(mips): Implement.
-    UNIMPLEMENTED();
-  }
+                      JumpMode jump_mode = JumpMode::kJump);
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
......@@ -803,8 +806,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void Trunc_uw_d(Register rd, FPURegister fs, FPURegister scratch);
  // Jump if the register contains a smi.
-  void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
-                 BranchDelaySlot bd = PROTECT);
+  void JumpIfSmi(Register value, Label* smi_label,
+                 Register scratch = kScratchReg, BranchDelaySlot bd = PROTECT);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
li(kScratchReg, Operand(b));
......
......@@ -1948,6 +1948,17 @@ class V8_EXPORT_PRIVATE V8_NODISCARD UseScratchRegisterScope {
Register Acquire();
bool hasAvailable() const;
+  void Include(const RegList& list) { *available_ |= list; }
+  void Exclude(const RegList& list) { *available_ &= ~list; }
+  void Include(const Register& reg1, const Register& reg2 = no_reg) {
+    RegList list(reg1.bit() | reg2.bit());
+    Include(list);
+  }
+  void Exclude(const Register& reg1, const Register& reg2 = no_reg) {
+    RegList list(reg1.bit() | reg2.bit());
+    Exclude(list);
+  }
private:
RegList* available_;
RegList old_available_;
......
......@@ -73,15 +73,11 @@ constexpr Register GrowArrayElementsDescriptor::KeyRegister() { return a3; }
// static
constexpr Register BaselineLeaveFrameDescriptor::ParamsSizeRegister() {
-  // TODO(v8:11421): Implement on this platform.
-  return a3;
+  return a2;
}
// static
-constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() {
-  // TODO(v8:11421): Implement on this platform.
-  return a4;
-}
+constexpr Register BaselineLeaveFrameDescriptor::WeightRegister() { return a3; }
// static
constexpr Register TypeConversionDescriptor::ArgumentRegister() { return a0; }
......@@ -188,7 +184,9 @@ constexpr auto CompareDescriptor::registers() { return RegisterArray(a1, a0); }
// static
constexpr auto Compare_BaselineDescriptor::registers() {
-  // TODO(v8:11421): Implement on this platform.
+  // a1: left operand
+  // a0: right operand
+  // a2: feedback slot
return RegisterArray(a1, a0, a2);
}
......@@ -197,6 +195,9 @@ constexpr auto BinaryOpDescriptor::registers() { return RegisterArray(a1, a0); }
// static
constexpr auto BinaryOp_BaselineDescriptor::registers() {
+  // a1: left operand
+  // a0: right operand
+  // a2: feedback slot
return RegisterArray(a1, a0, a2);
}
......
......@@ -4442,7 +4442,7 @@ void TurboAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
Call(code.address(), rmode, cond, rs, rt, bd);
}
-void TurboAssembler::LoadEntryFromBuiltin(Register builtin_index) {
+void TurboAssembler::LoadEntryFromBuiltinIndex(Register builtin_index) {
STATIC_ASSERT(kSystemPointerSize == 8);
STATIC_ASSERT(kSmiTagSize == 1);
STATIC_ASSERT(kSmiTag == 0);
......@@ -4453,11 +4453,29 @@ void TurboAssembler::LoadEntryFromBuiltin(Register builtin_index) {
Ld(builtin_index,
MemOperand(builtin_index, IsolateData::builtin_entry_table_offset()));
}
+void TurboAssembler::LoadEntryFromBuiltin(Builtin builtin_index,
+                                          Register destination) {
+  Ld(destination, EntryFromBuiltinAsOperand(builtin_index));
+}
+
+MemOperand TurboAssembler::EntryFromBuiltinAsOperand(Builtin builtin_index) {
+  DCHECK(root_array_available());
+  return MemOperand(kRootRegister,
+                    IsolateData::builtin_entry_slot_offset(builtin_index));
+}
void TurboAssembler::CallBuiltinByIndex(Register builtin_index) {
-  LoadEntryFromBuiltin(builtin_index);
+  LoadEntryFromBuiltinIndex(builtin_index);
Call(builtin_index);
}
+void TurboAssembler::CallBuiltin(int builtin_index) {
+  DCHECK(Builtins::IsBuiltinId(builtin_index));
+  RecordCommentForOffHeapTrampoline(builtin_index);
+  CHECK_NE(builtin_index, Builtins::kNoBuiltinId);
+  EmbeddedData d = EmbeddedData::FromBlob(isolate());
+  Address entry = d.InstructionStartOfBuiltin(builtin_index);
+  Call(entry, RelocInfo::OFF_HEAP_TARGET);
+  if (FLAG_code_comments) RecordComment("]");
+}
void TurboAssembler::PatchAndJump(Address target) {
if (kArchVariant != kMips64r6) {
......@@ -5340,19 +5358,12 @@ void TurboAssembler::Prologue() { PushStandardFrame(a1); }
void TurboAssembler::EnterFrame(StackFrame::Type type) {
BlockTrampolinePoolScope block_trampoline_pool(this);
-  int stack_offset = -3 * kPointerSize;
-  const int fp_offset = 1 * kPointerSize;
-  daddiu(sp, sp, stack_offset);
-  stack_offset = -stack_offset - kPointerSize;
-  Sd(ra, MemOperand(sp, stack_offset));
-  stack_offset -= kPointerSize;
-  Sd(fp, MemOperand(sp, stack_offset));
-  stack_offset -= kPointerSize;
-  li(t9, Operand(StackFrame::TypeToMarker(type)));
-  Sd(t9, MemOperand(sp, stack_offset));
-  // Adjust FP to point to saved FP.
-  DCHECK_EQ(stack_offset, 0);
-  Daddu(fp, sp, Operand(fp_offset));
+  Push(ra, fp);
+  Move(fp, sp);
+  if (!StackFrame::IsJavaScript(type)) {
+    li(kScratchReg, Operand(StackFrame::TypeToMarker(type)));
+    Push(kScratchReg);
+  }
}
void TurboAssembler::LeaveFrame(StackFrame::Type type) {
......@@ -6091,6 +6102,63 @@ void TurboAssembler::CallForDeoptimization(Builtin target, int, Label* exit,
}
}
+void TurboAssembler::LoadCodeObjectEntry(Register destination,
+                                         Register code_object) {
+  // Code objects are called differently depending on whether we are generating
+  // builtin code (which will later be embedded into the binary) or compiling
+  // user JS code at runtime.
+  // * Builtin code runs in --jitless mode and thus must not call into on-heap
+  //   Code targets. Instead, we dispatch through the builtins entry table.
+  // * Codegen at runtime does not have this restriction and we can use the
+  //   shorter, branchless instruction sequence. The assumption here is that
+  //   targets are usually generated code and not builtin Code objects.
+  if (options().isolate_independent_code) {
+    DCHECK(root_array_available());
+    Label if_code_is_off_heap, out;
+    UseScratchRegisterScope temps(this);
+    Register scratch = temps.Acquire();
+    DCHECK(!AreAliased(destination, scratch));
+    DCHECK(!AreAliased(code_object, scratch));
+    // Check whether the Code object is an off-heap trampoline. If so, call its
+    // (off-heap) entry point directly without going through the (on-heap)
+    // trampoline. Otherwise, just call the Code object as always.
+    Lw(scratch, FieldMemOperand(code_object, Code::kFlagsOffset));
+    And(scratch, scratch, Operand(Code::IsOffHeapTrampoline::kMask));
+    Branch(&if_code_is_off_heap, ne, scratch, Operand(zero_reg));
+    // Not an off-heap trampoline object, the entry point is at
+    // Code::raw_instruction_start().
+    Daddu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+    Branch(&out);
+    // An off-heap trampoline, the entry point is loaded from the builtin entry
+    // table.
+    bind(&if_code_is_off_heap);
+    Lw(scratch, FieldMemOperand(code_object, Code::kBuiltinIndexOffset));
+    Dlsa(destination, kRootRegister, scratch, kSystemPointerSizeLog2);
+    Ld(destination,
+       MemOperand(destination, IsolateData::builtin_entry_table_offset()));
+    bind(&out);
+  } else {
+    Daddu(destination, code_object, Code::kHeaderSize - kHeapObjectTag);
+  }
+}
+
+void TurboAssembler::CallCodeObject(Register code_object) {
+  LoadCodeObjectEntry(code_object, code_object);
+  Call(code_object);
+}
+
+void TurboAssembler::JumpCodeObject(Register code_object, JumpMode jump_mode) {
+  DCHECK_EQ(JumpMode::kJump, jump_mode);
+  LoadCodeObjectEntry(code_object, code_object);
+  Jump(code_object);
+}
} // namespace internal
} // namespace v8
......
......@@ -12,6 +12,7 @@
#include "src/codegen/assembler.h"
#include "src/codegen/mips64/assembler-mips64.h"
#include "src/common/globals.h"
#include "src/objects/tagged-index.h"
namespace v8 {
namespace internal {
......@@ -219,6 +220,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void LoadRootRegisterOffset(Register destination, intptr_t offset) final;
void LoadRootRelative(Register destination, int32_t offset) final;
inline void Move(Register output, MemOperand operand) { Ld(output, operand); }
// Jump, Call, and Ret pseudo instructions implementing inter-working.
#define COND_ARGS \
Condition cond = al, Register rs = zero_reg, \
......@@ -243,22 +246,21 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
// Load the builtin given by the Smi in |builtin_index| into the same
// register.
-  void LoadEntryFromBuiltin(Register builtin_index);
-  void CallBuiltinByIndex(Register builtin_index);
+  void LoadEntryFromBuiltinIndex(Register builtin_index);
+  void LoadEntryFromBuiltin(Builtin builtin_index, Register destination);
+  MemOperand EntryFromBuiltinAsOperand(Builtin builtin_index);
-  void LoadCodeObjectEntry(Register destination, Register code_object) {
-    // TODO(mips): Implement.
-    UNIMPLEMENTED();
-  }
-  void CallCodeObject(Register code_object) {
-    // TODO(mips): Implement.
-    UNIMPLEMENTED();
-  }
+  void CallBuiltinByIndex(Register builtin_index);
+  void CallBuiltin(Builtin builtin) {
+    // TODO(11527): drop the int overload in favour of the Builtin one.
+    return CallBuiltin(static_cast<int>(builtin));
+  }
+  void CallBuiltin(int builtin_index);
+  void LoadCodeObjectEntry(Register destination, Register code_object);
+  void CallCodeObject(Register code_object);
  void JumpCodeObject(Register code_object,
-                      JumpMode jump_mode = JumpMode::kJump) {
-    // TODO(mips): Implement.
-    UNIMPLEMENTED();
-  }
+                      JumpMode jump_mode = JumpMode::kJump);
// Generates an instruction sequence s.t. the return address points to the
// instruction following the call.
......@@ -815,8 +817,8 @@ class V8_EXPORT_PRIVATE TurboAssembler : public TurboAssemblerBase {
void MSARoundD(MSARegister dst, MSARegister src, FPURoundingMode mode);
  // Jump if the register contains a smi.
-  void JumpIfSmi(Register value, Label* smi_label, Register scratch = at,
-                 BranchDelaySlot bd = PROTECT);
+  void JumpIfSmi(Register value, Label* smi_label,
+                 Register scratch = kScratchReg, BranchDelaySlot bd = PROTECT);
void JumpIfEqual(Register a, int32_t b, Label* dest) {
li(kScratchReg, Operand(b));
......
......@@ -181,8 +181,9 @@ struct MaybeBoolFlag {
#define ENABLE_CONTROL_FLOW_INTEGRITY_BOOL false
#endif
-#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
-    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64
+#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
+    V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
+    V8_TARGET_ARCH_MIPS
#define ENABLE_SPARKPLUG true
#else
// TODO(v8:11421): Enable Sparkplug for other architectures
......
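Presumably ENABLE_SPARKPLUG then feeds the default of the --sparkplug runtime flag further down in flag-definitions.h; a sketch of that pattern (an assumption for illustration, not part of this hunk):

#if ENABLE_SPARKPLUG
DEFINE_BOOL(sparkplug, false, "enable experimental Sparkplug baseline compiler")
#else
DEFINE_BOOL_READONLY(sparkplug, false,
                     "enable experimental Sparkplug baseline compiler")
#endif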
......@@ -152,7 +152,7 @@
##############################################################################
# Tests requiring Sparkplug.
-['arch not in (x64, arm64, ia32, arm)', {
+['arch not in (x64, arm64, ia32, arm, mips64el, mipsel)', {
'regress/regress-crbug-1199681': [SKIP],
}],
......
......@@ -1477,7 +1477,7 @@
##############################################################################
# TODO(v8:11421): Port baseline compiler to other architectures.
-['arch not in (x64, arm64, ia32, arm)', {
+['arch not in (x64, arm64, ia32, arm, mips64el, mipsel)', {
'baseline/*': [SKIP],
}],
......