Commit 816e9fa3 authored by Yu Yin, committed by V8 LUCI CQ

[LOONG64] Add LoongArch64 backend

Bug: v8:12008
Change-Id: I2e1d918a1370dae1e15919fbf02d69cbe48f63bf
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3089095
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Jakob Gruber <jgruber@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Commit-Queue: Jakob Gruber <jgruber@chromium.org>
Cr-Commit-Position: refs/heads/master@{#76308}
parent c1495518
@@ -286,7 +286,9 @@ declare_args() {
cppgc_enable_object_names = false
# Enable heap reservation of size 4GB. Only possible for 64bit archs.
-  cppgc_enable_caged_heap = v8_current_cpu == "x64" || v8_current_cpu == "arm64"
+  cppgc_enable_caged_heap =
+      v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
+      v8_current_cpu == "loong64"
# Enable verification of live bytes in the marking verifier.
# TODO(v8:11785): Enable by default when running with the verifier.
@@ -506,7 +508,7 @@ assert(!v8_enable_unconditional_write_barriers || !v8_disable_write_barriers,
"Write barriers can't be both enabled and disabled")
assert(!cppgc_enable_caged_heap || v8_current_cpu == "x64" ||
v8_current_cpu == "arm64",
v8_current_cpu == "arm64" || v8_current_cpu == "loong64",
"CppGC caged heap requires 64bit platforms")
assert(!cppgc_enable_young_generation || cppgc_enable_caged_heap,
@@ -1062,6 +1064,15 @@ config("toolchain") {
defines += [ "_MIPS_ARCH_MIPS64R2" ]
}
}
# loong64 simulators.
if (target_is_simulator && v8_current_cpu == "loong64") {
defines += [ "_LOONG64_TARGET_SIMULATOR" ]
}
if (v8_current_cpu == "loong64") {
defines += [ "V8_TARGET_ARCH_LOONG64" ]
}
if (v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
defines += [ "V8_TARGET_ARCH_S390" ]
cflags += [ "-ffp-contract=off" ]
@@ -2208,6 +2219,11 @@ v8_source_set("v8_initializers") {
### gcmole(arch:mips64el) ###
"src/builtins/mips64/builtins-mips64.cc",
]
} else if (v8_current_cpu == "loong64") {
sources += [
### gcmole(arch:loong64) ###
"src/builtins/loong64/builtins-loong64.cc",
]
} else if (v8_current_cpu == "ppc") {
sources += [
### gcmole(arch:ppc) ###
@@ -3421,6 +3437,21 @@ v8_header_set("v8_internal_headers") {
"src/regexp/mips64/regexp-macro-assembler-mips64.h",
"src/wasm/baseline/mips64/liftoff-assembler-mips64.h",
]
} else if (v8_current_cpu == "loong64") {
sources += [ ### gcmole(arch:loong64) ###
"src/baseline/loong64/baseline-assembler-loong64-inl.h",
"src/baseline/loong64/baseline-compiler-loong64-inl.h",
"src/codegen/loong64/assembler-loong64-inl.h",
"src/codegen/loong64/assembler-loong64.h",
"src/codegen/loong64/constants-loong64.h",
"src/codegen/loong64/macro-assembler-loong64.h",
"src/codegen/loong64/register-loong64.h",
"src/compiler/backend/loong64/instruction-codes-loong64.h",
"src/execution/loong64/frame-constants-loong64.h",
"src/execution/loong64/simulator-loong64.h",
"src/regexp/loong64/regexp-macro-assembler-loong64.h",
"src/wasm/baseline/loong64/liftoff-assembler-loong64.h",
]
} else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
"src/codegen/ppc/assembler-ppc-inl.h",
@@ -4339,6 +4370,23 @@ v8_source_set("v8_base_without_compiler") {
"src/execution/mips64/simulator-mips64.cc",
"src/regexp/mips64/regexp-macro-assembler-mips64.cc",
]
} else if (v8_current_cpu == "loong64") {
sources += [ ### gcmole(arch:loong64) ###
"src/codegen/loong64/assembler-loong64.cc",
"src/codegen/loong64/constants-loong64.cc",
"src/codegen/loong64/cpu-loong64.cc",
"src/codegen/loong64/interface-descriptors-loong64-inl.h",
"src/codegen/loong64/macro-assembler-loong64.cc",
"src/compiler/backend/loong64/code-generator-loong64.cc",
"src/compiler/backend/loong64/instruction-scheduler-loong64.cc",
"src/compiler/backend/loong64/instruction-selector-loong64.cc",
"src/deoptimizer/loong64/deoptimizer-loong64.cc",
"src/diagnostics/loong64/disasm-loong64.cc",
"src/diagnostics/loong64/unwinder-loong64.cc",
"src/execution/loong64/frame-constants-loong64.cc",
"src/execution/loong64/simulator-loong64.cc",
"src/regexp/loong64/regexp-macro-assembler-loong64.cc",
]
} else if (v8_current_cpu == "ppc") {
sources += [ ### gcmole(arch:ppc) ###
"src/codegen/ppc/assembler-ppc.cc",
@@ -5040,6 +5088,8 @@ v8_source_set("v8_cppgc_shared") {
sources += [ "src/heap/base/asm/mips/push_registers_asm.cc" ]
} else if (current_cpu == "mips64el") {
sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
} else if (current_cpu == "loong64") {
sources += [ "src/heap/base/asm/loong64/push_registers_asm.cc" ]
} else if (current_cpu == "riscv64") {
sources += [ "src/heap/base/asm/riscv64/push_registers_asm.cc" ]
}
liuyu@loongson.cn
yuyin-hf@loongson.cn
zhaojiazhong-hf@loongson.cn
@@ -27,6 +27,7 @@ per-file codereview.settings=file:INFRA_OWNERS
per-file AUTHORS=file:COMMON_OWNERS
per-file WATCHLISTS=file:COMMON_OWNERS
per-file ...-loong64*=file:LOONG_OWNERS
per-file ...-mips*=file:MIPS_OWNERS
per-file ...-mips64*=file:MIPS_OWNERS
per-file ...-ppc*=file:PPC_OWNERS
@@ -84,7 +84,7 @@ if (v8_snapshot_toolchain == "") {
if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
_cpus = v8_current_cpu
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" ||
v8_current_cpu == "riscv64") {
v8_current_cpu == "riscv64" || v8_current_cpu == "loong64") {
if (is_win && v8_current_cpu == "arm64") {
# set _cpus to blank for Windows ARM64 so host_toolchain could be
# selected as snapshot toolchain later.
@@ -19,7 +19,8 @@ struct CalleeSavedRegisters {
};
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \
-    V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390
+    V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || \
+    V8_TARGET_ARCH_LOONG64
struct CalleeSavedRegisters {};
#else
#error Target architecture was not detected as supported by v8
@@ -33,6 +33,9 @@
#elif defined(__MIPSEB__) || defined(__MIPSEL__)
#define V8_HOST_ARCH_MIPS 1
#define V8_HOST_ARCH_32_BIT 1
#elif defined(__loongarch64)
#define V8_HOST_ARCH_LOONG64 1
#define V8_HOST_ARCH_64_BIT 1
#elif defined(__PPC64__) || defined(_ARCH_PPC64)
#define V8_HOST_ARCH_PPC64 1
#define V8_HOST_ARCH_64_BIT 1
@@ -83,7 +86,7 @@
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
!V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \
-    !V8_TARGET_ARCH_RISCV64
+    !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
@@ -128,6 +131,8 @@
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_MIPS64
#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_LOONG64
#define V8_TARGET_ARCH_64_BIT 1
#elif V8_TARGET_ARCH_PPC
#define V8_TARGET_ARCH_32_BIT 1
#elif V8_TARGET_ARCH_PPC64
@@ -171,6 +176,9 @@
#if (V8_TARGET_ARCH_RISCV64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_RISCV64))
#error Target architecture riscv64 is only supported on riscv64 and x64 host
#endif
#if (V8_TARGET_ARCH_LOONG64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_LOONG64))
#error Target architecture loong64 is only supported on loong64 and x64 host
#endif
// Determine architecture endianness.
#if V8_TARGET_ARCH_IA32
@@ -181,6 +189,8 @@
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_ARM64
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_LOONG64
#define V8_TARGET_LITTLE_ENDIAN 1
#elif V8_TARGET_ARCH_MIPS
#if defined(__MIPSEB__)
#define V8_TARGET_BIG_ENDIAN 1
@@ -341,6 +341,10 @@ void* OS::GetRandomMmapAddr() {
// TODO(RISCV): We need more information from the kernel to correctly mask
// this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
raw_addr &= uint64_t{0xFFFFFF0000};
#elif V8_TARGET_ARCH_LOONG64
// 42 bits of virtual addressing. Truncate to 40 bits to give the kernel a
// chance to fulfill the request.
raw_addr &= uint64_t{0xFFFFFF0000};
#else
raw_addr &= 0x3FFFF000;
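A quick sanity check of the new mask (a sketch, not from the commit): 0xFFFFFF0000 keeps address bits 16 through 39, so a masked hint always fits in 40 bits of virtual address space and is 64 KiB aligned.

#include <cassert>
#include <cstdint>

int main() {
  uint64_t raw_addr = 0x123456789ABCDEF0;  // arbitrary random value
  raw_addr &= uint64_t{0xFFFFFF0000};      // the LOONG64 mask above
  assert(raw_addr < (uint64_t{1} << 40));  // fits in 40 bits of VA
  assert(raw_addr % 0x10000 == 0);         // 64 KiB aligned
  return 0;
}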
@@ -544,6 +548,8 @@ void OS::DebugBreak() {
asm("break");
#elif V8_HOST_ARCH_MIPS64
asm("break");
#elif V8_HOST_ARCH_LOONG64
asm("break 0");
#elif V8_HOST_ARCH_PPC || V8_HOST_ARCH_PPC64
asm("twge 2,2");
#elif V8_HOST_ARCH_IA32
@@ -34,6 +34,8 @@
#include "src/baseline/mips64/baseline-assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-assembler-mips-inl.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/baseline/loong64/baseline-assembler-loong64-inl.h"
#else
#error Unsupported target architecture.
#endif
@@ -48,6 +48,8 @@
#include "src/baseline/mips64/baseline-compiler-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/baseline/mips/baseline-compiler-mips-inl.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/baseline/loong64/baseline-compiler-loong64-inl.h"
#else
#error Unsupported target architecture.
#endif
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
#define V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
#include "src/base/logging.h"
#include "src/baseline/baseline-compiler.h"
namespace v8 {
namespace internal {
namespace baseline {
#define __ basm_.
void BaselineCompiler::Prologue() {
ASM_CODE_COMMENT(&masm_);
__ masm()->EnterFrame(StackFrame::BASELINE);
DCHECK_EQ(kJSFunctionRegister, kJavaScriptCallTargetRegister);
int max_frame_size =
bytecode_->frame_size() + max_call_args_ * kSystemPointerSize;
CallBuiltin<Builtin::kBaselineOutOfLinePrologue>(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
max_frame_size, kJavaScriptCallNewTargetRegister, bytecode_);
PrologueFillFrame();
}
void BaselineCompiler::PrologueFillFrame() {
ASM_CODE_COMMENT(&masm_);
// Inlined register frame fill
interpreter::Register new_target_or_generator_register =
bytecode_->incoming_new_target_or_generator_register();
__ LoadRoot(kInterpreterAccumulatorRegister, RootIndex::kUndefinedValue);
int register_count = bytecode_->register_count();
// Magic value
const int kLoopUnrollSize = 8;
const int new_target_index = new_target_or_generator_register.index();
const bool has_new_target = new_target_index != kMaxInt;
if (has_new_target) {
DCHECK_LE(new_target_index, register_count);
__ masm()->Add_d(sp, sp, Operand(-(kPointerSize * new_target_index)));
for (int i = 0; i < new_target_index; i++) {
__ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
// Push new_target_or_generator.
__ Push(kJavaScriptCallNewTargetRegister);
register_count -= new_target_index + 1;
}
if (register_count < 2 * kLoopUnrollSize) {
// If the frame is small enough, just unroll the frame fill completely.
__ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
for (int i = 0; i < register_count; ++i) {
__ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
} else {
__ masm()->Add_d(sp, sp, Operand(-(kPointerSize * register_count)));
for (int i = 0; i < register_count; ++i) {
__ masm()->St_d(kInterpreterAccumulatorRegister, MemOperand(sp, i * 8));
}
}
}
void BaselineCompiler::VerifyFrameSize() {
ASM_CODE_COMMENT(&masm_);
__ masm()->Add_d(t0, sp,
Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
bytecode_->frame_size()));
__ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, t0, Operand(fp));
}
} // namespace baseline
} // namespace internal
} // namespace v8
#endif // V8_BASELINE_LOONG64_BASELINE_COMPILER_LOONG64_INL_H_
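The fill loops in PrologueFillFrame above are easiest to read as their run-time effect on the stack. A minimal sketch of that effect (illustration only, not V8 code; FillFrame is a hypothetical name, one 8-byte slot per interpreter register, new_target_index < 0 meaning no new target):

#include <cstdint>
#include <vector>

std::vector<uint64_t> FillFrame(int register_count, int new_target_index,
                                uint64_t undefined, uint64_t new_target) {
  std::vector<uint64_t> slots;
  if (new_target_index >= 0) {
    // Registers below the new-target slot get undefined, then new_target.
    for (int i = 0; i < new_target_index; i++) slots.push_back(undefined);
    slots.push_back(new_target);
    register_count -= new_target_index + 1;
  }
  // All remaining registers are filled with undefined. The emitted code
  // simply unrolls this loop; note that both branches of the kLoopUnrollSize
  // check above emit the same fully unrolled sequence, so the split has no
  // effect in this port yet.
  for (int i = 0; i < register_count; ++i) slots.push_back(undefined);
  return slots;
}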
@@ -21,6 +21,8 @@
#include "src/codegen/mips/assembler-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
@@ -21,6 +21,8 @@
#include "src/codegen/mips/assembler-mips-inl.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/assembler-loong64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390-inl.h"
#elif V8_TARGET_ARCH_RISCV64
@@ -276,8 +276,10 @@ class V8_EXPORT_PRIVATE AssemblerBase : public Malloced {
int pc_offset() const { return static_cast<int>(pc_ - buffer_start_); }
int pc_offset_for_safepoint() {
-#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64)
-    // Mips needs it's own implementation to avoid trampoline's influence.
+#if defined(V8_TARGET_ARCH_MIPS) || defined(V8_TARGET_ARCH_MIPS64) || \
+    defined(V8_TARGET_ARCH_LOONG64)
+    // MIPS and LOONG need to use their own implementation to avoid trampoline's
+    // influence.
UNREACHABLE();
#else
return pc_offset();
@@ -15,6 +15,8 @@
#include "src/codegen/mips/constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/constants-loong64.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
#include "src/codegen/ppc/constants-ppc.h"
#elif V8_TARGET_ARCH_S390
@@ -51,6 +51,9 @@ enum CpuFeature {
MIPSr6,
MIPS_SIMD, // MSA instructions
#elif V8_TARGET_ARCH_LOONG64
FPU,
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
PPC_6_PLUS,
PPC_7_PLUS,
@@ -688,6 +688,8 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_MIPS64
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_LOONG64
#define re_stack_check_func RegExpMacroAssemblerLOONG64::CheckStackGuardState
#elif V8_TARGET_ARCH_S390
#define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
#elif V8_TARGET_ARCH_RISCV64
@@ -27,6 +27,8 @@
#include "src/codegen/mips64/interface-descriptors-mips64-inl.h"
#elif V8_TARGET_ARCH_MIPS
#include "src/codegen/mips/interface-descriptors-mips-inl.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/interface-descriptors-loong64-inl.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/codegen/riscv64/interface-descriptors-riscv64-inl.h"
#else
@@ -320,7 +322,8 @@ constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
// TODO(v8:11421): Implement on other platforms.
#if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 || \
-    V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS
+    V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS || \
+    V8_TARGET_ARCH_LOONG64
return RegisterArray(
kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister,
@@ -341,7 +344,7 @@ constexpr auto BaselineLeaveFrameDescriptor::registers() {
#if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
-    V8_TARGET_ARCH_MIPS
+    V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
return RegisterArray(ParamsSizeRegister(), WeightRegister());
#else
return DefaultRegisterArray();
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
#define V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
#include "src/codegen/assembler.h"
#include "src/codegen/loong64/assembler-loong64.h"
#include "src/debug/debug.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
bool Operand::is_reg() const { return rm_.is_valid(); }
int64_t Operand::immediate() const {
DCHECK(!is_reg());
DCHECK(!IsHeapObjectRequest());
return value_.immediate;
}
// -----------------------------------------------------------------------------
// RelocInfo.
void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
Assembler::RelocateInternalReference(rmode_, pc_, delta);
} else {
DCHECK(IsRelativeCodeTarget(rmode_));
Assembler::RelocateRelativeReference(rmode_, pc_, delta);
}
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTargetMode(rmode_) || IsRuntimeEntry(rmode_) ||
IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
DCHECK(HasTargetAddressAddress());
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target.
// For an instruction like LUI/ORI where the target bits are mixed into the
// instruction bits, the size of the target will be zero, indicating that the
// serializer should not step forward in memory after a target is resolved
// and written. In this case the target_address_address function should
// return the end of the instructions to be patched, allowing the
// deserializer to deserialize the instructions as raw bytes and put them in
// place, ready to be patched with the target. After jump optimization,
// that is the address of the instruction that follows J/JAL/JR/JALR
// instruction.
return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
}
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
!code.is_null() ? code.constant_pool() : kNullAddress,
target);
}
int Assembler::deserialization_special_target_size(
Address instruction_payload) {
return kSpecialTargetSize;
}
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
WriteUnalignedValue<Address>(pc, target);
}
HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
IsDataEmbeddedObject(rmode_));
if (IsDataEmbeddedObject(rmode_)) {
return HeapObject::cast(Object(ReadUnalignedValue<Address>(pc_)));
}
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
return target_object();
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
if (IsDataEmbeddedObject(rmode_)) {
return Handle<HeapObject>::cast(ReadUnalignedValue<Handle<Object>>(pc_));
} else if (IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_)) {
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
} else {
DCHECK(IsRelativeCodeTarget(rmode_));
return origin->relative_code_target_object_handle_at(pc_);
}
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_) ||
IsDataEmbeddedObject(rmode_));
if (IsDataEmbeddedObject(rmode_)) {
WriteUnalignedValue(pc_, target.ptr());
// No need to flush icache since no instructions were changed.
} else {
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
}
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::set_target_external_reference(
Address target, ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
}
Address RelocInfo::target_internal_reference() {
if (rmode_ == INTERNAL_REFERENCE) {
return Memory<Address>(pc_);
} else {
UNREACHABLE();
}
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE);
return pc_;
}
Handle<Code> Assembler::relative_code_target_object_handle_at(
Address pc) const {
Instr instr = Assembler::instr_at(pc);
int32_t code_target_index = instr & kImm26Mask;
code_target_index = ((code_target_index & 0x3ff) << 22 >> 6) |
((code_target_index >> 10) & kImm16Mask);
return GetCodeTarget(code_target_index);
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
Address RelocInfo::target_off_heap_target() {
DCHECK(IsOffHeapTarget(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::WipeOut() {
DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
Memory<Address>(pc_) = kNullAddress;
} else {
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
}
}
// -----------------------------------------------------------------------------
// Assembler.
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
}
void Assembler::EmitHelper(Instr x) {
*reinterpret_cast<Instr*>(pc_) = x;
pc_ += kInstrSize;
CheckTrampolinePoolQuick();
}
template <>
inline void Assembler::EmitHelper(uint8_t x);
template <typename T>
void Assembler::EmitHelper(T x) {
*reinterpret_cast<T*>(pc_) = x;
pc_ += sizeof(x);
CheckTrampolinePoolQuick();
}
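// Note: unlike the generic EmitHelper above, the uint8_t specialization
// below only polls the trampoline pool once pc_ is realigned to an
// instruction boundary, since single-byte emits can leave pc_
// mid-instruction.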
template <>
void Assembler::EmitHelper(uint8_t x) {
*reinterpret_cast<uint8_t*>(pc_) = x;
pc_ += sizeof(x);
if (reinterpret_cast<intptr_t>(pc_) % kInstrSize == 0) {
CheckTrampolinePoolQuick();
}
}
void Assembler::emit(Instr x) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
EmitHelper(x);
}
void Assembler::emit(uint64_t data) {
// CheckForEmitInForbiddenSlot();
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
EmitHelper(data);
}
EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_LOONG64_ASSEMBLER_LOONG64_INL_H_
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/constants-loong64.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Registers.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumSimuRegisters] = {
"zero_reg", "ra", "tp", "sp", "a0", "a1", "a2", "a3", "a4", "a5", "a6",
"a7", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "t8", "x_reg",
"fp", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "s7", "s8", "pc"};
// List of alias names which can be used when referring to registers.
const Registers::RegisterAlias Registers::aliases_[] = {
{0, "zero"}, {30, "cp"}, {kInvalidRegister, nullptr}};
const char* Registers::Name(int reg) {
const char* result;
if ((0 <= reg) && (reg < kNumSimuRegisters)) {
result = names_[reg];
} else {
result = "noreg";
}
return result;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].reg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].reg;
}
i++;
}
// No register with the requested name found.
return kInvalidRegister;
}
const char* FPURegisters::names_[kNumFPURegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
"f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
// List of alias names which can be used when referring to LoongArch registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
{kInvalidRegister, nullptr}};
const char* FPURegisters::Name(int creg) {
const char* result;
if ((0 <= creg) && (creg < kNumFPURegisters)) {
result = names_[creg];
} else {
result = "nocreg";
}
return result;
}
int FPURegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumFPURegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].creg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].creg;
}
i++;
}
// No FPU register with the requested name found.
return kInvalidFPURegister;
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_LOONG64
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// CPU specific code for LoongArch independent of OS goes here.
#include <sys/syscall.h>
#include <unistd.h>
#if V8_TARGET_ARCH_LOONG64
#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
#if defined(V8_HOST_ARCH_LOONG64)
// Nothing to do, flushing no instructions.
if (size == 0) {
return;
}
#if defined(ANDROID) && !defined(__LP64__)
// Bionic cacheflush can typically run in userland, avoiding a kernel call.
char* end = reinterpret_cast<char*>(start) + size;
cacheflush(reinterpret_cast<intptr_t>(start), reinterpret_cast<intptr_t>(end),
0);
#else // ANDROID
asm("ibar 0\n");
#endif // ANDROID
#endif // V8_HOST_ARCH_LOONG64
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_LOONG64
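The single "ibar 0" above relies on LoongArch keeping instruction and data caches coherent in hardware, so only the fetch pipeline needs a barrier. For comparison, a portable sketch of the same operation using the GCC/Clang builtin (not part of the commit):

#include <cstddef>

// Portable analogue of CpuFeatures::FlushICache; the builtin typically
// lowers to an "ibar"-style fetch barrier on LoongArch toolchains.
void FlushICachePortable(void* start, size_t size) {
  char* begin = static_cast<char*>(start);
  __builtin___clear_cache(begin, begin + size);
}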
@@ -57,6 +57,9 @@ enum class SmiCheck { kOmit, kInline };
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/mips64/macro-assembler-mips64.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/constants-loong64.h"
#include "src/codegen/loong64/macro-assembler-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/constants-s390.h"
#include "src/codegen/s390/macro-assembler-s390.h"
@@ -22,6 +22,8 @@
#include "src/codegen/mips/register-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/codegen/mips64/register-mips64.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/codegen/loong64/register-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/register-s390.h"
#elif V8_TARGET_ARCH_RISCV64
@@ -60,6 +60,8 @@ static int get_num_allocatable_double_registers() {
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_MIPS64
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_LOONG64
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_PPC64
@@ -320,7 +320,7 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \
-    defined(V8_TARGET_ARCH_RISCV64)
+    defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
return true;
#endif
}
@@ -62,6 +62,9 @@ constexpr int GB = MB * 1024;
#if (V8_TARGET_ARCH_RISCV64 && !V8_HOST_ARCH_RISCV64)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_LOONG64 && !V8_HOST_ARCH_LOONG64)
#define USE_SIMULATOR 1
#endif
#endif
// Determine whether the architecture uses an embedded constant pool
@@ -17,6 +17,8 @@
#include "src/compiler/backend/mips/instruction-codes-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/compiler/backend/mips64/instruction-codes-mips64.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/compiler/backend/loong64/instruction-codes-loong64.h"
#elif V8_TARGET_ARCH_X64
#include "src/compiler/backend/x64/instruction-codes-x64.h"
#elif V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64
@@ -2711,7 +2711,8 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
-    !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_RISCV64
+    !V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && \
+    !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
@@ -2737,7 +2738,7 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 &&
-        // !V8_TARGET_ARCH_RISCV64
+        // !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// This is only needed on 32-bit to split the 64-bit value into two operands.
@@ -2751,11 +2752,11 @@ void InstructionSelector::VisitI64x2ReplaceLaneI32Pair(Node* node) {
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_S390X && !V8_TARGET_ARCH_PPC64
#if !V8_TARGET_ARCH_ARM64
-#if !V8_TARGET_ARCH_MIPS64
+#if !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64
void InstructionSelector::VisitI64x2Splat(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ExtractLane(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitI64x2ReplaceLane(Node* node) { UNIMPLEMENTED(); }
-#endif  // !V8_TARGET_ARCH_MIPS64
+#endif  // !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_LOONG64
void InstructionSelector::VisitF64x2Qfma(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF64x2Qfms(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitF32x4Qfma(Node* node) { UNIMPLEMENTED(); }
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/macro-assembler.h"
#include "src/compiler/backend/instruction-scheduler.h"
namespace v8 {
namespace internal {
namespace compiler {
// TODO(LOONG_dev): LOONG64 Support instruction scheduler.
bool InstructionScheduler::SchedulerSupported() { return false; }
int InstructionScheduler::GetTargetInstructionFlags(
const Instruction* instr) const {
UNREACHABLE();
}
int InstructionScheduler::GetInstructionLatency(const Instruction* instr) {
UNREACHABLE();
}
} // namespace compiler
} // namespace internal
} // namespace v8
@@ -100,6 +100,18 @@ namespace {
#define CALLEE_SAVE_FP_REGISTERS \
f20.bit() | f22.bit() | f24.bit() | f26.bit() | f28.bit() | f30.bit()
#elif V8_TARGET_ARCH_LOONG64
// ===========================================================================
// == loong64 ================================================================
// ===========================================================================
#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
#define CALLEE_SAVE_REGISTERS \
s0.bit() | s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | \
s7.bit() | s8.bit() | fp.bit()
#define CALLEE_SAVE_FP_REGISTERS \
f24.bit() | f25.bit() | f26.bit() | f27.bit() | f28.bit() | f29.bit() | \
f30.bit() | f31.bit()
#elif V8_TARGET_ARCH_PPC64
// ===========================================================================
// == ppc & ppc64 ============================================================
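A quick check of the new loong64 FP mask above (a sketch, assuming fN.bit() == 1 << N, which is how V8 builds these register bit sets): f24 through f31 together give 0xFF000000.

#include <cassert>
#include <cstdint>

int main() {
  uint32_t mask = 0;
  for (int n = 24; n <= 31; n++) mask |= uint32_t{1} << n;  // f24..f31
  assert(mask == 0xFF000000u);
  return 0;
}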
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 2 * kInstrSize;
const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 3 * kInstrSize;
const int Deoptimizer::kEagerWithResumeDeoptExitSize =
kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize;
// TODO(LOONG_dev): LOONG64 Is the PcOffset right?
const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
kInstrSize + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
static_cast<uint32_t>(double_registers_[n].get_bits()));
}
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No embedded constant pool support.
UNREACHABLE();
}
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
} // namespace internal
} // namespace v8
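Spot-checking the exit sizes above (a sketch; kInstrSize == 4 and kSystemPointerSize == 8 hold on loong64):

#include <cassert>

int main() {
  const int kInstrSize = 4, kSystemPointerSize = 8;
  assert(2 * kInstrSize == 8);  // kNonLazyDeoptExitSize, kLazyDeoptExitSize
  const int kEagerWithResumeBeforeArgsSize = 3 * kInstrSize;  // 12 bytes
  assert(kEagerWithResumeBeforeArgsSize + 2 * kSystemPointerSize == 28);
  return 0;
}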
@@ -1080,6 +1080,8 @@ class DebugInfoSection : public DebugSection {
UNIMPLEMENTED();
#elif V8_TARGET_ARCH_MIPS64
UNIMPLEMENTED();
#elif V8_TARGET_ARCH_LOONG64
UNIMPLEMENTED();
#elif V8_TARGET_ARCH_PPC64 && V8_OS_LINUX
w->Write<uint8_t>(DW_OP_reg31); // The frame pointer is here on PPC64.
#elif V8_TARGET_ARCH_S390
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/diagnostics/unwinder.h"
namespace v8 {
struct RegisterState;
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {}
} // namespace v8
@@ -87,6 +87,7 @@ class PerfJitLogger : public CodeEventLogger {
static const uint32_t kElfMachARM = 40;
static const uint32_t kElfMachMIPS = 8;
static const uint32_t kElfMachMIPS64 = 8;
static const uint32_t kElfMachLOONG64 = 258;
static const uint32_t kElfMachARM64 = 183;
static const uint32_t kElfMachS390x = 22;
static const uint32_t kElfMachPPC64 = 21;
@@ -103,6 +104,8 @@ class PerfJitLogger : public CodeEventLogger {
return kElfMachMIPS;
#elif V8_TARGET_ARCH_MIPS64
return kElfMachMIPS64;
#elif V8_TARGET_ARCH_LOONG64
return kElfMachLOONG64;
#elif V8_TARGET_ARCH_ARM64
return kElfMachARM64;
#elif V8_TARGET_ARCH_S390X
@@ -403,6 +403,8 @@ inline static int FrameSlotToFPOffset(int slot) {
#include "src/execution/mips/frame-constants-mips.h"
#elif V8_TARGET_ARCH_MIPS64
#include "src/execution/mips64/frame-constants-mips64.h"
#elif V8_TARGET_ARCH_LOONG64
#include "src/execution/loong64/frame-constants-loong64.h"
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/frame-constants-s390.h"
#elif V8_TARGET_ARCH_RISCV64
@@ -88,9 +88,9 @@ class SimulatorBase {
static typename std::enable_if<std::is_integral<T>::value, intptr_t>::type
ConvertArg(T arg) {
static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
-#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
-    // The MIPS64 and RISCV64 calling convention is to sign extend all values,
-    // even unsigned ones.
+#if V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_LOONG64
+    // The MIPS64, LOONG64 and RISCV64 calling convention is to sign extend all
+    // values, even unsigned ones.
using signed_t = typename std::make_signed<T>::type;
return static_cast<intptr_t>(static_cast<signed_t>(arg));
#else
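For illustration, a self-contained version of the sign-extending branch above (a sketch assuming a 64-bit host, where intptr_t is 64 bits wide):

#include <cassert>
#include <cstdint>
#include <type_traits>

template <typename T>
intptr_t ConvertArg(T arg) {
  static_assert(sizeof(T) <= sizeof(intptr_t), "type bigger than ptrsize");
  using signed_t = typename std::make_signed<T>::type;
  return static_cast<intptr_t>(static_cast<signed_t>(arg));
}

int main() {
  // A uint32_t with its top bit set widens to 0xFFFFFFFF80000000, which is
  // what a LOONG64 callee expects to see in a 64-bit argument register.
  assert(ConvertArg<uint32_t>(0x80000000u) ==
         static_cast<intptr_t>(-2147483648LL));
  return 0;
}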