Commit ffd9e82d authored by Brice Dobry, committed by Commit Bot

Add RISC-V backend

This very large changeset adds support for RISC-V.

Bug: v8:10991
Change-Id: Ic997c94cc12bba6881bc208e66526f423dd0679c
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/2571344
Commit-Queue: Brice Dobry <brice.dobry@futurewei.com>
Commit-Queue: Georg Neis <neis@chromium.org>
Reviewed-by: Georg Neis <neis@chromium.org>
Reviewed-by: Hannes Payer <hpayer@chromium.org>
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Reviewed-by: Michael Stanton <mvstanton@chromium.org>
Cr-Commit-Position: refs/heads/master@{#72598}
parent 6beed8d5
......@@ -68,8 +68,10 @@ Ben Newman <ben@meteor.com>
Ben Noordhuis <info@bnoordhuis.nl>
Benjamin Tan <demoneaux@gmail.com>
Bert Belder <bertbelder@gmail.com>
Brice Dobry <brice.dobry@futurewei.com>
Burcu Dogan <burcujdogan@gmail.com>
Caitlin Potter <caitpotter88@gmail.com>
Chao Wang <chao.w@rioslab.org>
Craig Schlenter <craig.schlenter@gmail.com>
Charles Kerr <charles@charleskerr.com>
Chengzhong Wu <legendecas@gmail.com>
......@@ -85,6 +87,7 @@ David Carlier <devnexen@gmail.com>
David Manouchehri <david@davidmanouchehri.com>
Deepak Mohan <hop2deep@gmail.com>
Deon Dior <diaoyuanjie@gmail.com>
Derek Tu <derek.t@rioslab.org>
Dominic Farolini <domfarolino@gmail.com>
Douglas Crosher <dtc-v8@scieneer.com>
Dusan Milosavljevic <dusan.m.milosavljevic@gmail.com>
......@@ -100,6 +103,7 @@ Gergely Nagy <ngg@ngg.hu>
Gilang Mentari Hamidy <gilang@hamidy.net>
Gus Caplan <me@gus.host>
Gwang Yoon Hwang <ryumiel@company100.net>
Haichuan Wang <hc.opensource@gmail.com>
Hannu Trey <hannu.trey@gmail.com>
Henrique Ferreiro <henrique.ferreiro@gmail.com>
Hirofumi Mako <mkhrfm@gmail.com>
......@@ -118,6 +122,7 @@ Jay Freeman <saurik@saurik.com>
James Pike <g00gle@chilon.net>
James M Snell <jasnell@gmail.com>
Javad Amiri <javad.amiri@anu.edu.au>
Ji Qiu <qiuji@iscas.ac.cn>
Jianghua Yang <jianghua.yjh@alibaba-inc.com>
Jiawen Geng <technicalcute@gmail.com>
Jiaxun Yang <jiaxun.yang@flygoat.com>
......@@ -168,6 +173,7 @@ Oliver Dunk <oliver@oliverdunk.com>
Paolo Giarrusso <p.giarrusso@gmail.com>
Patrick Gansterer <paroga@paroga.com>
Peng Fei <pfgenyun@gmail.com>
Peng Wu <peng.w@rioslab.org>
Peng-Yu Chen <pengyu@libstarrify.so>
Peter Rybin <peter.rybin@gmail.com>
Peter Varga <pvarga@inf.u-szeged.hu>
......@@ -182,6 +188,7 @@ Raul Tambre <raul@tambre.ee>
Ray Glover <ray@rayglover.net>
Refael Ackermann <refack@gmail.com>
Rene Rebe <rene@exactcode.de>
Reza Yazdani <ryazdani@futurewei.com>
Rick Waldron <waldron.rick@gmail.com>
Rob Wu <rob@robwu.nl>
Robert Meijer <robert.s.meijer@gmail.com>
......@@ -202,7 +209,9 @@ Shawn Presser <shawnpresser@gmail.com>
Stefan Penner <stefan.penner@gmail.com>
Sylvestre Ledru <sledru@mozilla.com>
Taketoshi Aono <brn@b6n.ch>
Tao Liqiang <taolq@outlook.com>
Teddy Katz <teddy.katz@gmail.com>
Thomas Young <wenzhang5800@gmail.com>
Tiancheng "Timothy" Gu <timothygu99@gmail.com>
Tobias Burnus <burnus@net-b.de>
Tobias Nießen <tniessen@tnie.de>
......@@ -213,12 +222,15 @@ Victor Costan <costan@gmail.com>
Vlad Burlik <vladbph@gmail.com>
Vladimir Krivosheev <develar@gmail.com>
Vladimir Shutoff <vovan@shutoff.ru>
Wei Wu <lazyparser@gmail.com>
Wenlu Wang <kingwenlu@gmail.com>
Wiktor Garbacz <wiktor.garbacz@gmail.com>
Wouter Vermeiren <wouter.vermeiren@essensium.com>
Xiaofang Zou <zouxiaofang@iscas.ac.cn>
Xiaoyin Liu <xiaoyin.l@outlook.com>
Yanbo Li <lybvinci@gmail.com>
Yannic Bonenberger <contact@yannic-bonenberger.com>
Yi Wang <wangyi8848@gmail.com>
Yong Wang <ccyongwang@tencent.com>
Youfeng Hao <ajihyf@gmail.com>
Yu Yin <xwafish@gmail.com>
......
......@@ -930,6 +930,15 @@ config("toolchain") {
}
}
# Under a simulator build, the compiler will not provide __riscv_xlen, so define it here.
if (v8_current_cpu == "riscv64") {
defines += [ "V8_TARGET_ARCH_RISCV64" ]
defines += [ "__riscv_xlen=64" ]
# FIXME: Temporarily reuse the MIPS macro for this build.
defines += [ "CAN_USE_FPU_INSTRUCTIONS" ]
}
if (v8_current_cpu == "x86") {
defines += [ "V8_TARGET_ARCH_IA32" ]
if (is_win) {
......@@ -1018,7 +1027,7 @@ config("toolchain") {
}
if (v8_current_cpu == "x64" || v8_current_cpu == "arm64" ||
v8_current_cpu == "mips64el") {
v8_current_cpu == "mips64el" || v8_current_cpu == "riscv64") {
cflags += [ "-Wshorten-64-to-32" ]
}
}
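For reference, -Wshorten-64-to-32 is a Clang diagnostic that warns on implicit truncation from 64-bit to 32-bit integers, which is easy to introduce in a new 64-bit port; enabling it for riscv64 matches the other 64-bit targets. A minimal illustration of what it flags (not part of the patch):

```cpp
#include <cstdint>

int32_t Truncate(int64_t value) {
  int32_t narrowed = value;  // clang: implicit conversion loses integer precision
  return narrowed;           // prefer an explicit static_cast<int32_t>(value)
}
```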
......@@ -2027,6 +2036,11 @@ v8_source_set("v8_initializers") {
### gcmole(arch:s390) ###
"src/builtins/s390/builtins-s390.cc",
]
} else if (v8_current_cpu == "riscv64") {
sources += [
### gcmole(arch:riscv64) ###
"src/builtins/riscv64/builtins-riscv64.cc",
]
}
if (!v8_enable_i18n_support) {
......@@ -3986,6 +4000,34 @@ v8_source_set("v8_base_without_compiler") {
"src/regexp/s390/regexp-macro-assembler-s390.h",
"src/wasm/baseline/s390/liftoff-assembler-s390.h",
]
} else if (v8_current_cpu == "riscv64") {
sources += [ ### gcmole(arch:riscv64) ###
"src/codegen/riscv64/assembler-riscv64-inl.h",
"src/codegen/riscv64/assembler-riscv64.cc",
"src/codegen/riscv64/assembler-riscv64.h",
"src/codegen/riscv64/constants-riscv64.cc",
"src/codegen/riscv64/constants-riscv64.h",
"src/codegen/riscv64/cpu-riscv64.cc",
"src/codegen/riscv64/interface-descriptors-riscv64.cc",
"src/codegen/riscv64/macro-assembler-riscv64.cc",
"src/codegen/riscv64/macro-assembler-riscv64.h",
"src/codegen/riscv64/register-riscv64.h",
"src/compiler/backend/riscv64/code-generator-riscv64.cc",
"src/compiler/backend/riscv64/instruction-codes-riscv64.h",
"src/compiler/backend/riscv64/instruction-scheduler-riscv64.cc",
"src/compiler/backend/riscv64/instruction-selector-riscv64.cc",
"src/debug/riscv64/debug-riscv64.cc",
"src/deoptimizer/riscv64/deoptimizer-riscv64.cc",
"src/diagnostics/riscv64/disasm-riscv64.cc",
"src/diagnostics/riscv64/unwinder-riscv64.cc",
"src/execution/riscv64/frame-constants-riscv64.cc",
"src/execution/riscv64/frame-constants-riscv64.h",
"src/execution/riscv64/simulator-riscv64.cc",
"src/execution/riscv64/simulator-riscv64.h",
"src/regexp/riscv64/regexp-macro-assembler-riscv64.cc",
"src/regexp/riscv64/regexp-macro-assembler-riscv64.h",
"src/wasm/baseline/riscv64/liftoff-assembler-riscv64.h",
]
}
configs = [
......@@ -4087,7 +4129,8 @@ v8_source_set("v8_base_without_compiler") {
if (v8_current_cpu == "mips" || v8_current_cpu == "mipsel" ||
v8_current_cpu == "mips64" || v8_current_cpu == "mips64el" ||
v8_current_cpu == "ppc" || v8_current_cpu == "ppc64" ||
v8_current_cpu == "s390" || v8_current_cpu == "s390x") {
v8_current_cpu == "s390" || v8_current_cpu == "s390x" ||
v8_current_cpu == "riscv64") {
libs += [ "atomic" ]
}
......@@ -4432,6 +4475,10 @@ v8_component("v8_libbase") {
sources += [ "src/base/ubsan.cc" ]
}
if (v8_current_cpu == "riscv64") {
libs += [ "atomic" ]
}
if (is_tsan && !build_with_chromium) {
data += [ "tools/sanitizers/tsan_suppressions.txt" ]
......@@ -4570,6 +4617,8 @@ v8_source_set("v8_cppgc_shared") {
sources += [ "src/heap/base/asm/mips/push_registers_asm.cc" ]
} else if (current_cpu == "mips64el") {
sources += [ "src/heap/base/asm/mips64/push_registers_asm.cc" ]
} else if (current_cpu == "riscv64") {
sources += [ "src/heap/base/asm/riscv64/push_registers_asm.cc" ]
}
} else if (is_win) {
if (current_cpu == "x64") {
......@@ -5318,6 +5367,10 @@ if (want_v8_shell) {
v8_executable("cppgc_sample") {
sources = [ "samples/cppgc/cppgc-sample.cc" ]
if (v8_current_cpu == "riscv64") {
libs = [ "atomic" ]
}
configs = [
# Note: don't use :internal_config here because this target will get
# the :external_config applied to it by virtue of depending on :cppgc, and
......
......@@ -26,4 +26,5 @@ per-file WATCHLIST=file:COMMON_OWNERS
per-file *-mips*=file:MIPS_OWNERS
per-file *-mips64*=file:MIPS_OWNERS
per-file *-ppc*=file:PPC_OWNERS
per-file *-riscv64*=file:RISCV_OWNERS
per-file *-s390*=file:S390_OWNERS
brice.dobry@futurewei.com
lazyparser@gmail.com
peng.w@rioslab.org
......@@ -79,7 +79,8 @@ if (v8_snapshot_toolchain == "") {
if (v8_current_cpu == "x64" || v8_current_cpu == "x86") {
_cpus = v8_current_cpu
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el") {
} else if (v8_current_cpu == "arm64" || v8_current_cpu == "mips64el" ||
v8_current_cpu == "riscv64") {
if (is_win && v8_current_cpu == "arm64") {
# set _cpus to blank for Windows ARM64 so host_toolchain could be
# selected as snapshot toolchain later.
......
......@@ -19,7 +19,7 @@ struct CalleeSavedRegisters {
};
#elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 || \
V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC || \
V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390
V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390
struct CalleeSavedRegisters {};
#else
#error Target architecture was not detected as supported by v8
......
......@@ -33,6 +33,12 @@
'ppc64.debug.sim': 'default_debug_ppc64_sim',
'ppc64.optdebug.sim': 'default_optdebug_ppc64_sim',
'ppc64.release.sim': 'default_release_ppc64_sim',
'riscv64.debug': 'default_debug_riscv64',
'riscv64.optdebug': 'default_optdebug_riscv64',
'riscv64.release': 'default_release_riscv64',
'riscv64.debug.sim': 'default_debug_riscv64_sim',
'riscv64.optdebug.sim': 'default_optdebug_riscv64_sim',
'riscv64.release.sim': 'default_release_riscv64_sim',
's390x.debug': 'default_debug_s390x',
's390x.optdebug': 'default_optdebug_s390x',
's390x.release': 'default_release_s390x',
......@@ -290,6 +296,18 @@
'debug', 'simulate_mips64el', 'v8_enable_slow_dchecks'],
'default_release_mips64el': [
'release', 'simulate_mips64el'],
'default_debug_riscv64': [
'debug', 'riscv64', 'gcc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_riscv64': [
'debug', 'riscv64', 'gcc', 'v8_enable_slow_dchecks'],
'default_release_riscv64': [
'release', 'riscv64', 'gcc'],
'default_debug_riscv64_sim': [
'debug', 'simulate_riscv64', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_riscv64_sim': [
'debug', 'simulate_riscv64', 'v8_enable_slow_dchecks'],
'default_release_riscv64_sim': [
'release', 'simulate_riscv64'],
'default_debug_ppc64': [
'debug', 'ppc64', 'gcc', 'v8_enable_slow_dchecks', 'v8_full_debug'],
'default_optdebug_ppc64': [
......@@ -875,6 +893,10 @@
'gn_args': 'target_cpu="ppc64" use_custom_libcxx=false',
},
'riscv64': {
'gn_args': 'target_cpu="riscv64" use_custom_libcxx=false',
},
'x64': {
'gn_args': 'target_cpu="x64"',
},
......
......@@ -46,38 +46,44 @@
#else
#define V8_HOST_ARCH_32_BIT 1
#endif
#elif defined(__riscv) || defined(__riscv__)
#if __riscv_xlen == 64
#define V8_HOST_ARCH_RISCV64 1
#define V8_HOST_ARCH_64_BIT 1
#else
#error "Cannot detect Riscv's bitwidth"
#endif
#else
#error "Host architecture was not detected as supported by v8"
#endif
#if defined(__ARM_ARCH_7A__) || \
defined(__ARM_ARCH_7R__) || \
#if defined(__ARM_ARCH_7A__) || defined(__ARM_ARCH_7R__) || \
defined(__ARM_ARCH_7__)
# define CAN_USE_ARMV7_INSTRUCTIONS 1
#define CAN_USE_ARMV7_INSTRUCTIONS 1
#ifdef __ARM_ARCH_EXT_IDIV__
#define CAN_USE_SUDIV 1
#endif
# ifndef CAN_USE_VFP3_INSTRUCTIONS
#ifndef CAN_USE_VFP3_INSTRUCTIONS
#define CAN_USE_VFP3_INSTRUCTIONS 1
# endif
#endif
#endif
#if defined(__ARM_ARCH_8A__)
#define CAN_USE_ARMV7_INSTRUCTIONS 1
#define CAN_USE_SUDIV 1
# define CAN_USE_ARMV8_INSTRUCTIONS 1
#define CAN_USE_ARMV8_INSTRUCTIONS 1
#ifndef CAN_USE_VFP3_INSTRUCTIONS
#define CAN_USE_VFP3_INSTRUCTIONS 1
#endif
#endif
// Target architecture detection. This may be set externally. If not, detect
// in the same way as the host architecture, that is, target the native
// environment as presented by the compiler.
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && \
!V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
!V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390
!V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 && \
!V8_TARGET_ARCH_RISCV64
#if defined(_M_X64) || defined(__x86_64__)
#define V8_TARGET_ARCH_X64 1
#elif defined(_M_IX86) || defined(__i386__)
......@@ -94,6 +100,10 @@
#define V8_TARGET_ARCH_PPC64 1
#elif defined(_ARCH_PPC)
#define V8_TARGET_ARCH_PPC 1
#elif defined(__riscv) || defined(__riscv__)
#if __riscv_xlen == 64
#define V8_TARGET_ARCH_RISCV64 1
#endif
#else
#error Target architecture was not detected as supported by v8
#endif
......@@ -128,6 +138,8 @@
#else
#define V8_TARGET_ARCH_32_BIT 1
#endif
#elif V8_TARGET_ARCH_RISCV64
#define V8_TARGET_ARCH_64_BIT 1
#else
#error Unknown target architecture pointer size
#endif
......@@ -156,6 +168,9 @@
#if (V8_TARGET_ARCH_MIPS64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_MIPS64))
#error Target architecture mips64 is only supported on mips64 and x64 host
#endif
#if (V8_TARGET_ARCH_RISCV64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_RISCV64))
#error Target architecture riscv64 is only supported on riscv64 and x64 host
#endif
// Determine architecture endianness.
#if V8_TARGET_ARCH_IA32
......@@ -190,6 +205,8 @@
#else
#define V8_TARGET_BIG_ENDIAN 1
#endif
#elif V8_TARGET_ARCH_RISCV64
#define V8_TARGET_LITTLE_ENDIAN 1
#else
#error Unknown target architecture endianness
#endif
......
......@@ -98,7 +98,8 @@
// there.
#if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) && \
!defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
!defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64)) || \
!defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64) && \
!defined(V8_TARGET_ARCH_RISCV64)) || \
(defined(__clang__) && __cplusplus > 201300L))
#define V8_NOEXCEPT noexcept
#else
......
......@@ -322,6 +322,10 @@ void* OS::GetRandomMmapAddr() {
// 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
// to fulfill request.
raw_addr &= uint64_t{0xFFFFFF0000};
#elif V8_TARGET_ARCH_RISCV64
// TODO(RISCV): We need more information from the kernel to correctly mask
// this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
raw_addr &= uint64_t{0xFFFFFF0000};
#else
raw_addr &= 0x3FFFF000;
......@@ -520,6 +524,8 @@ void OS::DebugBreak() {
#elif V8_HOST_ARCH_S390
// Software breakpoint instruction is 0x0001
asm volatile(".word 0x0001");
#elif V8_HOST_ARCH_RISCV64
asm("ebreak");
#else
#error Unsupported host architecture.
#endif
......
......@@ -375,7 +375,7 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// 2. Let i be ? ValidateAtomicAccess(typedArray, index).
TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_RISCV64
USE(array_buffer);
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsExchange, context, array, index_number,
......@@ -476,7 +476,8 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
// This shouldn't happen, we've already validated the type.
BIND(&other);
Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ||
// V8_TARGET_ARCH_RISCV64
BIND(&detached);
{
......@@ -505,7 +506,8 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \
V8_TARGET_ARCH_RISCV64
USE(array_buffer);
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(Runtime::kAtomicsCompareExchange, context, array,
......@@ -627,6 +629,7 @@ TF_BUILTIN(AtomicsCompareExchange, SharedArrayBufferBuiltinsAssembler) {
Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
// || V8_TARGET_ARCH_RISCV64
BIND(&detached);
{
......@@ -678,7 +681,8 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
TNode<UintPtrT> index_word = ValidateAtomicAccess(array, index, context);
#if V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64 || \
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X || \
V8_TARGET_ARCH_RISCV64
USE(array_buffer);
TNode<Number> index_number = ChangeUintPtrToTagged(index_word);
Return(CallRuntime(runtime_function, context, array, index_number, value));
......@@ -771,6 +775,7 @@ void SharedArrayBufferBuiltinsAssembler::AtomicBinopBuiltinCommon(
Unreachable();
#endif // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC64
// || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_S390X
// || V8_TARGET_ARCH_RISCV64
BIND(&detached);
ThrowTypeError(context, MessageTemplate::kDetachedOperation, method_name);
......
......@@ -23,6 +23,8 @@
#include "src/codegen/mips64/assembler-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/codegen/riscv64/assembler-riscv64.h"
#else
#error Unknown architecture.
#endif
......
......@@ -23,6 +23,8 @@
#include "src/codegen/mips64/assembler-mips64-inl.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/assembler-s390-inl.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/codegen/riscv64/assembler-riscv64-inl.h"
#else
#error Unknown architecture.
#endif
......
......@@ -459,5 +459,254 @@ void ConstantPool::MaybeCheck() {
#endif // defined(V8_TARGET_ARCH_ARM64)
#if defined(V8_TARGET_ARCH_RISCV64)
// Constant Pool.
ConstantPool::ConstantPool(Assembler* assm) : assm_(assm) {}
ConstantPool::~ConstantPool() { DCHECK_EQ(blocked_nesting_, 0); }
RelocInfoStatus ConstantPool::RecordEntry(uint32_t data,
RelocInfo::Mode rmode) {
ConstantPoolKey key(data, rmode);
CHECK(key.is_value32());
return RecordKey(std::move(key), assm_->pc_offset());
}
RelocInfoStatus ConstantPool::RecordEntry(uint64_t data,
RelocInfo::Mode rmode) {
ConstantPoolKey key(data, rmode);
CHECK(!key.is_value32());
return RecordKey(std::move(key), assm_->pc_offset());
}
RelocInfoStatus ConstantPool::RecordKey(ConstantPoolKey key, int offset) {
RelocInfoStatus write_reloc_info = GetRelocInfoStatusFor(key);
if (write_reloc_info == RelocInfoStatus::kMustRecord) {
if (key.is_value32()) {
if (entry32_count_ == 0) first_use_32_ = offset;
++entry32_count_;
} else {
if (entry64_count_ == 0) first_use_64_ = offset;
++entry64_count_;
}
}
entries_.insert(std::make_pair(key, offset));
if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
// Request constant pool emission after the next instruction.
SetNextCheckIn(1);
}
return write_reloc_info;
}
RelocInfoStatus ConstantPool::GetRelocInfoStatusFor(
const ConstantPoolKey& key) {
if (key.AllowsDeduplication()) {
auto existing = entries_.find(key);
if (existing != entries_.end()) {
return RelocInfoStatus::kMustOmitForDuplicate;
}
}
return RelocInfoStatus::kMustRecord;
}
void ConstantPool::EmitAndClear(Jump require_jump) {
DCHECK(!IsBlocked());
// Prevent recursive pool emission.
Assembler::BlockPoolsScope block_pools(assm_, PoolEmissionCheck::kSkip);
Alignment require_alignment =
IsAlignmentRequiredIfEmittedAt(require_jump, assm_->pc_offset());
int size = ComputeSize(require_jump, require_alignment);
Label size_check;
assm_->bind(&size_check);
assm_->RecordConstPool(size);
// Emit the constant pool. It is preceded by an optional branch if
// {require_jump} and a header which will:
// 1) Encode the size of the constant pool, for use by the disassembler.
// 2) Terminate the program, to try to prevent execution from accidentally
// flowing into the constant pool.
// 3) align the 64bit pool entries to 64-bit.
// TODO(all): Make the alignment part less fragile. Currently code is
// allocated as a byte array so there are no guarantees the alignment will
// be preserved on compaction. Currently it works as allocation seems to be
// 64-bit aligned.
DEBUG_PRINTF("\tConstant Pool start\n")
Label after_pool;
if (require_jump == Jump::kRequired) assm_->b(&after_pool);
assm_->RecordComment("[ Constant Pool");
EmitPrologue(require_alignment);
if (require_alignment == Alignment::kRequired) assm_->DataAlign(kInt64Size);
EmitEntries();
assm_->RecordComment("]");
assm_->bind(&after_pool);
DEBUG_PRINTF("\tConstant Pool end\n")
DCHECK_LE(assm_->SizeOfCodeGeneratedSince(&size_check) - size, 3);
Clear();
}
void ConstantPool::Clear() {
entries_.clear();
first_use_32_ = -1;
first_use_64_ = -1;
entry32_count_ = 0;
entry64_count_ = 0;
next_check_ = 0;
}
void ConstantPool::StartBlock() {
if (blocked_nesting_ == 0) {
// Prevent constant pool checks from happening by setting the next check to
// the biggest possible offset.
next_check_ = kMaxInt;
}
++blocked_nesting_;
}
void ConstantPool::EndBlock() {
--blocked_nesting_;
if (blocked_nesting_ == 0) {
DCHECK(IsInImmRangeIfEmittedAt(assm_->pc_offset()));
// Make sure a check happens quickly after getting unblocked.
next_check_ = 0;
}
}
bool ConstantPool::IsBlocked() const { return blocked_nesting_ > 0; }
void ConstantPool::SetNextCheckIn(size_t instructions) {
next_check_ =
assm_->pc_offset() + static_cast<int>(instructions * kInstrSize);
}
void ConstantPool::EmitEntries() {
for (auto iter = entries_.begin(); iter != entries_.end();) {
DCHECK(iter->first.is_value32() || IsAligned(assm_->pc_offset(), 8));
auto range = entries_.equal_range(iter->first);
bool shared = iter->first.AllowsDeduplication();
for (auto it = range.first; it != range.second; ++it) {
SetLoadOffsetToConstPoolEntry(it->second, assm_->pc(), it->first);
if (!shared) Emit(it->first);
}
if (shared) Emit(iter->first);
iter = range.second;
}
}
void ConstantPool::Emit(const ConstantPoolKey& key) {
if (key.is_value32()) {
assm_->dd(key.value32());
} else {
assm_->dq(key.value64());
}
}
bool ConstantPool::ShouldEmitNow(Jump require_jump, size_t margin) const {
if (IsEmpty()) return false;
if (Entry32Count() + Entry64Count() > ConstantPool::kApproxMaxEntryCount) {
return true;
}
// We compute {dist32/64}, i.e. the distance from the first instruction
// accessing a 32bit/64bit entry in the constant pool to any of the
// 32bit/64bit constant pool entries, respectively. This is required because
// we do not guarantee that entries are emitted in order of reference, i.e. it
// is possible that the entry with the earliest reference is emitted last.
// The constant pool should be emitted if either of the following is true:
// (A) {dist32/64} will be out of range at the next check in.
// (B) Emission can be done behind an unconditional branch and {dist32/64}
// exceeds {kOpportunityDist*}.
// (C) {dist32/64} exceeds the desired approximate distance to the pool.
int worst_case_size = ComputeSize(Jump::kRequired, Alignment::kRequired);
size_t pool_end_32 = assm_->pc_offset() + margin + worst_case_size;
size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
if (Entry64Count() != 0) {
// The 64-bit constants are always emitted before the 32-bit constants, so
// we subtract the size of the 32-bit constants from {size}.
size_t dist64 = pool_end_64 - first_use_64_;
bool next_check_too_late = dist64 + 2 * kCheckInterval >= kMaxDistToPool64;
bool opportune_emission_without_jump =
require_jump == Jump::kOmitted && (dist64 >= kOpportunityDistToPool64);
bool approximate_distance_exceeded = dist64 >= kApproxDistToPool64;
if (next_check_too_late || opportune_emission_without_jump ||
approximate_distance_exceeded) {
return true;
}
}
if (Entry32Count() != 0) {
size_t dist32 = pool_end_32 - first_use_32_;
bool next_check_too_late = dist32 + 2 * kCheckInterval >= kMaxDistToPool32;
bool opportune_emission_without_jump =
require_jump == Jump::kOmitted && (dist32 >= kOpportunityDistToPool32);
bool approximate_distance_exceeded = dist32 >= kApproxDistToPool32;
if (next_check_too_late || opportune_emission_without_jump ||
approximate_distance_exceeded) {
return true;
}
}
return false;
}
int ConstantPool::ComputeSize(Jump require_jump,
Alignment require_alignment) const {
int size_up_to_marker = PrologueSize(require_jump);
int alignment = require_alignment == Alignment::kRequired ? kInstrSize : 0;
size_t size_after_marker =
Entry32Count() * kInt32Size + alignment + Entry64Count() * kInt64Size;
return size_up_to_marker + static_cast<int>(size_after_marker);
}
Alignment ConstantPool::IsAlignmentRequiredIfEmittedAt(Jump require_jump,
int pc_offset) const {
int size_up_to_marker = PrologueSize(require_jump);
if (Entry64Count() != 0 &&
!IsAligned(pc_offset + size_up_to_marker, kInt64Size)) {
return Alignment::kRequired;
}
return Alignment::kOmitted;
}
bool ConstantPool::IsInImmRangeIfEmittedAt(int pc_offset) {
// Check that all entries are in range if the pool is emitted at {pc_offset}.
// This ignores kPcLoadDelta (conservatively, since all offsets are positive),
// and over-estimates the last entry's address with the pool's end.
Alignment require_alignment =
IsAlignmentRequiredIfEmittedAt(Jump::kRequired, pc_offset);
size_t pool_end_32 =
pc_offset + ComputeSize(Jump::kRequired, require_alignment);
size_t pool_end_64 = pool_end_32 - Entry32Count() * kInt32Size;
bool entries_in_range_32 =
Entry32Count() == 0 || (pool_end_32 < first_use_32_ + kMaxDistToPool32);
bool entries_in_range_64 =
Entry64Count() == 0 || (pool_end_64 < first_use_64_ + kMaxDistToPool64);
return entries_in_range_32 && entries_in_range_64;
}
ConstantPool::BlockScope::BlockScope(Assembler* assm, size_t margin)
: pool_(&assm->constpool_) {
pool_->assm_->EmitConstPoolWithJumpIfNeeded(margin);
pool_->StartBlock();
}
ConstantPool::BlockScope::BlockScope(Assembler* assm, PoolEmissionCheck check)
: pool_(&assm->constpool_) {
DCHECK_EQ(check, PoolEmissionCheck::kSkip);
pool_->StartBlock();
}
ConstantPool::BlockScope::~BlockScope() { pool_->EndBlock(); }
void ConstantPool::MaybeCheck() {
if (assm_->pc_offset() >= next_check_) {
Check(Emission::kIfNeeded, Jump::kRequired);
}
}
#endif // defined(V8_TARGET_ARCH_RISCV64)
} // namespace internal
} // namespace v8
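The RISC-V constant pool reuses the ARM64-style interface compiled above. A minimal usage sketch, assuming direct access to the assembler's constpool_ member and RelocInfo::NONE as the relocation mode (illustration only, not code from the patch):

```cpp
// Keep a recorded 64-bit entry and the instruction that will load it inside a
// region where pool emission is blocked, so the pool cannot be dumped between
// the two.
void RecordImmediateSafely(Assembler* assm, uint64_t imm64) {
  ConstantPool::BlockScope no_pool(assm, /*margin=*/16);  // StartBlock()
  assm->constpool_.RecordEntry(imm64, RelocInfo::NONE);   // 64-bit key recorded
  // ... emit the load whose offset SetLoadOffsetToConstPoolEntry() patches ...
}  // ~BlockScope() calls EndBlock(); a later MaybeCheck() may emit the pool
```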
......@@ -163,7 +163,7 @@ class ConstantPoolBuilder {
#endif // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
#if defined(V8_TARGET_ARCH_ARM64)
#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64)
class ConstantPoolKey {
public:
......
......@@ -21,6 +21,8 @@
#include "src/codegen/s390/constants-s390.h" // NOLINT
#elif V8_TARGET_ARCH_X64
#include "src/codegen/x64/constants-x64.h" // NOLINT
#elif V8_TARGET_ARCH_RISCV64
#include "src/codegen/riscv64/constants-riscv64.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
......
......@@ -68,6 +68,11 @@ enum CpuFeature {
VECTOR_ENHANCE_FACILITY_1,
VECTOR_ENHANCE_FACILITY_2,
MISC_INSTR_EXT2,
#elif V8_TARGET_ARCH_RISCV64
FPU,
FP64FPU,
RISCV_SIMD,
#endif
NUMBER_OF_CPU_FEATURES
......
......@@ -705,6 +705,8 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
#define re_stack_check_func RegExpMacroAssemblerMIPS::CheckStackGuardState
#elif V8_TARGET_ARCH_S390
#define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
#elif V8_TARGET_ARCH_RISCV64
#define re_stack_check_func RegExpMacroAssemblerRISCV::CheckStackGuardState
#else
UNREACHABLE();
#endif
......
......@@ -403,7 +403,8 @@ void WasmFloat64ToNumberDescriptor::InitializePlatformSpecific(
}
#endif // !V8_TARGET_ARCH_IA32
#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64)
#if !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
!defined(V8_TARGET_ARCH_RISCV64)
void WasmI32AtomicWait32Descriptor::InitializePlatformSpecific(
CallInterfaceDescriptorData* data) {
DefaultInitializePlatformSpecific(data, kParameterCount);
......
......@@ -52,6 +52,9 @@ enum AllocationFlags {
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/constants-s390.h"
#include "src/codegen/s390/macro-assembler-s390.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/codegen/riscv64/constants-riscv64.h"
#include "src/codegen/riscv64/macro-assembler-riscv64.h"
#else
#error Unsupported target architecture.
#endif
......
......@@ -24,6 +24,8 @@
#include "src/codegen/mips64/register-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/codegen/s390/register-s390.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/codegen/riscv64/register-riscv64.h"
#else
#error Unknown architecture.
#endif
......
......@@ -66,6 +66,8 @@ static int get_num_allocatable_double_registers() {
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_S390
kMaxAllocatableDoubleRegisterCount;
#elif V8_TARGET_ARCH_RISCV64
kMaxAllocatableDoubleRegisterCount;
#else
#error Unsupported target architecture.
#endif
......
......@@ -330,7 +330,8 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
return false;
#elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) || \
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) || \
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390)
defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) || \
defined(V8_TARGET_ARCH_RISCV64)
return true;
#endif
}
......
......@@ -70,7 +70,7 @@ class RelocInfo {
EXTERNAL_REFERENCE, // The address of an external C++ function.
INTERNAL_REFERENCE, // An address inside the same function.
// Encoded internal reference, used only on MIPS, MIPS64 and PPC.
// Encoded internal reference, used only on RISCV64, MIPS, MIPS64 and PPC.
INTERNAL_REFERENCE_ENCODED,
// An off-heap instruction stream target. See http://goo.gl/Z2HUiM.
......
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license has been
// modified significantly by Google Inc.
// Copyright 2021 the V8 project authors. All rights reserved.
#ifndef V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_
#define V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_
#include "src/codegen/assembler.h"
#include "src/codegen/riscv64/assembler-riscv64.h"
#include "src/debug/debug.h"
#include "src/objects/objects-inl.h"
namespace v8 {
namespace internal {
bool CpuFeatures::SupportsOptimizer() { return IsSupported(FPU); }
bool CpuFeatures::SupportsWasmSimd128() { return IsSupported(RISCV_SIMD); }
// -----------------------------------------------------------------------------
// Operand and MemOperand.
bool Operand::is_reg() const { return rm_.is_valid(); }
int64_t Operand::immediate() const {
DCHECK(!is_reg());
DCHECK(!IsHeapObjectRequest());
return value_.immediate;
}
// -----------------------------------------------------------------------------
// RelocInfo.
void RelocInfo::apply(intptr_t delta) {
if (IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_)) {
// Absolute code pointer inside code object moves with the code object.
Assembler::RelocateInternalReference(rmode_, pc_, delta);
}
}
Address RelocInfo::target_address() {
DCHECK(IsCodeTarget(rmode_) || IsRuntimeEntry(rmode_) || IsWasmCall(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
Address RelocInfo::target_address_address() {
DCHECK(HasTargetAddressAddress());
// Read the address of the word containing the target_address in an
// instruction stream.
// The only architecture-independent user of this function is the serializer.
// The serializer uses it to find out how many raw bytes of instruction to
// output before the next target.
// For an instruction like LUI/ORI where the target bits are mixed into the
// instruction bits, the size of the target will be zero, indicating that the
// serializer should not step forward in memory after a target is resolved
// and written. In this case the target_address_address function should
// return the end of the instructions to be patched, allowing the
// deserializer to deserialize the instructions as raw bytes and put them in
// place, ready to be patched with the target. After jump optimization,
// that is the address of the instruction that follows J/JAL/JR/JALR
// instruction.
return pc_ + Assembler::kInstructionsFor64BitConstant * kInstrSize;
}
Address RelocInfo::constant_pool_entry_address() { UNREACHABLE(); }
int RelocInfo::target_address_size() { return Assembler::kSpecialTargetSize; }
void Assembler::deserialization_set_special_target_at(
Address instruction_payload, Code code, Address target) {
set_target_address_at(instruction_payload,
!code.is_null() ? code.constant_pool() : kNullAddress,
target);
}
int Assembler::deserialization_special_target_size(
Address instruction_payload) {
return kSpecialTargetSize;
}
void Assembler::set_target_internal_reference_encoded_at(Address pc,
Address target) {
set_target_value_at(pc, static_cast<uint64_t>(target));
}
void Assembler::deserialization_set_target_internal_reference_at(
Address pc, Address target, RelocInfo::Mode mode) {
if (RelocInfo::IsInternalReferenceEncoded(mode)) {
DCHECK(IsLui(instr_at(pc)));
set_target_internal_reference_encoded_at(pc, target);
} else {
DCHECK(RelocInfo::IsInternalReference(mode));
Memory<Address>(pc) = target;
}
}
HeapObject RelocInfo::target_object() {
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
return HeapObject::cast(
Object(Assembler::target_address_at(pc_, constant_pool_)));
}
HeapObject RelocInfo::target_object_no_host(Isolate* isolate) {
return target_object();
}
Handle<HeapObject> RelocInfo::target_object_handle(Assembler* origin) {
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
return Handle<HeapObject>(reinterpret_cast<Address*>(
Assembler::target_address_at(pc_, constant_pool_)));
}
void RelocInfo::set_target_object(Heap* heap, HeapObject target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsCodeTarget(rmode_) || IsFullEmbeddedObject(rmode_));
Assembler::set_target_address_at(pc_, constant_pool_, target.ptr(),
icache_flush_mode);
if (write_barrier_mode == UPDATE_WRITE_BARRIER && !host().is_null() &&
!FLAG_disable_write_barriers) {
WriteBarrierForCode(host(), this, target);
}
}
Address RelocInfo::target_external_reference() {
DCHECK(rmode_ == EXTERNAL_REFERENCE);
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::set_target_external_reference(
Address target, ICacheFlushMode icache_flush_mode) {
DCHECK(rmode_ == RelocInfo::EXTERNAL_REFERENCE);
Assembler::set_target_address_at(pc_, constant_pool_, target,
icache_flush_mode);
}
Address RelocInfo::target_internal_reference() {
if (rmode_ == INTERNAL_REFERENCE) {
return Memory<Address>(pc_);
} else {
// Encoded internal references are j/jal instructions.
DCHECK(rmode_ == INTERNAL_REFERENCE_ENCODED);
DCHECK(Assembler::IsLui(Assembler::instr_at(pc_ + 0 * kInstrSize)));
Address address = Assembler::target_address_at(pc_);
return address;
}
}
Address RelocInfo::target_internal_reference_address() {
DCHECK(rmode_ == INTERNAL_REFERENCE || rmode_ == INTERNAL_REFERENCE_ENCODED);
return pc_;
}
Address RelocInfo::target_runtime_entry(Assembler* origin) {
DCHECK(IsRuntimeEntry(rmode_));
return target_address();
}
void RelocInfo::set_target_runtime_entry(Address target,
WriteBarrierMode write_barrier_mode,
ICacheFlushMode icache_flush_mode) {
DCHECK(IsRuntimeEntry(rmode_));
if (target_address() != target)
set_target_address(target, write_barrier_mode, icache_flush_mode);
}
Address RelocInfo::target_off_heap_target() {
DCHECK(IsOffHeapTarget(rmode_));
return Assembler::target_address_at(pc_, constant_pool_);
}
void RelocInfo::WipeOut() {
DCHECK(IsFullEmbeddedObject(rmode_) || IsCodeTarget(rmode_) ||
IsRuntimeEntry(rmode_) || IsExternalReference(rmode_) ||
IsInternalReference(rmode_) || IsInternalReferenceEncoded(rmode_) ||
IsOffHeapTarget(rmode_));
if (IsInternalReference(rmode_)) {
Memory<Address>(pc_) = kNullAddress;
} else if (IsInternalReferenceEncoded(rmode_)) {
Assembler::set_target_internal_reference_encoded_at(pc_, kNullAddress);
} else {
Assembler::set_target_address_at(pc_, constant_pool_, kNullAddress);
}
}
// -----------------------------------------------------------------------------
// Assembler.
void Assembler::CheckBuffer() {
if (buffer_space() <= kGap) {
GrowBuffer();
}
}
template <typename T>
void Assembler::EmitHelper(T x) {
*reinterpret_cast<T*>(pc_) = x;
pc_ += sizeof(x);
}
void Assembler::emit(Instr x) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
DEBUG_PRINTF("%p: ", pc_);
disassembleInstr(x);
EmitHelper(x);
CheckTrampolinePoolQuick();
}
void Assembler::emit(ShortInstr x) {
if (!is_buffer_growth_blocked()) {
CheckBuffer();
}
DEBUG_PRINTF("%p: ", pc_);
disassembleInstr(x);
EmitHelper(x);
CheckTrampolinePoolQuick();
}
void Assembler::emit(uint64_t data) {
if (!is_buffer_growth_blocked()) CheckBuffer();
EmitHelper(data);
}
EnsureSpace::EnsureSpace(Assembler* assembler) { assembler->CheckBuffer(); }
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV64_ASSEMBLER_RISCV64_INL_H_
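These inline helpers are the path every out-of-line emitter funnels through. A rough sketch of such an emitter, with the I-type encoding written out by hand instead of going through the real instruction-format helpers (illustration only, not code from the patch):

```cpp
// Claim buffer space, encode an ADDI, then emit(): emit() logs the word via
// DEBUG_PRINTF/disassembleInstr, writes it with EmitHelper and runs the
// trampoline-pool check.
void Assembler::sketch_addi(Register rd, Register rs1, int16_t imm12) {
  EnsureSpace ensure_space(this);  // CheckBuffer() -> GrowBuffer() if below kGap
  Instr instr = static_cast<Instr>(
      0x13                                                 // OP-IMM major opcode
      | (rd.code() << 7)                                   // rd in bits [11:7]
      | (rs1.code() << 15)                                 // rs1 in bits [19:15]
      | ((static_cast<uint32_t>(imm12) & 0xFFF) << 20));   // imm in bits [31:20]
  emit(instr);
}
```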
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_RISCV64
#include "src/codegen/riscv64/constants-riscv64.h"
namespace v8 {
namespace internal {
// -----------------------------------------------------------------------------
// Registers.
// These register names are defined in a way to match the native disassembler
// formatting. See for example the command "objdump -d <binary file>".
const char* Registers::names_[kNumSimuRegisters] = {
"zero_reg", "ra", "sp", "gp", "tp", "t0", "t1", "t2", "fp", "s1", "a0",
"a1", "a2", "a3", "a4", "a5", "a6", "a7", "s2", "s3", "s4", "s5",
"s6", "s7", "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6", "pc"};
// List of alias names which can be used when referring to RISC-V registers.
const Registers::RegisterAlias Registers::aliases_[] = {
{0, "zero"},
{33, "pc"},
{8, "s0"},
{8, "s0_fp"},
{kInvalidRegister, nullptr}};
const char* Registers::Name(int reg) {
const char* result;
if ((0 <= reg) && (reg < kNumSimuRegisters)) {
result = names_[reg];
} else {
result = "noreg";
}
return result;
}
int Registers::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumSimuRegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].reg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].reg;
}
i++;
}
// No register with the requested name found.
return kInvalidRegister;
}
/*
const char* FPURegisters::names_[kNumFPURegisters] = {
"f0", "f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10",
"f11", "f12", "f13", "f14", "f15", "f16", "f17", "f18", "f19", "f20", "f21",
"f22", "f23", "f24", "f25", "f26", "f27", "f28", "f29", "f30", "f31"};
*/
const char* FPURegisters::names_[kNumFPURegisters] = {
"ft0", "ft1", "ft2", "ft3", "ft4", "ft5", "ft6", "ft7",
"fs0", "fs1", "fa0", "fa1", "fa2", "fa3", "fa4", "fa5",
"fa6", "fa7", "fs2", "fs3", "fs4", "fs5", "fs6", "fs7",
"fs8", "fs9", "fs10", "fs11", "ft8", "ft9", "ft10", "ft11"};
// List of alias names which can be used when referring to RISC-V FP registers.
const FPURegisters::RegisterAlias FPURegisters::aliases_[] = {
{kInvalidRegister, nullptr}};
const char* FPURegisters::Name(int creg) {
const char* result;
if ((0 <= creg) && (creg < kNumFPURegisters)) {
result = names_[creg];
} else {
result = "nocreg";
}
return result;
}
int FPURegisters::Number(const char* name) {
// Look through the canonical names.
for (int i = 0; i < kNumFPURegisters; i++) {
if (strcmp(names_[i], name) == 0) {
return i;
}
}
// Look through the alias names.
int i = 0;
while (aliases_[i].creg != kInvalidRegister) {
if (strcmp(aliases_[i].name, name) == 0) {
return aliases_[i].creg;
}
i++;
}
// No FPU register with the requested name found.
return kInvalidFPURegister;
}
InstructionBase::Type InstructionBase::InstructionType() const {
// RV64C Instruction
if (IsShortInstruction()) {
switch (InstructionBits() & kRvcOpcodeMask) {
case RO_C_ADDI4SPN:
return kCIWType;
case RO_C_FLD:
case RO_C_LW:
case RO_C_LD:
return kCLType;
case RO_C_FSD:
case RO_C_SW:
case RO_C_SD:
return kCSType;
case RO_C_NOP_ADDI:
case RO_C_ADDIW:
case RO_C_LI:
case RO_C_LUI_ADD:
return kCIType;
case RO_C_MISC_ALU:
if (Bits(11, 10) != 0b11)
return kCBType;
else
return kCAType;
case RO_C_J:
return kCJType;
case RO_C_BEQZ:
case RO_C_BNEZ:
return kCBType;
case RO_C_SLLI:
case RO_C_FLDSP:
case RO_C_LWSP:
case RO_C_LDSP:
return kCIType;
case RO_C_JR_MV_ADD:
return kCRType;
case RO_C_FSDSP:
case RO_C_SWSP:
case RO_C_SDSP:
return kCSSType;
default:
break;
}
} else {
// RISCV routine
switch (InstructionBits() & kBaseOpcodeMask) {
case LOAD:
return kIType;
case LOAD_FP:
return kIType;
case MISC_MEM:
return kIType;
case OP_IMM:
return kIType;
case AUIPC:
return kUType;
case OP_IMM_32:
return kIType;
case STORE:
return kSType;
case STORE_FP:
return kSType;
case AMO:
return kRType;
case OP:
return kRType;
case LUI:
return kUType;
case OP_32:
return kRType;
case MADD:
case MSUB:
case NMSUB:
case NMADD:
return kR4Type;
case OP_FP:
return kRType;
case BRANCH:
return kBType;
case JALR:
return kIType;
case JAL:
return kJType;
case SYSTEM:
return kIType;
}
}
return kUnsupported;
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_RISCV64
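The switch above relies on two properties of the RISC-V encoding: 16-bit compressed (RVC) instructions never have both low bits set, and the major opcode of a 32-bit instruction sits in bits [6:0]. A standalone sketch of that split, with the mask values spelled out literally where the real code uses kRvcOpcodeMask and kBaseOpcodeMask (illustration only, not code from the patch):

```cpp
#include <cstdint>

// 32-bit RISC-V instructions always end in 0b11; RVC instructions use
// 0b00, 0b01 or 0b10 in the two low bits.
bool IsCompressed(uint32_t instr) { return (instr & 0b11) != 0b11; }

// For full-width instructions the major opcode is bits [6:0], e.g.
// 0b0110111 for LUI, 0b1101111 for JAL, 0b0010011 for OP-IMM.
uint32_t BaseOpcode(uint32_t instr) { return instr & 0x7F; }
```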
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
// CPU-specific code for RISC-V that is independent of the OS goes here.
#include <sys/syscall.h>
#include <unistd.h>
#if V8_TARGET_ARCH_RISCV64
#include "src/codegen/cpu-features.h"
namespace v8 {
namespace internal {
void CpuFeatures::FlushICache(void* start, size_t size) {
#if !defined(USE_SIMULATOR)
char* end = reinterpret_cast<char*>(start) + size;
// The definition of this syscall is
// SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start,
// uintptr_t, end, uintptr_t, flags)
// The flag here is set to be SYS_RISCV_FLUSH_ICACHE_LOCAL, which is
// defined as 1 in the Linux kernel.
syscall(SYS_riscv_flush_icache, start, end, 1);
#endif // !USE_SIMULATOR.
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_RISCV64
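A hedged usage sketch: whenever freshly written or patched machine code is about to be executed, the instruction cache must be flushed first. On hardware this ends in the riscv_flush_icache syscall above; under the simulator build the body compiles away. The WritePatchedCode helper below is hypothetical:

```cpp
#include <cstddef>

// Illustration only: patch, flush, then it is safe to jump back into the code.
void PatchAndFlush(void* code_start, size_t code_size) {
  // WritePatchedCode(code_start, code_size);  // hypothetical code rewrite
  v8::internal::CpuFeatures::FlushICache(code_start, code_size);
}
```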
......@@ -58,6 +58,9 @@ constexpr int GB = MB * 1024;
#if (V8_TARGET_ARCH_S390 && !V8_HOST_ARCH_S390)
#define USE_SIMULATOR 1
#endif
#if (V8_TARGET_ARCH_RISCV64 && !V8_HOST_ARCH_RISCV64)
#define USE_SIMULATOR 1
#endif
#endif
// Determine whether the architecture uses an embedded constant pool
......
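USE_SIMULATOR means generated riscv64 code is never executed natively; call sites elsewhere in V8 dispatch through the simulator instead. A schematic of that pattern, with the helper name RunGenerated made up and the Simulator::Call signature simplified:

```cpp
// Schematic only: how a USE_SIMULATOR build diverts entry into generated code.
intptr_t RunGenerated(v8::internal::Isolate* isolate, v8::internal::Address entry) {
#if defined(USE_SIMULATOR)
  // Cross builds (riscv64 target on an x64 host) interpret the instructions.
  return v8::internal::Simulator::current(isolate)->Call(entry /*, args... */);
#else
  // Native builds jump straight to the generated code.
  auto fn = reinterpret_cast<intptr_t (*)()>(entry);
  return fn();
#endif
}
```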
......@@ -23,6 +23,8 @@
#include "src/compiler/backend/ppc/instruction-codes-ppc.h"
#elif V8_TARGET_ARCH_S390
#include "src/compiler/backend/s390/instruction-codes-s390.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/compiler/backend/riscv64/instruction-codes-riscv64.h"
#else
#define TARGET_ARCH_OPCODE_LIST(V)
#define TARGET_ADDRESSING_MODE_LIST(V)
......
......@@ -2700,7 +2700,7 @@ void InstructionSelector::VisitWord32AtomicPairCompareExchange(Node* node) {
#endif // !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM && !V8_TARGET_ARCH_MIPS
#if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS64 && \
!V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64
!V8_TARGET_ARCH_S390 && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_RISCV64
void InstructionSelector::VisitWord64AtomicLoad(Node* node) { UNIMPLEMENTED(); }
void InstructionSelector::VisitWord64AtomicStore(Node* node) {
......@@ -2725,7 +2725,8 @@ void InstructionSelector::VisitWord64AtomicCompareExchange(Node* node) {
UNIMPLEMENTED();
}
#endif // !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_PPC64
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390
// !V8_TARGET_ARCH_MIPS64 && !V8_TARGET_ARCH_S390 &&
// !V8_TARGET_ARCH_RISCV64
#if !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM
// This is only needed on 32-bit to split the 64-bit value into two operands.
......
......@@ -59,7 +59,6 @@ namespace {
(1 << d11.code()) | (1 << d12.code()) | (1 << d13.code()) | \
(1 << d14.code()) | (1 << d15.code())
#elif V8_TARGET_ARCH_ARM64
// ===========================================================================
// == arm64 ====================================================================
......@@ -130,6 +129,19 @@ namespace {
d8.bit() | d9.bit() | d10.bit() | d11.bit() | d12.bit() | d13.bit() | \
d14.bit() | d15.bit()
#elif V8_TARGET_ARCH_RISCV64
// ===========================================================================
// == riscv64 =================================================================
// ===========================================================================
#define PARAM_REGISTERS a0, a1, a2, a3, a4, a5, a6, a7
// fp is not part of CALLEE_SAVE_REGISTERS (similar to how MIPS64 or PPC defines
// it)
#define CALLEE_SAVE_REGISTERS \
s1.bit() | s2.bit() | s3.bit() | s4.bit() | s5.bit() | s6.bit() | s7.bit() | \
s8.bit() | s9.bit() | s10.bit() | s11.bit()
#define CALLEE_SAVE_FP_REGISTERS \
fs0.bit() | fs1.bit() | fs2.bit() | fs3.bit() | fs4.bit() | fs5.bit() | \
fs6.bit() | fs7.bit() | fs8.bit() | fs9.bit() | fs10.bit() | fs11.bit()
#else
// ===========================================================================
// == unknown ================================================================
......
......@@ -1086,7 +1086,7 @@ void DebugEvaluate::VerifyTransitiveBuiltins(Isolate* isolate) {
}
CHECK(!failed);
#if defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64) || \
defined(V8_TARGET_ARCH_MIPS64)
defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_RISCV64)
// Isolate-independent builtin calls and jumps do not emit reloc infos
// on PPC. We try to avoid using PC-relative code due to performance
// issues, especially on older hardware.
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_RISCV64
#include "src/codegen/macro-assembler.h"
#include "src/debug/debug.h"
#include "src/debug/liveedit.h"
#include "src/execution/frames-inl.h"
namespace v8 {
namespace internal {
#define __ ACCESS_MASM(masm)
void DebugCodegen::GenerateHandleDebuggerStatement(MacroAssembler* masm) {
{
FrameScope scope(masm, StackFrame::INTERNAL);
__ CallRuntime(Runtime::kHandleDebuggerStatement, 0);
}
__ MaybeDropFrames();
// Return to caller.
__ Ret();
}
void DebugCodegen::GenerateFrameDropperTrampoline(MacroAssembler* masm) {
// Frame is being dropped:
// - Drop to the target frame specified by a1.
// - Look up current function on the frame.
// - Leave the frame.
// - Restart the frame by calling the function.
__ mv(fp, a1);
__ Ld(a1, MemOperand(fp, StandardFrameConstants::kFunctionOffset));
// Pop return address and frame.
__ LeaveFrame(StackFrame::INTERNAL);
__ Ld(a0, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
__ Lhu(a0,
FieldMemOperand(a0, SharedFunctionInfo::kFormalParameterCountOffset));
__ mv(a2, a0);
__ InvokeFunction(a1, a2, a0, JUMP_FUNCTION);
}
const bool LiveEdit::kFrameDropperSupported = true;
#undef __
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_RISCV64
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/deoptimizer/deoptimizer.h"
namespace v8 {
namespace internal {
const bool Deoptimizer::kSupportsFixedDeoptExitSizes = true;
const int Deoptimizer::kNonLazyDeoptExitSize = 5 * kInstrSize;
const int Deoptimizer::kLazyDeoptExitSize = 5 * kInstrSize;
const int Deoptimizer::kEagerWithResumeBeforeArgsSize = 6 * kInstrSize;
const int Deoptimizer::kEagerWithResumeDeoptExitSize =
kEagerWithResumeBeforeArgsSize + 4 * kInstrSize;
const int Deoptimizer::kEagerWithResumeImmedArgs1PcOffset = kInstrSize;
const int Deoptimizer::kEagerWithResumeImmedArgs2PcOffset =
kInstrSize + kSystemPointerSize;
Float32 RegisterValues::GetFloatRegister(unsigned n) const {
return Float32::FromBits(
static_cast<uint32_t>(double_registers_[n].get_bits()));
}
void FrameDescription::SetCallerPc(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerFp(unsigned offset, intptr_t value) {
SetFrameSlot(offset, value);
}
void FrameDescription::SetCallerConstantPool(unsigned offset, intptr_t value) {
// No embedded constant pool support.
UNREACHABLE();
}
void FrameDescription::SetPc(intptr_t pc) { pc_ = pc; }
} // namespace internal
} // namespace v8
......@@ -86,6 +86,7 @@ class PerfJitLogger : public CodeEventLogger {
static const uint32_t kElfMachARM64 = 183;
static const uint32_t kElfMachS390x = 22;
static const uint32_t kElfMachPPC64 = 21;
static const uint32_t kElfMachRISCV = 243;
uint32_t GetElfMach() {
#if V8_TARGET_ARCH_IA32
......@@ -104,6 +105,8 @@ class PerfJitLogger : public CodeEventLogger {
return kElfMachS390x;
#elif V8_TARGET_ARCH_PPC64
return kElfMachPPC64;
#elif V8_TARGET_ARCH_RISCV64
return kElfMachRISCV;
#else
UNIMPLEMENTED();
return 0;
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/diagnostics/unwinder.h"
namespace v8 {
void GetCalleeSavedRegistersFromEntryFrame(void* fp,
RegisterState* register_state) {}
} // namespace v8
......@@ -339,6 +339,8 @@ inline static int FrameSlotToFPOffset(int slot) {
#include "src/execution/mips64/frame-constants-mips64.h" // NOLINT
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/frame-constants-s390.h" // NOLINT
#elif V8_TARGET_ARCH_RISCV64
#include "src/execution/riscv64/frame-constants-riscv64.h" // NOLINT
#else
#error Unsupported target architecture.
#endif
......
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#if V8_TARGET_ARCH_RISCV64
#include "src/execution/riscv64/frame-constants-riscv64.h"
#include "src/codegen/riscv64/assembler-riscv64-inl.h"
#include "src/execution/frame-constants.h"
#include "src/execution/frames.h"
namespace v8 {
namespace internal {
Register JavaScriptFrame::fp_register() { return v8::internal::fp; }
Register JavaScriptFrame::context_register() { return cp; }
Register JavaScriptFrame::constant_pool_pointer_register() { UNREACHABLE(); }
int InterpreterFrameConstants::RegisterStackSlotCount(int register_count) {
return register_count;
}
int BuiltinContinuationFrameConstants::PaddingSlotCount(int register_count) {
USE(register_count);
return 0;
}
} // namespace internal
} // namespace v8
#endif // V8_TARGET_ARCH_RISCV64
......@@ -26,6 +26,8 @@
#include "src/execution/mips64/simulator-mips64.h"
#elif V8_TARGET_ARCH_S390
#include "src/execution/s390/simulator-s390.h"
#elif V8_TARGET_ARCH_RISCV64
#include "src/execution/riscv64/simulator-riscv64.h"
#else
#error Unsupported target architecture.
#endif
......