Commit c0d5d4d6 authored by Lu Yahan, committed by V8 LUCI CQ

Revert "[riscv32] Add RISCV32 backend"

This reverts commit 491de34b.

Reason for revert: Loses co-author information

Original change's description:
> [riscv32] Add RISCV32 backend
>
> This very large changeset adds support for RISCV32.
>
> Bug: v8:13025
> Change-Id: Ieacc857131e6620f0fcfd7daa88a0f8d77056aa9
> Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3736732
> Reviewed-by: Michael Achenbach <machenbach@chromium.org>
> Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
> Reviewed-by: ji qiu <qiuji@iscas.ac.cn>
> Reviewed-by: Andreas Haas <ahaas@chromium.org>
> Reviewed-by: Hannes Payer <hpayer@chromium.org>
> Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
> Cr-Commit-Position: refs/heads/main@{#82053}

Bug: v8:13025
Change-Id: I6abea32c8ea43b080a938782dc643c97a123f1d8
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3803994
Reviewed-by: Michael Achenbach <machenbach@chromium.org>
Owners-Override: Leszek Swirski <leszeks@chromium.org>
Reviewed-by: ji qiu <qiuji@iscas.ac.cn>
Reviewed-by: Andreas Haas <ahaas@chromium.org>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#82163}
parent 48522862
@@ -27,5 +27,5 @@ per-file ...-loong64*=file:LOONG_OWNERS
 per-file ...-mips*=file:MIPS_OWNERS
 per-file ...-mips64*=file:MIPS_OWNERS
 per-file ...-ppc*=file:PPC_OWNERS
-per-file ...-riscv*=file:RISCV_OWNERS
+per-file ...-riscv64*=file:RISCV_OWNERS
 per-file ...-s390*=file:S390_OWNERS
@@ -96,8 +96,7 @@ if (v8_snapshot_toolchain == "") {
     } else {
       _cpus = "x64_v8_${v8_current_cpu}"
     }
-  } else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel" ||
-             v8_current_cpu == "riscv32") {
+  } else if (v8_current_cpu == "arm" || v8_current_cpu == "mipsel") {
     _cpus = "x86_v8_${v8_current_cpu}"
   } else {
     # This branch should not be reached; leave _cpus blank so the assert
......
@@ -20,7 +20,7 @@ struct CalleeSavedRegisters {
 #elif V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_ARM64 ||   \
     V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_PPC ||    \
     V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_S390 || \
-    V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
+    V8_TARGET_ARCH_LOONG64
 struct CalleeSavedRegisters {};
 #else
 #error Target architecture was not detected as supported by v8
......
@@ -674,9 +674,6 @@ V8 shared library set USING_V8_SHARED.
 #if __riscv_xlen == 64
 #define V8_HOST_ARCH_RISCV64 1
 #define V8_HOST_ARCH_64_BIT 1
-#elif __riscv_xlen == 32
-#define V8_HOST_ARCH_RISCV32 1
-#define V8_HOST_ARCH_32_BIT 1
 #else
 #error "Cannot detect Riscv's bitwidth"
 #endif
@@ -692,8 +689,7 @@ V8 shared library set USING_V8_SHARED.
 #if !V8_TARGET_ARCH_X64 && !V8_TARGET_ARCH_IA32 && !V8_TARGET_ARCH_ARM &&     \
     !V8_TARGET_ARCH_ARM64 && !V8_TARGET_ARCH_MIPS && !V8_TARGET_ARCH_MIPS64 && \
     !V8_TARGET_ARCH_PPC && !V8_TARGET_ARCH_PPC64 && !V8_TARGET_ARCH_S390 &&   \
-    !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64 && \
-    !V8_TARGET_ARCH_RISCV32
+    !V8_TARGET_ARCH_RISCV64 && !V8_TARGET_ARCH_LOONG64
 #if defined(_M_X64) || defined(__x86_64__)
 #define V8_TARGET_ARCH_X64 1
 #elif defined(_M_IX86) || defined(__i386__)
@@ -718,8 +714,6 @@ V8 shared library set USING_V8_SHARED.
 #elif defined(__riscv) || defined(__riscv__)
 #if __riscv_xlen == 64
 #define V8_TARGET_ARCH_RISCV64 1
-#elif __riscv_xlen == 32
-#define V8_TARGET_ARCH_RISCV32 1
 #endif
 #else
 #error Target architecture was not detected as supported by v8
@@ -759,8 +753,6 @@ V8 shared library set USING_V8_SHARED.
 #endif
 #elif V8_TARGET_ARCH_RISCV64
 #define V8_TARGET_ARCH_64_BIT 1
-#elif V8_TARGET_ARCH_RISCV32
-#define V8_TARGET_ARCH_32_BIT 1
 #else
 #error Unknown target architecture pointer size
 #endif
@@ -792,9 +784,6 @@ V8 shared library set USING_V8_SHARED.
 #if (V8_TARGET_ARCH_RISCV64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_RISCV64))
 #error Target architecture riscv64 is only supported on riscv64 and x64 host
 #endif
-#if (V8_TARGET_ARCH_RISCV32 && !(V8_HOST_ARCH_IA32 || V8_HOST_ARCH_RISCV32))
-#error Target architecture riscv32 is only supported on riscv32 and ia32 host
-#endif
 #if (V8_TARGET_ARCH_LOONG64 && !(V8_HOST_ARCH_X64 || V8_HOST_ARCH_LOONG64))
 #error Target architecture loong64 is only supported on loong64 and x64 host
 #endif
@@ -834,7 +823,7 @@ V8 shared library set USING_V8_SHARED.
 #else
 #define V8_TARGET_BIG_ENDIAN 1
 #endif
-#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
+#elif V8_TARGET_ARCH_RISCV64
 #define V8_TARGET_LITTLE_ENDIAN 1
 #elif defined(__BYTE_ORDER__)
 #if __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
......
@@ -101,7 +101,7 @@
 #if ((!defined(V8_CC_GNU) && !defined(V8_CC_MSVC) &&                       \
       !defined(V8_TARGET_ARCH_MIPS) && !defined(V8_TARGET_ARCH_MIPS64) && \
       !defined(V8_TARGET_ARCH_PPC) && !defined(V8_TARGET_ARCH_PPC64) &&   \
-      !defined(V8_TARGET_ARCH_RISCV64) && !defined(V8_TARGET_ARCH_RISCV32)) || \
+      !defined(V8_TARGET_ARCH_RISCV64)) ||                                \
      (defined(__clang__) && __cplusplus > 201300L))
 #define V8_NOEXCEPT noexcept
 #else
......
@@ -354,10 +354,6 @@ void* OS::GetRandomMmapAddr() {
   // TODO(RISCV): We need more information from the kernel to correctly mask
   // this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
   raw_addr &= uint64_t{0xFFFFFF0000};
-#elif V8_TARGET_ARCH_RISCV32
-  // TODO(RISCV): We need more information from the kernel to correctly mask
-  // this address for RISC-V. https://github.com/v8-riscv/v8/issues/375
-  raw_addr &= 0x3FFFF000;
 #elif V8_TARGET_ARCH_LOONG64
   // 42 bits of virtual addressing. Truncate to 40 bits to allow kernel chance
   // to fulfill request.
@@ -689,8 +685,6 @@ void OS::DebugBreak() {
   asm volatile(".word 0x0001");
 #elif V8_HOST_ARCH_RISCV64
   asm("ebreak");
-#elif V8_HOST_ARCH_RISCV32
-  asm("ebreak");
 #else
 #error Unsupported host architecture.
 #endif
......
@@ -32,8 +32,8 @@
 #include "src/baseline/ppc/baseline-assembler-ppc-inl.h"
 #elif V8_TARGET_ARCH_S390X
 #include "src/baseline/s390/baseline-assembler-s390-inl.h"
-#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
-#include "src/baseline/riscv/baseline-assembler-riscv-inl.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/baseline/riscv64/baseline-assembler-riscv64-inl.h"
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/baseline/mips64/baseline-assembler-mips64-inl.h"
 #elif V8_TARGET_ARCH_MIPS
......
@@ -48,9 +48,7 @@
 #elif V8_TARGET_ARCH_S390X
 #include "src/baseline/s390/baseline-compiler-s390-inl.h"
 #elif V8_TARGET_ARCH_RISCV64
-#include "src/baseline/riscv/baseline-compiler-riscv-inl.h"
-#elif V8_TARGET_ARCH_RISCV32
-#include "src/baseline/riscv/baseline-compiler-riscv-inl.h"
+#include "src/baseline/riscv64/baseline-compiler-riscv64-inl.h"
 #elif V8_TARGET_ARCH_MIPS64
 #include "src/baseline/mips64/baseline-compiler-mips64-inl.h"
 #elif V8_TARGET_ARCH_MIPS
......
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-#ifndef V8_BASELINE_RISCV_BASELINE_ASSEMBLER_RISCV_INL_H_
-#define V8_BASELINE_RISCV_BASELINE_ASSEMBLER_RISCV_INL_H_
+#ifndef V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
+#define V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
 #include "src/baseline/baseline-assembler.h"
 #include "src/codegen/assembler-inl.h"
@@ -79,7 +79,7 @@ MemOperand BaselineAssembler::RegisterFrameOperand(
 }
 void BaselineAssembler::RegisterFrameAddress(
     interpreter::Register interpreter_register, Register rscratch) {
-  return __ AddWord(rscratch, fp,
+  return __ Add64(rscratch, fp,
                     interpreter_register.ToOperand() * kSystemPointerSize);
 }
 MemOperand BaselineAssembler::FeedbackVectorOperand() {
@@ -163,7 +163,7 @@ void BaselineAssembler::JumpIfInstanceType(Condition cc, Register map,
     __ GetObjectType(map, type, type);
     __ Assert(eq, AbortReason::kUnexpectedValue, type, Operand(MAP_TYPE));
   }
-  __ LoadWord(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
+  __ Ld(type, FieldMemOperand(map, Map::kInstanceTypeOffset));
   __ Branch(target, AsMasmCondition(cc), type, Operand(instance_type));
 }
 void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
@@ -171,7 +171,7 @@ void BaselineAssembler::JumpIfPointer(Condition cc, Register value,
                                       Label::Distance) {
   ScratchRegisterScope temps(this);
   Register temp = temps.AcquireScratch();
-  __ LoadWord(temp, operand);
+  __ Ld(temp, operand);
   __ Branch(target, AsMasmCondition(cc), value, Operand(temp));
 }
 void BaselineAssembler::JumpIfSmi(Condition cc, Register value, Smi smi,
@@ -195,7 +195,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, Register value,
   // todo: compress pointer
   ScratchRegisterScope temps(this);
   Register scratch = temps.AcquireScratch();
-  __ LoadWord(scratch, operand);
+  __ Ld(scratch, operand);
   __ Branch(target, AsMasmCondition(cc), value, Operand(scratch));
 }
 void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
@@ -204,7 +204,7 @@ void BaselineAssembler::JumpIfTagged(Condition cc, MemOperand operand,
   // todo: compress pointer
   ScratchRegisterScope temps(this);
   Register scratch = temps.AcquireScratch();
-  __ LoadWord(scratch, operand);
+  __ Ld(scratch, operand);
   __ Branch(target, AsMasmCondition(cc), scratch, Operand(value));
 }
 void BaselineAssembler::JumpIfByte(Condition cc, Register value, int32_t byte,
@@ -219,7 +219,7 @@ void BaselineAssembler::Move(Register output, TaggedIndex value) {
   __ li(output, Operand(value.ptr()));
 }
 void BaselineAssembler::Move(MemOperand output, Register source) {
-  __ StoreWord(source, output);
+  __ Sd(source, output);
 }
 void BaselineAssembler::Move(Register output, ExternalReference reference) {
   __ li(output, Operand(reference));
@@ -446,9 +446,8 @@ void BaselineAssembler::AddToInterruptBudgetAndJumpIfNotExceeded(
   __ Add32(interrupt_budget, interrupt_budget, weight);
   __ Sw(interrupt_budget,
         FieldMemOperand(feedback_cell, FeedbackCell::kInterruptBudgetOffset));
-  if (skip_interrupt_label) {
+  if (skip_interrupt_label)
     __ Branch(skip_interrupt_label, ge, interrupt_budget, Operand(zero_reg));
-  }
 }
 void BaselineAssembler::LdaContextSlot(Register context, uint32_t index,
@@ -511,26 +510,27 @@ void BaselineAssembler::AddSmi(Register lhs, Smi rhs) {
   if (SmiValuesAre31Bits()) {
     __ Add32(lhs, lhs, Operand(rhs));
   } else {
-    __ AddWord(lhs, lhs, Operand(rhs));
+    __ Add64(lhs, lhs, Operand(rhs));
   }
 }
 void BaselineAssembler::Word32And(Register output, Register lhs, int rhs) {
   __ And(output, lhs, Operand(rhs));
 }
 void BaselineAssembler::Switch(Register reg, int case_value_base,
                                Label** labels, int num_labels) {
   ASM_CODE_COMMENT(masm_);
   Label fallthrough;
   if (case_value_base != 0) {
-    __ SubWord(reg, reg, Operand(case_value_base));
+    __ Sub64(reg, reg, Operand(case_value_base));
   }
   // Mostly copied from code-generator-riscv64.cc
   ScratchRegisterScope scope(this);
   Label table;
   __ Branch(&fallthrough, AsMasmCondition(Condition::kUnsignedGreaterThanEqual),
-            reg, Operand(num_labels));
+            reg, Operand(int64_t(num_labels)));
   int64_t imm64;
   imm64 = __ branch_long_offset(&table);
   CHECK(is_int32(imm64 + 0x800));
@@ -619,4 +619,4 @@ inline void EnsureAccumulatorPreservedScope::AssertEqualToAccumulator(
 }  // namespace internal
 }  // namespace v8
-#endif  // V8_BASELINE_RISCV_BASELINE_ASSEMBLER_RISCV_INL_H_
+#endif  // V8_BASELINE_RISCV64_BASELINE_ASSEMBLER_RISCV64_INL_H_
@@ -2,8 +2,8 @@
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
-#ifndef V8_BASELINE_RISCV_BASELINE_COMPILER_RISCV_INL_H_
-#define V8_BASELINE_RISCV_BASELINE_COMPILER_RISCV_INL_H_
+#ifndef V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
+#define V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
 #include "src/baseline/baseline-compiler.h"
@@ -39,9 +39,9 @@ void BaselineCompiler::PrologueFillFrame() {
   const bool has_new_target = new_target_index != kMaxInt;
   if (has_new_target) {
     DCHECK_LE(new_target_index, register_count);
-    __ masm()->AddWord(sp, sp, Operand(-(kPointerSize * new_target_index)));
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * new_target_index)));
     for (int i = 0; i < new_target_index; i++) {
-      __ masm()->StoreWord(kInterpreterAccumulatorRegister,
+      __ masm()->Sd(kInterpreterAccumulatorRegister,
                     MemOperand(sp, i * kSystemPointerSize));
     }
     // Push new_target_or_generator.
@@ -50,15 +50,15 @@ void BaselineCompiler::PrologueFillFrame() {
   }
   if (register_count < 2 * kLoopUnrollSize) {
     // If the frame is small enough, just unroll the frame fill completely.
-    __ masm()->AddWord(sp, sp, Operand(-(kPointerSize * register_count)));
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
     for (int i = 0; i < register_count; ++i) {
-      __ masm()->StoreWord(kInterpreterAccumulatorRegister,
+      __ masm()->Sd(kInterpreterAccumulatorRegister,
                     MemOperand(sp, i * kSystemPointerSize));
     }
   } else {
-    __ masm()->AddWord(sp, sp, Operand(-(kPointerSize * register_count)));
+    __ masm()->Add64(sp, sp, Operand(-(kPointerSize * register_count)));
     for (int i = 0; i < register_count; ++i) {
-      __ masm()->StoreWord(kInterpreterAccumulatorRegister,
+      __ masm()->Sd(kInterpreterAccumulatorRegister,
                     MemOperand(sp, i * kSystemPointerSize));
     }
   }
@@ -66,7 +66,7 @@ void BaselineCompiler::PrologueFillFrame() {
 void BaselineCompiler::VerifyFrameSize() {
   ASM_CODE_COMMENT(&masm_);
-  __ masm()->AddWord(kScratchReg, sp,
+  __ masm()->Add64(kScratchReg, sp,
                      Operand(InterpreterFrameConstants::kFixedFrameSizeFromFp +
                              bytecode_->frame_size()));
   __ masm()->Assert(eq, AbortReason::kUnexpectedStackPointer, kScratchReg,
@@ -79,4 +79,4 @@ void BaselineCompiler::VerifyFrameSize() {
 }  // namespace internal
 }  // namespace v8
-#endif  // V8_BASELINE_RISCV_BASELINE_COMPILER_RISCV_INL_H_
+#endif  // V8_BASELINE_RISCV64_BASELINE_COMPILER_RISCV64_INL_H_
@@ -251,7 +251,7 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   BIND(&u32);
   Return(ChangeUint32ToTagged(AtomicLoad<Uint32T>(
       AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 2))));
-#if (V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6) || V8_TARGET_ARCH_RISCV32
+#if V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6
   BIND(&i64);
   Goto(&u64);
@@ -268,8 +268,7 @@ TF_BUILTIN(AtomicsLoad, SharedArrayBufferBuiltinsAssembler) {
   BIND(&u64);
   Return(BigIntFromUnsigned64(AtomicLoad64<AtomicUint64>(
       AtomicMemoryOrder::kSeqCst, backing_store, WordShl(index_word, 3))));
-#endif  //(V8_TARGET_ARCH_MIPS && !_MIPS_ARCH_MIPS32R6) ||
-        // V8_TARGET_ARCH_RISCV32
+#endif
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
@@ -524,7 +523,8 @@ TF_BUILTIN(AtomicsExchange, SharedArrayBufferBuiltinsAssembler) {
   // This shouldn't happen, we've already validated the type.
   BIND(&other);
   Unreachable();
-#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64
+#endif  // V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_MIPS64 ||
+        // V8_TARGET_ARCH_RISCV64
   BIND(&detached_or_out_of_bounds);
   {
......
@@ -25,8 +25,8 @@
 #include "src/codegen/loong64/assembler-loong64.h"
 #elif V8_TARGET_ARCH_S390
 #include "src/codegen/s390/assembler-s390.h"
-#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv/assembler-riscv.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/assembler-riscv64.h"
 #else
 #error Unknown architecture.
 #endif
......
@@ -25,8 +25,8 @@
 #include "src/codegen/loong64/assembler-loong64-inl.h"
 #elif V8_TARGET_ARCH_S390
 #include "src/codegen/s390/assembler-s390-inl.h"
-#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv/assembler-riscv-inl.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/assembler-riscv64-inl.h"
 #else
 #error Unknown architecture.
 #endif
......
@@ -459,7 +459,7 @@ void ConstantPool::MaybeCheck() {
 #endif  // defined(V8_TARGET_ARCH_ARM64)
-#if defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_RISCV32)
+#if defined(V8_TARGET_ARCH_RISCV64)
 // Constant Pool.
@@ -706,7 +706,7 @@ void ConstantPool::MaybeCheck() {
   }
 }
-#endif  // defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_RISCV32)
+#endif  // defined(V8_TARGET_ARCH_RISCV64)
 }  // namespace internal
 }  // namespace v8
@@ -163,8 +163,7 @@ class ConstantPoolBuilder {
 #endif  // defined(V8_TARGET_ARCH_PPC) || defined(V8_TARGET_ARCH_PPC64)
-#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64) || \
-    defined(V8_TARGET_ARCH_RISCV32)
+#if defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64)
 class ConstantPoolKey {
  public:
@@ -346,8 +345,7 @@ class ConstantPool {
   int blocked_nesting_ = 0;
 };
-#endif  // defined(V8_TARGET_ARCH_ARM64) || defined(V8_TARGET_ARCH_RISCV64) ||
-        // defined(V8_TARGET_ARCH_RISCV32)
+#endif  // defined(V8_TARGET_ARCH_ARM64)
 }  // namespace internal
 }  // namespace v8
......
@@ -23,8 +23,8 @@
 #include "src/codegen/s390/constants-s390.h"
 #elif V8_TARGET_ARCH_X64
 #include "src/codegen/x64/constants-x64.h"
-#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv/constants-riscv.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/constants-riscv64.h"
 #else
 #error Unsupported target architecture.
 #endif
......
@@ -76,10 +76,6 @@ enum CpuFeature {
   FPU,
   FP64FPU,
   RISCV_SIMD,
-#elif V8_TARGET_ARCH_RISCV32
-  FPU,
-  FP64FPU,
-  RISCV_SIMD,
 #endif
   NUMBER_OF_CPU_FEATURES
......
@@ -757,7 +757,7 @@ ExternalReference ExternalReference::invoke_accessor_getter_callback() {
 #define re_stack_check_func RegExpMacroAssemblerLOONG64::CheckStackGuardState
 #elif V8_TARGET_ARCH_S390
 #define re_stack_check_func RegExpMacroAssemblerS390::CheckStackGuardState
-#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
+#elif V8_TARGET_ARCH_RISCV64
 #define re_stack_check_func RegExpMacroAssemblerRISCV::CheckStackGuardState
 #else
 UNREACHABLE();
......
@@ -29,8 +29,8 @@
 #include "src/codegen/mips/interface-descriptors-mips-inl.h"
 #elif V8_TARGET_ARCH_LOONG64
 #include "src/codegen/loong64/interface-descriptors-loong64-inl.h"
-#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv/interface-descriptors-riscv-inl.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/interface-descriptors-riscv64-inl.h"
 #else
 #error Unsupported target architecture.
 #endif
@@ -336,7 +336,7 @@ constexpr auto BaselineOutOfLinePrologueDescriptor::registers() {
 #if V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 || V8_TARGET_ARCH_ARM ||       \
     V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 || V8_TARGET_ARCH_S390 ||      \
     V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || V8_TARGET_ARCH_MIPS || \
-    V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
+    V8_TARGET_ARCH_LOONG64
   return RegisterArray(
       kContextRegister, kJSFunctionRegister, kJavaScriptCallArgCountRegister,
       kJavaScriptCallExtraArg1Register, kJavaScriptCallNewTargetRegister,
@@ -357,7 +357,7 @@ constexpr auto BaselineLeaveFrameDescriptor::registers() {
 #if V8_TARGET_ARCH_IA32 || V8_TARGET_ARCH_X64 || V8_TARGET_ARCH_ARM64 ||      \
     V8_TARGET_ARCH_ARM || V8_TARGET_ARCH_PPC || V8_TARGET_ARCH_PPC64 ||       \
     V8_TARGET_ARCH_S390 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_MIPS64 || \
-    V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64 || V8_TARGET_ARCH_RISCV32
+    V8_TARGET_ARCH_MIPS || V8_TARGET_ARCH_LOONG64
   return RegisterArray(ParamsSizeRegister(), WeightRegister());
 #else
   return DefaultRegisterArray();
......
@@ -63,9 +63,9 @@ enum class SmiCheck { kOmit, kInline };
 #elif V8_TARGET_ARCH_S390
 #include "src/codegen/s390/constants-s390.h"
 #include "src/codegen/s390/macro-assembler-s390.h"
-#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv/constants-riscv.h"
-#include "src/codegen/riscv/macro-assembler-riscv.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/constants-riscv64.h"
+#include "src/codegen/riscv64/macro-assembler-riscv64.h"
 #else
 #error Unsupported target architecture.
 #endif
......
@@ -25,8 +25,8 @@
 #include "src/codegen/loong64/register-loong64.h"
 #elif V8_TARGET_ARCH_S390
 #include "src/codegen/s390/register-s390.h"
-#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv/register-riscv.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/register-riscv64.h"
 #else
 #error Unknown architecture.
 #endif
......
@@ -19,7 +19,7 @@ static const int kMaxAllocatableGeneralRegisterCount =
     ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
 static const int kMaxAllocatableDoubleRegisterCount =
     ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT) 0;
-#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
 static const int kMaxAllocatableSIMD128RegisterCount =
     ALLOCATABLE_SIMD128_REGISTERS(REGISTER_COUNT) 0;
 #endif
@@ -38,17 +38,16 @@ static const int kAllocatableNoVFP32DoubleCodes[] = {
 #endif  // V8_TARGET_ARCH_ARM
 #undef REGISTER_CODE
-#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
 static const int kAllocatableSIMD128Codes[] = {
-#if V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_RISCV32
+#if V8_TARGET_ARCH_RISCV64
 #define REGISTER_CODE(R) kVRCode_##R,
 #else
 #define REGISTER_CODE(R) kSimd128Code_##R,
 #endif
     ALLOCATABLE_SIMD128_REGISTERS(REGISTER_CODE)};
 #undef REGISTER_CODE
-#endif  // V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 ||
-        // V8_TARGET_ARCH_PPC64
+#endif  // V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
 static_assert(RegisterConfiguration::kMaxGeneralRegisters >=
               Register::kNumRegisters);
@@ -96,8 +95,6 @@ static int get_num_allocatable_double_registers() {
       kMaxAllocatableDoubleRegisterCount;
 #elif V8_TARGET_ARCH_RISCV64
       kMaxAllocatableDoubleRegisterCount;
-#elif V8_TARGET_ARCH_RISCV32
-      kMaxAllocatableDoubleRegisterCount;
 #else
 #error Unsupported target architecture.
 #endif
@@ -107,7 +104,7 @@ static int get_num_allocatable_double_registers() {
 static int get_num_allocatable_simd128_registers() {
   return
-#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
       kMaxAllocatableSIMD128RegisterCount;
 #else
       0;
@@ -128,7 +125,7 @@ static const int* get_allocatable_double_codes() {
 static const int* get_allocatable_simd128_codes() {
   return
-#if V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_RISCV64 || V8_TARGET_ARCH_PPC64
       kAllocatableSIMD128Codes;
 #else
       kAllocatableDoubleCodes;
......
@@ -23,8 +23,8 @@
 #include "src/codegen/loong64/reglist-loong64.h"
 #elif V8_TARGET_ARCH_S390
 #include "src/codegen/s390/reglist-s390.h"
-#elif V8_TARGET_ARCH_RISCV32 || V8_TARGET_ARCH_RISCV64
-#include "src/codegen/riscv/reglist-riscv.h"
+#elif V8_TARGET_ARCH_RISCV64
+#include "src/codegen/riscv64/reglist-riscv64.h"
 #else
 #error Unknown architecture.
 #endif
......
@@ -312,8 +312,7 @@ bool RelocInfo::OffHeapTargetIsCodedSpecially() {
 #elif defined(V8_TARGET_ARCH_IA32) || defined(V8_TARGET_ARCH_MIPS) ||     \
     defined(V8_TARGET_ARCH_MIPS64) || defined(V8_TARGET_ARCH_PPC) ||      \
     defined(V8_TARGET_ARCH_PPC64) || defined(V8_TARGET_ARCH_S390) ||      \
-    defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64) || \
-    defined(V8_TARGET_ARCH_RISCV32)
+    defined(V8_TARGET_ARCH_RISCV64) || defined(V8_TARGET_ARCH_LOONG64)
   return true;
 #endif
 }
......
@@ -72,8 +72,7 @@ class RelocInfo {
     EXTERNAL_REFERENCE,  // The address of an external C++ function.
     INTERNAL_REFERENCE,  // An address inside the same function.
-    // Encoded internal reference, used only on RISCV64, RISCV32, MIPS, MIPS64
-    // and PPC.
+    // Encoded internal reference, used only on RISCV64, MIPS, MIPS64 and PPC.
     INTERNAL_REFERENCE_ENCODED,
     // An off-heap instruction stream target. See http://goo.gl/Z2HUiM.
......
// Copyright (c) 1994-2006 Sun Microsystems Inc.
// All Rights Reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// - Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// - Redistribution in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// - Neither the name of Sun Microsystems or the names of contributors may
// be used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
// IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
// THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// The original source code covered by the above license above has been
// modified significantly by Google Inc.
// Copyright 2021 the V8 project authors. All rights reserved.
#ifndef V8_CODEGEN_RISCV_BASE_ASSEMBLER_RISCV_H_
#define V8_CODEGEN_RISCV_BASE_ASSEMBLER_RISCV_H_
#include <stdio.h>
#include <memory>
#include <set>
#include "src/codegen/assembler.h"
#include "src/codegen/constant-pool.h"
#include "src/codegen/external-reference.h"
#include "src/codegen/label.h"
#include "src/codegen/riscv/constants-riscv.h"
#include "src/codegen/riscv/register-riscv.h"
#include "src/objects/contexts.h"
#include "src/objects/smi.h"
namespace v8 {
namespace internal {
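// Debug-logging helper for the RISC-V backend; output is gated on the
// FLAG_riscv_debug runtime flag (--riscv_debug).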
#define DEBUG_PRINTF(...)     \
  do {                        \
    if (FLAG_riscv_debug) {   \
      printf(__VA_ARGS__);    \
    }                         \
  } while (0)
class SafepointTableBuilder;
class AssemblerRiscvBase {
protected:
enum OffsetSize : int {
kOffset21 = 21, // RISCV jal
kOffset12 = 12, // RISCV imm12
kOffset20 = 20, // RISCV imm20
kOffset13 = 13, // RISCV branch
kOffset32 = 32, // RISCV auipc + instr_I
kOffset11 = 11, // RISCV C_J
kOffset9 = 9 // RISCV compressed branch
};
// Returns the branch offset to the given label from the current code
// position, encodable in the given number of bits. Links the label to the
// current position if it is still unbound.
virtual int32_t branch_offset_helper(Label* L, OffsetSize bits) = 0;
virtual void emit(Instr x) = 0;
virtual void emit(ShortInstr x) = 0;
virtual void emit(uint64_t x) = 0;
// Instruction generation.
// ----- Top-level instruction formats match those in the ISA manual
// (R, I, S, B, U, J). These match the formats defined in LLVM's
// RISCVInstrFormats.td.
void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
Register rs1, Register rs2);
void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
FPURegister rd, FPURegister rs1, FPURegister rs2);
void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
FPURegister rs1, Register rs2);
void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
FPURegister rd, Register rs1, Register rs2);
void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode,
FPURegister rd, FPURegister rs1, Register rs2);
void GenInstrR(uint8_t funct7, uint8_t funct3, BaseOpcode opcode, Register rd,
FPURegister rs1, FPURegister rs2);
void GenInstrR4(uint8_t funct2, BaseOpcode opcode, Register rd, Register rs1,
Register rs2, Register rs3, FPURoundingMode frm);
void GenInstrR4(uint8_t funct2, BaseOpcode opcode, FPURegister rd,
FPURegister rs1, FPURegister rs2, FPURegister rs3,
FPURoundingMode frm);
void GenInstrRAtomic(uint8_t funct5, bool aq, bool rl, uint8_t funct3,
Register rd, Register rs1, Register rs2);
void GenInstrRFrm(uint8_t funct7, BaseOpcode opcode, Register rd,
Register rs1, Register rs2, FPURoundingMode frm);
void GenInstrI(uint8_t funct3, BaseOpcode opcode, Register rd, Register rs1,
int16_t imm12);
void GenInstrI(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
Register rs1, int16_t imm12);
void GenInstrIShift(bool arithshift, uint8_t funct3, BaseOpcode opcode,
Register rd, Register rs1, uint8_t shamt);
void GenInstrIShiftW(bool arithshift, uint8_t funct3, BaseOpcode opcode,
Register rd, Register rs1, uint8_t shamt);
void GenInstrS(uint8_t funct3, BaseOpcode opcode, Register rs1, Register rs2,
int16_t imm12);
void GenInstrS(uint8_t funct3, BaseOpcode opcode, Register rs1,
FPURegister rs2, int16_t imm12);
void GenInstrB(uint8_t funct3, BaseOpcode opcode, Register rs1, Register rs2,
int16_t imm12);
void GenInstrU(BaseOpcode opcode, Register rd, int32_t imm20);
void GenInstrJ(BaseOpcode opcode, Register rd, int32_t imm20);
void GenInstrCR(uint8_t funct4, BaseOpcode opcode, Register rd, Register rs2);
void GenInstrCA(uint8_t funct6, BaseOpcode opcode, Register rd, uint8_t funct,
Register rs2);
void GenInstrCI(uint8_t funct3, BaseOpcode opcode, Register rd, int8_t imm6);
void GenInstrCIU(uint8_t funct3, BaseOpcode opcode, Register rd,
uint8_t uimm6);
void GenInstrCIU(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
uint8_t uimm6);
void GenInstrCIW(uint8_t funct3, BaseOpcode opcode, Register rd,
uint8_t uimm8);
void GenInstrCSS(uint8_t funct3, BaseOpcode opcode, FPURegister rs2,
uint8_t uimm6);
void GenInstrCSS(uint8_t funct3, BaseOpcode opcode, Register rs2,
uint8_t uimm6);
void GenInstrCL(uint8_t funct3, BaseOpcode opcode, Register rd, Register rs1,
uint8_t uimm5);
void GenInstrCL(uint8_t funct3, BaseOpcode opcode, FPURegister rd,
Register rs1, uint8_t uimm5);
void GenInstrCS(uint8_t funct3, BaseOpcode opcode, Register rs2, Register rs1,
uint8_t uimm5);
void GenInstrCS(uint8_t funct3, BaseOpcode opcode, FPURegister rs2,
Register rs1, uint8_t uimm5);
void GenInstrCJ(uint8_t funct3, BaseOpcode opcode, uint16_t uint11);
void GenInstrCB(uint8_t funct3, BaseOpcode opcode, Register rs1,
uint8_t uimm8);
void GenInstrCBA(uint8_t funct3, uint8_t funct2, BaseOpcode opcode,
Register rs1, int8_t imm6);
// ----- Instruction class templates match those in LLVM's RISCVInstrInfo.td
void GenInstrBranchCC_rri(uint8_t funct3, Register rs1, Register rs2,
int16_t imm12);
void GenInstrLoad_ri(uint8_t funct3, Register rd, Register rs1,
int16_t imm12);
void GenInstrStore_rri(uint8_t funct3, Register rs1, Register rs2,
int16_t imm12);
void GenInstrALU_ri(uint8_t funct3, Register rd, Register rs1, int16_t imm12);
void GenInstrShift_ri(bool arithshift, uint8_t funct3, Register rd,
Register rs1, uint8_t shamt);
void GenInstrALU_rr(uint8_t funct7, uint8_t funct3, Register rd, Register rs1,
Register rs2);
void GenInstrCSR_ir(uint8_t funct3, Register rd, ControlStatusReg csr,
Register rs1);
void GenInstrCSR_ii(uint8_t funct3, Register rd, ControlStatusReg csr,
uint8_t rs1);
void GenInstrShiftW_ri(bool arithshift, uint8_t funct3, Register rd,
Register rs1, uint8_t shamt);
void GenInstrALUW_rr(uint8_t funct7, uint8_t funct3, Register rd,
Register rs1, Register rs2);
void GenInstrPriv(uint8_t funct7, Register rs1, Register rs2);
void GenInstrLoadFP_ri(uint8_t funct3, FPURegister rd, Register rs1,
int16_t imm12);
void GenInstrStoreFP_rri(uint8_t funct3, Register rs1, FPURegister rs2,
int16_t imm12);
void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
FPURegister rs1, FPURegister rs2);
void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
Register rs1, Register rs2);
void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, FPURegister rd,
FPURegister rs1, Register rs2);
void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
FPURegister rs1, Register rs2);
void GenInstrALUFP_rr(uint8_t funct7, uint8_t funct3, Register rd,
FPURegister rs1, FPURegister rs2);
virtual void BlockTrampolinePoolFor(int instructions) = 0;
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_BASE_ASSEMBLER_RISCV_H_
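For orientation, each GenInstr* helper above packs its operands into one of the six base instruction formats (R, I, S, B, U, J). The standalone sketch below is not part of the commit; EncodeRType is an illustrative helper showing the R-type field layout those helpers implement, as defined by the RISC-V ISA manual.
#include <cstdint>
#include <cstdio>
// R-type layout:
// funct7[31:25] | rs2[24:20] | rs1[19:15] | funct3[14:12] | rd[11:7] | opcode[6:0]
static uint32_t EncodeRType(uint32_t funct7, uint32_t rs2, uint32_t rs1,
                            uint32_t funct3, uint32_t rd, uint32_t opcode) {
  return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
         (rd << 7) | opcode;
}
int main() {
  // add a0, a1, a2 (x10 = x11 + x12): funct7=0, funct3=0, opcode=OP (0b0110011).
  std::printf("%08x\n", EncodeRType(0b0000000, 12, 11, 0b000, 10, 0b0110011));
  // Prints 00c58533, the ISA encoding of add a0, a1, a2.
  return 0;
}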
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/base-riscv-i.h"
namespace v8 {
namespace internal {
void AssemblerRISCVI::lui(Register rd, int32_t imm20) {
GenInstrU(LUI, rd, imm20);
}
void AssemblerRISCVI::auipc(Register rd, int32_t imm20) {
GenInstrU(AUIPC, rd, imm20);
}
// Jumps
void AssemblerRISCVI::jal(Register rd, int32_t imm21) {
GenInstrJ(JAL, rd, imm21);
BlockTrampolinePoolFor(1);
}
void AssemblerRISCVI::jalr(Register rd, Register rs1, int16_t imm12) {
GenInstrI(0b000, JALR, rd, rs1, imm12);
BlockTrampolinePoolFor(1);
}
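// Note: jal/jalr block the trampoline pool for one instruction, which appears
// intended to keep a pool from being emitted immediately after the jump and
// separating instruction pairs (e.g. auipc + jalr) that are read or patched
// as a unit.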
// Branches
void AssemblerRISCVI::beq(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b000, rs1, rs2, imm13);
}
void AssemblerRISCVI::bne(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b001, rs1, rs2, imm13);
}
void AssemblerRISCVI::blt(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b100, rs1, rs2, imm13);
}
void AssemblerRISCVI::bge(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b101, rs1, rs2, imm13);
}
void AssemblerRISCVI::bltu(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b110, rs1, rs2, imm13);
}
void AssemblerRISCVI::bgeu(Register rs1, Register rs2, int16_t imm13) {
GenInstrBranchCC_rri(0b111, rs1, rs2, imm13);
}
// Loads
void AssemblerRISCVI::lb(Register rd, Register rs1, int16_t imm12) {
GenInstrLoad_ri(0b000, rd, rs1, imm12);
}
void AssemblerRISCVI::lh(Register rd, Register rs1, int16_t imm12) {
GenInstrLoad_ri(0b001, rd, rs1, imm12);
}
void AssemblerRISCVI::lw(Register rd, Register rs1, int16_t imm12) {
GenInstrLoad_ri(0b010, rd, rs1, imm12);
}
void AssemblerRISCVI::lbu(Register rd, Register rs1, int16_t imm12) {
GenInstrLoad_ri(0b100, rd, rs1, imm12);
}
void AssemblerRISCVI::lhu(Register rd, Register rs1, int16_t imm12) {
GenInstrLoad_ri(0b101, rd, rs1, imm12);
}
// Stores
void AssemblerRISCVI::sb(Register source, Register base, int16_t imm12) {
GenInstrStore_rri(0b000, base, source, imm12);
}
void AssemblerRISCVI::sh(Register source, Register base, int16_t imm12) {
GenInstrStore_rri(0b001, base, source, imm12);
}
void AssemblerRISCVI::sw(Register source, Register base, int16_t imm12) {
GenInstrStore_rri(0b010, base, source, imm12);
}
// Arithmetic with immediate
void AssemblerRISCVI::addi(Register rd, Register rs1, int16_t imm12) {
GenInstrALU_ri(0b000, rd, rs1, imm12);
}
void AssemblerRISCVI::slti(Register rd, Register rs1, int16_t imm12) {
GenInstrALU_ri(0b010, rd, rs1, imm12);
}
void AssemblerRISCVI::sltiu(Register rd, Register rs1, int16_t imm12) {
GenInstrALU_ri(0b011, rd, rs1, imm12);
}
void AssemblerRISCVI::xori(Register rd, Register rs1, int16_t imm12) {
GenInstrALU_ri(0b100, rd, rs1, imm12);
}
void AssemblerRISCVI::ori(Register rd, Register rs1, int16_t imm12) {
GenInstrALU_ri(0b110, rd, rs1, imm12);
}
void AssemblerRISCVI::andi(Register rd, Register rs1, int16_t imm12) {
GenInstrALU_ri(0b111, rd, rs1, imm12);
}
void AssemblerRISCVI::slli(Register rd, Register rs1, uint8_t shamt) {
GenInstrShift_ri(0, 0b001, rd, rs1, shamt & 0x3f);
}
void AssemblerRISCVI::srli(Register rd, Register rs1, uint8_t shamt) {
GenInstrShift_ri(0, 0b101, rd, rs1, shamt & 0x3f);
}
void AssemblerRISCVI::srai(Register rd, Register rs1, uint8_t shamt) {
GenInstrShift_ri(1, 0b101, rd, rs1, shamt & 0x3f);
}
// Arithmetic
void AssemblerRISCVI::add(Register rd, Register rs1, Register rs2) {
GenInstrALU_rr(0b0000000, 0b000, rd, rs1, rs2);
}
void AssemblerRISCVI::sub(Register rd, Register rs1, Register rs2) {
GenInstrALU_rr(0b0100000, 0b000, rd, rs1, rs2);
}
void AssemblerRISCVI::sll(Register rd, Register rs1, Register rs2) {
GenInstrALU_rr(0b0000000, 0b001, rd, rs1, rs2);
}
void AssemblerRISCVI::slt(Register rd, Register rs1, Register rs2) {
GenInstrALU_rr(0b0000000, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVI::sltu(Register rd, Register rs1, Register rs2) {
GenInstrALU_rr(0b0000000, 0b011, rd, rs1, rs2);
}
void AssemblerRISCVI::xor_(Register rd, Register rs1, Register rs2) {
GenInstrALU_rr(0b0000000, 0b100, rd, rs1, rs2);
}
void AssemblerRISCVI::srl(Register rd, Register rs1, Register rs2) {
GenInstrALU_rr(0b0000000, 0b101, rd, rs1, rs2);
}
void AssemblerRISCVI::sra(Register rd, Register rs1, Register rs2) {
GenInstrALU_rr(0b0100000, 0b101, rd, rs1, rs2);
}
void AssemblerRISCVI::or_(Register rd, Register rs1, Register rs2) {
GenInstrALU_rr(0b0000000, 0b110, rd, rs1, rs2);
}
void AssemblerRISCVI::and_(Register rd, Register rs1, Register rs2) {
GenInstrALU_rr(0b0000000, 0b111, rd, rs1, rs2);
}
// Memory fences
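// The FENCE immediate packs fm[11:8] | pred[7:4] | succ[3:0], where pred and
// succ are 4-bit {I, O, R, W} masks. fence_tso below uses fm=0b1000 with
// pred=succ=RW (0b0011), the FENCE.TSO encoding.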
void AssemblerRISCVI::fence(uint8_t pred, uint8_t succ) {
DCHECK(is_uint4(pred) && is_uint4(succ));
uint16_t imm12 = succ | (pred << 4) | (0b0000 << 8);
GenInstrI(0b000, MISC_MEM, ToRegister(0), ToRegister(0), imm12);
}
void AssemblerRISCVI::fence_tso() {
uint16_t imm12 = (0b0011) | (0b0011 << 4) | (0b1000 << 8);
GenInstrI(0b000, MISC_MEM, ToRegister(0), ToRegister(0), imm12);
}
// Environment call / break
void AssemblerRISCVI::ecall() {
GenInstrI(0b000, SYSTEM, ToRegister(0), ToRegister(0), 0);
}
void AssemblerRISCVI::ebreak() {
GenInstrI(0b000, SYSTEM, ToRegister(0), ToRegister(0), 1);
}
// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
// instruction (i.e., it should always trap, if your implementation has invalid
// instruction traps).
void AssemblerRISCVI::unimp() {
GenInstrI(0b001, SYSTEM, ToRegister(0), ToRegister(0), 0b110000000000);
}
bool AssemblerRISCVI::IsBranch(Instr instr) {
return (instr & kBaseOpcodeMask) == BRANCH;
}
bool AssemblerRISCVI::IsJump(Instr instr) {
int Op = instr & kBaseOpcodeMask;
return Op == JAL || Op == JALR;
}
bool AssemblerRISCVI::IsNop(Instr instr) { return instr == kNopByte; }
bool AssemblerRISCVI::IsJal(Instr instr) {
return (instr & kBaseOpcodeMask) == JAL;
}
bool AssemblerRISCVI::IsJalr(Instr instr) {
return (instr & kBaseOpcodeMask) == JALR;
}
bool AssemblerRISCVI::IsLui(Instr instr) {
return (instr & kBaseOpcodeMask) == LUI;
}
bool AssemblerRISCVI::IsAuipc(Instr instr) {
return (instr & kBaseOpcodeMask) == AUIPC;
}
bool AssemblerRISCVI::IsAddi(Instr instr) {
return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDI;
}
bool AssemblerRISCVI::IsOri(Instr instr) {
return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ORI;
}
bool AssemblerRISCVI::IsSlli(Instr instr) {
return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_SLLI;
}
int AssemblerRISCVI::JumpOffset(Instr instr) {
int32_t imm21 = ((instr & 0x7fe00000) >> 20) | ((instr & 0x100000) >> 9) |
(instr & 0xff000) | ((instr & 0x80000000) >> 11);
imm21 = imm21 << 11 >> 11;
return imm21;
}
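// Note: JAL stores its 21-bit immediate scattered as imm[20|10:1|11|19:12]
// across instruction bits [31:12]; JumpOffset above reassembles those fields,
// and the final `<< 11 >> 11` arithmetic-shift pair sign-extends the result.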
int AssemblerRISCVI::JalrOffset(Instr instr) {
DCHECK(IsJalr(instr));
int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
return imm12;
}
int AssemblerRISCVI::AuipcOffset(Instr instr) {
DCHECK(IsAuipc(instr));
int32_t imm20 = static_cast<int32_t>(instr & kImm20Mask);
return imm20;
}
bool AssemblerRISCVI::IsLw(Instr instr) {
return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LW;
}
int AssemblerRISCVI::LoadOffset(Instr instr) {
#if V8_TARGET_ARCH_RISCV64
DCHECK(IsLd(instr));
#elif V8_TARGET_ARCH_RISCV32
DCHECK(IsLw(instr));
#endif
int32_t imm12 = static_cast<int32_t>(instr & kImm12Mask) >> 20;
return imm12;
}
#ifdef V8_TARGET_ARCH_RISCV64
bool AssemblerRISCVI::IsAddiw(Instr instr) {
return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_ADDIW;
}
bool AssemblerRISCVI::IsLd(Instr instr) {
return (instr & (kBaseOpcodeMask | kFunct3Mask)) == RO_LD;
}
void AssemblerRISCVI::lwu(Register rd, Register rs1, int16_t imm12) {
GenInstrLoad_ri(0b110, rd, rs1, imm12);
}
void AssemblerRISCVI::ld(Register rd, Register rs1, int16_t imm12) {
GenInstrLoad_ri(0b011, rd, rs1, imm12);
}
void AssemblerRISCVI::sd(Register source, Register base, int16_t imm12) {
GenInstrStore_rri(0b011, base, source, imm12);
}
void AssemblerRISCVI::addiw(Register rd, Register rs1, int16_t imm12) {
GenInstrI(0b000, OP_IMM_32, rd, rs1, imm12);
}
void AssemblerRISCVI::slliw(Register rd, Register rs1, uint8_t shamt) {
GenInstrShiftW_ri(0, 0b001, rd, rs1, shamt & 0x1f);
}
void AssemblerRISCVI::srliw(Register rd, Register rs1, uint8_t shamt) {
GenInstrShiftW_ri(0, 0b101, rd, rs1, shamt & 0x1f);
}
void AssemblerRISCVI::sraiw(Register rd, Register rs1, uint8_t shamt) {
GenInstrShiftW_ri(1, 0b101, rd, rs1, shamt & 0x1f);
}
void AssemblerRISCVI::addw(Register rd, Register rs1, Register rs2) {
GenInstrALUW_rr(0b0000000, 0b000, rd, rs1, rs2);
}
void AssemblerRISCVI::subw(Register rd, Register rs1, Register rs2) {
GenInstrALUW_rr(0b0100000, 0b000, rd, rs1, rs2);
}
void AssemblerRISCVI::sllw(Register rd, Register rs1, Register rs2) {
GenInstrALUW_rr(0b0000000, 0b001, rd, rs1, rs2);
}
void AssemblerRISCVI::srlw(Register rd, Register rs1, Register rs2) {
GenInstrALUW_rr(0b0000000, 0b101, rd, rs1, rs2);
}
void AssemblerRISCVI::sraw(Register rd, Register rs1, Register rs2) {
GenInstrALUW_rr(0b0100000, 0b101, rd, rs1, rs2);
}
#endif
} // namespace internal
} // namespace v8
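As a cross-check on the decoding above, the following standalone sketch (not part of the commit; EncodeJalImm21/DecodeJalImm21 are illustrative names) scatters a J-type immediate the way the ISA defines it, then recovers it with the same masks JumpOffset uses.
#include <cassert>
#include <cstdint>
// imm[20] -> bit 31, imm[10:1] -> bits 30:21, imm[11] -> bit 20,
// imm[19:12] -> bits 19:12 (J-type, per the RISC-V ISA manual).
static uint32_t EncodeJalImm21(int32_t imm21) {
  uint32_t u = static_cast<uint32_t>(imm21);
  return ((u & 0x100000) << 11) | ((u & 0x7fe) << 20) | ((u & 0x800) << 9) |
         (u & 0xff000);
}
static int32_t DecodeJalImm21(uint32_t instr) {
  uint32_t imm21 = ((instr & 0x7fe00000) >> 20) | ((instr & 0x100000) >> 9) |
                   (instr & 0xff000) | ((instr & 0x80000000) >> 11);
  // Sign-extend the 21-bit immediate.
  return static_cast<int32_t>(imm21 << 11) >> 11;
}
int main() {
  assert(DecodeJalImm21(EncodeJalImm21(-4096)) == -4096);
  assert(DecodeJalImm21(EncodeJalImm21(2044)) == 2044);
  return 0;
}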
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-i.h"
#include "src/codegen/riscv/register-riscv.h"
#ifndef V8_CODEGEN_RISCV_BASE_RISCV_I_H_
#define V8_CODEGEN_RISCV_BASE_RISCV_I_H_
namespace v8 {
namespace internal {
class AssemblerRISCVI : public AssemblerRiscvBase {
public:
void lui(Register rd, int32_t imm20);
void auipc(Register rd, int32_t imm20);
// Jumps
void jal(Register rd, int32_t imm21);
void jalr(Register rd, Register rs1, int16_t imm12);
// Branches
void beq(Register rs1, Register rs2, int16_t imm13);
void bne(Register rs1, Register rs2, int16_t imm13);
void blt(Register rs1, Register rs2, int16_t imm13);
void bge(Register rs1, Register rs2, int16_t imm13);
void bltu(Register rs1, Register rs2, int16_t imm13);
void bgeu(Register rs1, Register rs2, int16_t imm13);
// Loads
void lb(Register rd, Register rs1, int16_t imm12);
void lh(Register rd, Register rs1, int16_t imm12);
void lw(Register rd, Register rs1, int16_t imm12);
void lbu(Register rd, Register rs1, int16_t imm12);
void lhu(Register rd, Register rs1, int16_t imm12);
// Stores
void sb(Register source, Register base, int16_t imm12);
void sh(Register source, Register base, int16_t imm12);
void sw(Register source, Register base, int16_t imm12);
// Arithmetic with immediate
void addi(Register rd, Register rs1, int16_t imm12);
void slti(Register rd, Register rs1, int16_t imm12);
void sltiu(Register rd, Register rs1, int16_t imm12);
void xori(Register rd, Register rs1, int16_t imm12);
void ori(Register rd, Register rs1, int16_t imm12);
void andi(Register rd, Register rs1, int16_t imm12);
void slli(Register rd, Register rs1, uint8_t shamt);
void srli(Register rd, Register rs1, uint8_t shamt);
void srai(Register rd, Register rs1, uint8_t shamt);
// Arithmetic
void add(Register rd, Register rs1, Register rs2);
void sub(Register rd, Register rs1, Register rs2);
void sll(Register rd, Register rs1, Register rs2);
void slt(Register rd, Register rs1, Register rs2);
void sltu(Register rd, Register rs1, Register rs2);
void xor_(Register rd, Register rs1, Register rs2);
void srl(Register rd, Register rs1, Register rs2);
void sra(Register rd, Register rs1, Register rs2);
void or_(Register rd, Register rs1, Register rs2);
void and_(Register rd, Register rs1, Register rs2);
// Other pseudo instructions that are not part of the standard RISC-V
// pseudo assembly
void nor(Register rd, Register rs, Register rt) {
or_(rd, rs, rt);
not_(rd, rd);
}
// Memory fences
void fence(uint8_t pred, uint8_t succ);
void fence_tso();
// Environment call / break
void ecall();
void ebreak();
void sync() { fence(0b1111, 0b1111); }
// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
// instruction (i.e., it should always trap, if your implementation has
// invalid instruction traps).
void unimp();
static int JumpOffset(Instr instr);
static int AuipcOffset(Instr instr);
static int JalrOffset(Instr instr);
static int LoadOffset(Instr instr);
// Check if an instruction is a branch of some kind.
static bool IsBranch(Instr instr);
static bool IsNop(Instr instr);
static bool IsJump(Instr instr);
static bool IsJal(Instr instr);
static bool IsJalr(Instr instr);
static bool IsLui(Instr instr);
static bool IsAuipc(Instr instr);
static bool IsAddi(Instr instr);
static bool IsOri(Instr instr);
static bool IsSlli(Instr instr);
static bool IsLw(Instr instr);
inline int32_t branch_offset(Label* L) {
return branch_offset_helper(L, OffsetSize::kOffset13);
}
inline int32_t jump_offset(Label* L) {
return branch_offset_helper(L, OffsetSize::kOffset21);
}
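// B-type branches encode a 13-bit signed, even offset (a reach of +/-4 KiB);
// J-type jumps encode 21 bits (+/-1 MiB). The helpers above request the
// matching OffsetSize for each form.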
// Branches
void beq(Register rs1, Register rs2, Label* L) {
beq(rs1, rs2, branch_offset(L));
}
void bne(Register rs1, Register rs2, Label* L) {
bne(rs1, rs2, branch_offset(L));
}
void blt(Register rs1, Register rs2, Label* L) {
blt(rs1, rs2, branch_offset(L));
}
void bge(Register rs1, Register rs2, Label* L) {
bge(rs1, rs2, branch_offset(L));
}
void bltu(Register rs1, Register rs2, Label* L) {
bltu(rs1, rs2, branch_offset(L));
}
void bgeu(Register rs1, Register rs2, Label* L) {
bgeu(rs1, rs2, branch_offset(L));
}
void beqz(Register rs, int16_t imm13) { beq(rs, zero_reg, imm13); }
void beqz(Register rs1, Label* L) { beqz(rs1, branch_offset(L)); }
void bnez(Register rs, int16_t imm13) { bne(rs, zero_reg, imm13); }
void bnez(Register rs1, Label* L) { bnez(rs1, branch_offset(L)); }
void blez(Register rs, int16_t imm13) { bge(zero_reg, rs, imm13); }
void blez(Register rs1, Label* L) { blez(rs1, branch_offset(L)); }
void bgez(Register rs, int16_t imm13) { bge(rs, zero_reg, imm13); }
void bgez(Register rs1, Label* L) { bgez(rs1, branch_offset(L)); }
void bltz(Register rs, int16_t imm13) { blt(rs, zero_reg, imm13); }
void bltz(Register rs1, Label* L) { bltz(rs1, branch_offset(L)); }
void bgtz(Register rs, int16_t imm13) { blt(zero_reg, rs, imm13); }
void bgtz(Register rs1, Label* L) { bgtz(rs1, branch_offset(L)); }
void bgt(Register rs1, Register rs2, int16_t imm13) { blt(rs2, rs1, imm13); }
void bgt(Register rs1, Register rs2, Label* L) {
bgt(rs1, rs2, branch_offset(L));
}
void ble(Register rs1, Register rs2, int16_t imm13) { bge(rs2, rs1, imm13); }
void ble(Register rs1, Register rs2, Label* L) {
ble(rs1, rs2, branch_offset(L));
}
void bgtu(Register rs1, Register rs2, int16_t imm13) {
bltu(rs2, rs1, imm13);
}
void bgtu(Register rs1, Register rs2, Label* L) {
bgtu(rs1, rs2, branch_offset(L));
}
void bleu(Register rs1, Register rs2, int16_t imm13) {
bgeu(rs2, rs1, imm13);
}
void bleu(Register rs1, Register rs2, Label* L) {
bleu(rs1, rs2, branch_offset(L));
}
void j(int32_t imm21) { jal(zero_reg, imm21); }
void j(Label* L) { j(jump_offset(L)); }
void b(Label* L) { j(L); }
void jal(int32_t imm21) { jal(ra, imm21); }
void jal(Label* L) { jal(jump_offset(L)); }
void jr(Register rs) { jalr(zero_reg, rs, 0); }
void jr(Register rs, int32_t imm12) { jalr(zero_reg, rs, imm12); }
void jalr(Register rs, int32_t imm12) { jalr(ra, rs, imm12); }
void jalr(Register rs) { jalr(ra, rs, 0); }
void ret() { jalr(zero_reg, ra, 0); }
void call(int32_t offset) {
auipc(ra, (offset >> 12) + ((offset & 0x800) >> 11));
jalr(ra, ra, offset << 20 >> 20);
}
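// The auipc/jalr pair splits a 32-bit pc-relative offset. jalr sign-extends
// its 12-bit immediate, so when bit 11 of the offset is set the upper 20
// bits get +1 to compensate. Worked example: offset = 0x12345800 gives
// auipc imm20 = 0x12345 + 1 = 0x12346 and jalr imm12 = -0x800;
// 0x12346 << 12 = 0x12346000, and 0x12346000 - 0x800 = 0x12345800.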
void mv(Register rd, Register rs) { addi(rd, rs, 0); }
void not_(Register rd, Register rs) { xori(rd, rs, -1); }
void neg(Register rd, Register rs) { sub(rd, zero_reg, rs); }
void seqz(Register rd, Register rs) { sltiu(rd, rs, 1); }
void snez(Register rd, Register rs) { sltu(rd, zero_reg, rs); }
void sltz(Register rd, Register rs) { slt(rd, rs, zero_reg); }
void sgtz(Register rd, Register rs) { slt(rd, zero_reg, rs); }
#if V8_TARGET_ARCH_RISCV64
void lwu(Register rd, Register rs1, int16_t imm12);
void ld(Register rd, Register rs1, int16_t imm12);
void sd(Register source, Register base, int16_t imm12);
void addiw(Register rd, Register rs1, int16_t imm12);
void slliw(Register rd, Register rs1, uint8_t shamt);
void srliw(Register rd, Register rs1, uint8_t shamt);
void sraiw(Register rd, Register rs1, uint8_t shamt);
void addw(Register rd, Register rs1, Register rs2);
void subw(Register rd, Register rs1, Register rs2);
void sllw(Register rd, Register rs1, Register rs2);
void srlw(Register rd, Register rs1, Register rs2);
void sraw(Register rd, Register rs1, Register rs2);
void negw(Register rd, Register rs) { subw(rd, zero_reg, rs); }
void sext_w(Register rd, Register rs) { addiw(rd, rs, 0); }
static bool IsAddiw(Instr instr);
static bool IsLd(Instr instr);
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_BASE_RISCV_I_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_A_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_A_H_
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {
enum OpcodeRISCVA : uint32_t {
// RV32A Standard Extension
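// All atomics share the AMO major opcode: funct3 selects the access width
// (0b010 = W, 0b011 = D) and funct5 selects the operation; the aq/rl
// ordering bits are filled in at emission time.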
RO_LR_W = AMO | (0b010 << kFunct3Shift) | (0b00010 << kFunct5Shift),
RO_SC_W = AMO | (0b010 << kFunct3Shift) | (0b00011 << kFunct5Shift),
RO_AMOSWAP_W = AMO | (0b010 << kFunct3Shift) | (0b00001 << kFunct5Shift),
RO_AMOADD_W = AMO | (0b010 << kFunct3Shift) | (0b00000 << kFunct5Shift),
RO_AMOXOR_W = AMO | (0b010 << kFunct3Shift) | (0b00100 << kFunct5Shift),
RO_AMOAND_W = AMO | (0b010 << kFunct3Shift) | (0b01100 << kFunct5Shift),
RO_AMOOR_W = AMO | (0b010 << kFunct3Shift) | (0b01000 << kFunct5Shift),
RO_AMOMIN_W = AMO | (0b010 << kFunct3Shift) | (0b10000 << kFunct5Shift),
RO_AMOMAX_W = AMO | (0b010 << kFunct3Shift) | (0b10100 << kFunct5Shift),
RO_AMOMINU_W = AMO | (0b010 << kFunct3Shift) | (0b11000 << kFunct5Shift),
RO_AMOMAXU_W = AMO | (0b010 << kFunct3Shift) | (0b11100 << kFunct5Shift),
#ifdef V8_TARGET_ARCH_RISCV64
// RV64A Standard Extension (in addition to RV32A)
RO_LR_D = AMO | (0b011 << kFunct3Shift) | (0b00010 << kFunct5Shift),
RO_SC_D = AMO | (0b011 << kFunct3Shift) | (0b00011 << kFunct5Shift),
RO_AMOSWAP_D = AMO | (0b011 << kFunct3Shift) | (0b00001 << kFunct5Shift),
RO_AMOADD_D = AMO | (0b011 << kFunct3Shift) | (0b00000 << kFunct5Shift),
RO_AMOXOR_D = AMO | (0b011 << kFunct3Shift) | (0b00100 << kFunct5Shift),
RO_AMOAND_D = AMO | (0b011 << kFunct3Shift) | (0b01100 << kFunct5Shift),
RO_AMOOR_D = AMO | (0b011 << kFunct3Shift) | (0b01000 << kFunct5Shift),
RO_AMOMIN_D = AMO | (0b011 << kFunct3Shift) | (0b10000 << kFunct5Shift),
RO_AMOMAX_D = AMO | (0b011 << kFunct3Shift) | (0b10100 << kFunct5Shift),
RO_AMOMINU_D = AMO | (0b011 << kFunct3Shift) | (0b11000 << kFunct5Shift),
RO_AMOMAXU_D = AMO | (0b011 << kFunct3Shift) | (0b11100 << kFunct5Shift),
#endif // V8_TARGET_ARCH_RISCV64
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_A_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_C_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_C_H_
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {
enum OpcodeRISCVC : uint32_t {
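// RVC opcodes combine a quadrant (C0/C1/C2, the low two bits of the 16-bit
// instruction) with funct3 in bits [15:13]; some C2 forms need funct4 as
// well. A few entries deliberately share an encoding and are told apart by
// their operand fields (e.g. C.JR vs. C.MV, and C.EBREAK/C.JALR/C.ADD).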
RO_C_ADDI4SPN = C0 | (0b000 << kRvcFunct3Shift),
RO_C_ADDI16SP = C1 | (0b011 << kRvcFunct3Shift),
RO_C_LW = C0 | (0b010 << kRvcFunct3Shift),
RO_C_SW = C0 | (0b110 << kRvcFunct3Shift),
RO_C_NOP_ADDI = C1 | (0b000 << kRvcFunct3Shift),
RO_C_LI = C1 | (0b010 << kRvcFunct3Shift),
RO_C_SUB = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
RO_C_XOR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
RO_C_OR = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_2 << kRvcFunct2Shift),
RO_C_AND = C1 | (0b100011 << kRvcFunct6Shift) | (FUNCT2_3 << kRvcFunct2Shift),
RO_C_LUI_ADD = C1 | (0b011 << kRvcFunct3Shift),
RO_C_MISC_ALU = C1 | (0b100 << kRvcFunct3Shift),
RO_C_J = C1 | (0b101 << kRvcFunct3Shift),
RO_C_BEQZ = C1 | (0b110 << kRvcFunct3Shift),
RO_C_BNEZ = C1 | (0b111 << kRvcFunct3Shift),
RO_C_SLLI = C2 | (0b000 << kRvcFunct3Shift),
RO_C_LWSP = C2 | (0b010 << kRvcFunct3Shift),
RO_C_JR_MV_ADD = C2 | (0b100 << kRvcFunct3Shift),
RO_C_JR = C2 | (0b1000 << kRvcFunct4Shift),
RO_C_MV = C2 | (0b1000 << kRvcFunct4Shift),
RO_C_EBREAK = C2 | (0b1001 << kRvcFunct4Shift),
RO_C_JALR = C2 | (0b1001 << kRvcFunct4Shift),
RO_C_ADD = C2 | (0b1001 << kRvcFunct4Shift),
RO_C_SWSP = C2 | (0b110 << kRvcFunct3Shift),
RO_C_FSD = C0 | (0b101 << kRvcFunct3Shift),
RO_C_FLD = C0 | (0b001 << kRvcFunct3Shift),
RO_C_FLDSP = C2 | (0b001 << kRvcFunct3Shift),
RO_C_FSDSP = C2 | (0b101 << kRvcFunct3Shift),
#ifdef V8_TARGET_ARCH_RISCV64
RO_C_LD = C0 | (0b011 << kRvcFunct3Shift),
RO_C_SD = C0 | (0b111 << kRvcFunct3Shift),
RO_C_LDSP = C2 | (0b011 << kRvcFunct3Shift),
RO_C_SDSP = C2 | (0b111 << kRvcFunct3Shift),
RO_C_ADDIW = C1 | (0b001 << kRvcFunct3Shift),
RO_C_SUBW =
C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_0 << kRvcFunct2Shift),
RO_C_ADDW =
C1 | (0b100111 << kRvcFunct6Shift) | (FUNCT2_1 << kRvcFunct2Shift),
#endif
#ifdef V8_TARGET_ARCH_RISCV32
RO_C_FLWSP = C2 | (0b011 << kRvcFunct3Shift),
RO_C_FSWSP = C2 | (0b111 << kRvcFunct3Shift),
RO_C_FLW = C0 | (0b011 << kRvcFunct3Shift),
RO_C_FSW = C0 | (0b111 << kRvcFunct3Shift),
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_C_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_D_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_D_H_
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {
enum OpcodeRISCVD : uint32_t {
// RV32D Standard Extension
RO_FLD = LOAD_FP | (0b011 << kFunct3Shift),
RO_FSD = STORE_FP | (0b011 << kFunct3Shift),
RO_FMADD_D = MADD | (0b01 << kFunct2Shift),
RO_FMSUB_D = MSUB | (0b01 << kFunct2Shift),
RO_FNMSUB_D = NMSUB | (0b01 << kFunct2Shift),
RO_FNMADD_D = NMADD | (0b01 << kFunct2Shift),
RO_FADD_D = OP_FP | (0b0000001 << kFunct7Shift),
RO_FSUB_D = OP_FP | (0b0000101 << kFunct7Shift),
RO_FMUL_D = OP_FP | (0b0001001 << kFunct7Shift),
RO_FDIV_D = OP_FP | (0b0001101 << kFunct7Shift),
RO_FSQRT_D = OP_FP | (0b0101101 << kFunct7Shift) | (0b00000 << kRs2Shift),
RO_FSGNJ_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
RO_FSGNJN_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
RO_FSQNJX_D = OP_FP | (0b010 << kFunct3Shift) | (0b0010001 << kFunct7Shift),
RO_FMIN_D = OP_FP | (0b000 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
RO_FMAX_D = OP_FP | (0b001 << kFunct3Shift) | (0b0010101 << kFunct7Shift),
RO_FCVT_S_D = OP_FP | (0b0100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
RO_FCVT_D_S = OP_FP | (0b0100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
RO_FEQ_D = OP_FP | (0b010 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
RO_FLT_D = OP_FP | (0b001 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
RO_FLE_D = OP_FP | (0b000 << kFunct3Shift) | (0b1010001 << kFunct7Shift),
RO_FCLASS_D = OP_FP | (0b001 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
(0b00000 << kRs2Shift),
RO_FCVT_W_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00000 << kRs2Shift),
RO_FCVT_WU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00001 << kRs2Shift),
RO_FCVT_D_W = OP_FP | (0b1101001 << kFunct7Shift) | (0b00000 << kRs2Shift),
RO_FCVT_D_WU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00001 << kRs2Shift),
#ifdef V8_TARGET_ARCH_RISCV64
// RV64D Standard Extension (in addition to RV32D)
RO_FCVT_L_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00010 << kRs2Shift),
RO_FCVT_LU_D = OP_FP | (0b1100001 << kFunct7Shift) | (0b00011 << kRs2Shift),
RO_FMV_X_D = OP_FP | (0b000 << kFunct3Shift) | (0b1110001 << kFunct7Shift) |
(0b00000 << kRs2Shift),
RO_FCVT_D_L = OP_FP | (0b1101001 << kFunct7Shift) | (0b00010 << kRs2Shift),
RO_FCVT_D_LU = OP_FP | (0b1101001 << kFunct7Shift) | (0b00011 << kRs2Shift),
RO_FMV_D_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111001 << kFunct7Shift) |
(0b00000 << kRs2Shift),
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_D_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_F_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_F_H_
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {
enum OpcodeRISCVF : uint32_t {
// RV32F Standard Extension
RO_FLW = LOAD_FP | (0b010 << kFunct3Shift),
RO_FSW = STORE_FP | (0b010 << kFunct3Shift),
RO_FMADD_S = MADD | (0b00 << kFunct2Shift),
RO_FMSUB_S = MSUB | (0b00 << kFunct2Shift),
RO_FNMSUB_S = NMSUB | (0b00 << kFunct2Shift),
RO_FNMADD_S = NMADD | (0b00 << kFunct2Shift),
RO_FADD_S = OP_FP | (0b0000000 << kFunct7Shift),
RO_FSUB_S = OP_FP | (0b0000100 << kFunct7Shift),
RO_FMUL_S = OP_FP | (0b0001000 << kFunct7Shift),
RO_FDIV_S = OP_FP | (0b0001100 << kFunct7Shift),
RO_FSQRT_S = OP_FP | (0b0101100 << kFunct7Shift) | (0b00000 << kRs2Shift),
RO_FSGNJ_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
RO_FSGNJN_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
RO_FSQNJX_S = OP_FP | (0b010 << kFunct3Shift) | (0b0010000 << kFunct7Shift),
RO_FMIN_S = OP_FP | (0b000 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
RO_FMAX_S = OP_FP | (0b001 << kFunct3Shift) | (0b0010100 << kFunct7Shift),
RO_FCVT_W_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00000 << kRs2Shift),
RO_FCVT_WU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00001 << kRs2Shift),
RO_FMV = OP_FP | (0b1110000 << kFunct7Shift) | (0b000 << kFunct3Shift) |
(0b00000 << kRs2Shift),
RO_FEQ_S = OP_FP | (0b010 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
RO_FLT_S = OP_FP | (0b001 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
RO_FLE_S = OP_FP | (0b000 << kFunct3Shift) | (0b1010000 << kFunct7Shift),
RO_FCLASS_S = OP_FP | (0b001 << kFunct3Shift) | (0b1110000 << kFunct7Shift),
RO_FCVT_S_W = OP_FP | (0b1101000 << kFunct7Shift) | (0b00000 << kRs2Shift),
RO_FCVT_S_WU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00001 << kRs2Shift),
RO_FMV_W_X = OP_FP | (0b000 << kFunct3Shift) | (0b1111000 << kFunct7Shift),
#ifdef V8_TARGET_ARCH_RISCV64
// RV64F Standard Extension (in addition to RV32F)
RO_FCVT_L_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00010 << kRs2Shift),
RO_FCVT_LU_S = OP_FP | (0b1100000 << kFunct7Shift) | (0b00011 << kRs2Shift),
RO_FCVT_S_L = OP_FP | (0b1101000 << kFunct7Shift) | (0b00010 << kRs2Shift),
RO_FCVT_S_LU = OP_FP | (0b1101000 << kFunct7Shift) | (0b00011 << kRs2Shift),
#endif // V8_TARGET_ARCH_RISCV64
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_F_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_I_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_I_H_
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {
enum OpcodeRISCV32I : uint32_t {
// Note: constants use the RO (RISC-V Opcode) prefix
// RV32I Base Instruction Set
RO_LUI = LUI,
RO_AUIPC = AUIPC,
RO_JAL = JAL,
RO_JALR = JALR | (0b000 << kFunct3Shift),
RO_BEQ = BRANCH | (0b000 << kFunct3Shift),
RO_BNE = BRANCH | (0b001 << kFunct3Shift),
RO_BLT = BRANCH | (0b100 << kFunct3Shift),
RO_BGE = BRANCH | (0b101 << kFunct3Shift),
RO_BLTU = BRANCH | (0b110 << kFunct3Shift),
RO_BGEU = BRANCH | (0b111 << kFunct3Shift),
RO_LB = LOAD | (0b000 << kFunct3Shift),
RO_LH = LOAD | (0b001 << kFunct3Shift),
RO_LW = LOAD | (0b010 << kFunct3Shift),
RO_LBU = LOAD | (0b100 << kFunct3Shift),
RO_LHU = LOAD | (0b101 << kFunct3Shift),
RO_SB = STORE | (0b000 << kFunct3Shift),
RO_SH = STORE | (0b001 << kFunct3Shift),
RO_SW = STORE | (0b010 << kFunct3Shift),
RO_ADDI = OP_IMM | (0b000 << kFunct3Shift),
RO_SLTI = OP_IMM | (0b010 << kFunct3Shift),
RO_SLTIU = OP_IMM | (0b011 << kFunct3Shift),
RO_XORI = OP_IMM | (0b100 << kFunct3Shift),
RO_ORI = OP_IMM | (0b110 << kFunct3Shift),
RO_ANDI = OP_IMM | (0b111 << kFunct3Shift),
RO_SLLI = OP_IMM | (0b001 << kFunct3Shift),
RO_SRLI = OP_IMM | (0b101 << kFunct3Shift),
// RO_SRAI = OP_IMM | (0b101 << kFunct3Shift), // Same as SRLI, use funct7
RO_ADD = OP | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
RO_SUB = OP | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
RO_SLL = OP | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
RO_SLT = OP | (0b010 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
RO_SLTU = OP | (0b011 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
RO_XOR = OP | (0b100 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
RO_SRL = OP | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
RO_SRA = OP | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
RO_OR = OP | (0b110 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
RO_AND = OP | (0b111 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
RO_FENCE = MISC_MEM | (0b000 << kFunct3Shift),
RO_ECALL = SYSTEM | (0b000 << kFunct3Shift),
// RO_EBREAK = SYSTEM | (0b000 << kFunct3Shift), // Same as ECALL, use imm12
#if V8_TARGET_ARCH_RISCV64
// RV64I Base Instruction Set (in addition to RV32I)
RO_LWU = LOAD | (0b110 << kFunct3Shift),
RO_LD = LOAD | (0b011 << kFunct3Shift),
RO_SD = STORE | (0b011 << kFunct3Shift),
RO_ADDIW = OP_IMM_32 | (0b000 << kFunct3Shift),
RO_SLLIW = OP_IMM_32 | (0b001 << kFunct3Shift),
RO_SRLIW = OP_IMM_32 | (0b101 << kFunct3Shift),
// RO_SRAIW = OP_IMM_32 | (0b101 << kFunct3Shift), // Same as SRLIW, use funct7
RO_ADDW = OP_32 | (0b000 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
RO_SUBW = OP_32 | (0b000 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
RO_SLLW = OP_32 | (0b001 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
RO_SRLW = OP_32 | (0b101 << kFunct3Shift) | (0b0000000 << kFunct7Shift),
RO_SRAW = OP_32 | (0b101 << kFunct3Shift) | (0b0100000 << kFunct7Shift),
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_I_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_M_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_M_H_
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {
enum OpcodeRISCVM : uint32_t {
// RV32M Standard Extension
RO_MUL = OP | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
RO_MULH = OP | (0b001 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
RO_MULHSU = OP | (0b010 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
RO_MULHU = OP | (0b011 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
RO_DIV = OP | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
RO_DIVU = OP | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
RO_REM = OP | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
RO_REMU = OP | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
#ifdef V8_TARGET_ARCH_RISCV64
// RV64M Standard Extension (in addition to RV32M)
RO_MULW = OP_32 | (0b000 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
RO_DIVW = OP_32 | (0b100 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
RO_DIVUW = OP_32 | (0b101 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
RO_REMW = OP_32 | (0b110 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
RO_REMUW = OP_32 | (0b111 << kFunct3Shift) | (0b0000001 << kFunct7Shift),
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_M_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_ZICSR_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_ZICSR_H_
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {
// RISCV CSR related bit mask and shift
const int kFcsrFlagsBits = 5;
const uint32_t kFcsrFlagsMask = (1 << kFcsrFlagsBits) - 1;
const int kFcsrFrmBits = 3;
const int kFcsrFrmShift = kFcsrFlagsBits;
const uint32_t kFcsrFrmMask = ((1 << kFcsrFrmBits) - 1) << kFcsrFrmShift;
const int kFcsrBits = kFcsrFlagsBits + kFcsrFrmBits;
const uint32_t kFcsrMask = kFcsrFlagsMask | kFcsrFrmMask;
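// Layout of fcsr: fflags (accrued exception flags) occupy bits [4:0] and
// frm (dynamic rounding mode) occupies bits [7:5], so kFcsrMask covers the
// low 8 bits.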
enum OpcodeRISCVZICSR : uint32_t {
// RV32/RV64 Zicsr Standard Extension
RO_CSRRW = SYSTEM | (0b001 << kFunct3Shift),
RO_CSRRS = SYSTEM | (0b010 << kFunct3Shift),
RO_CSRRC = SYSTEM | (0b011 << kFunct3Shift),
RO_CSRRWI = SYSTEM | (0b101 << kFunct3Shift),
RO_CSRRSI = SYSTEM | (0b110 << kFunct3Shift),
RO_CSRRCI = SYSTEM | (0b111 << kFunct3Shift),
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_ZICSR_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANT_RISCV_ZIFENCEI_H_
#define V8_CODEGEN_RISCV_CONSTANT_RISCV_ZIFENCEI_H_
#include "src/codegen/riscv/base-constants-riscv.h"
namespace v8 {
namespace internal {
enum OpcodeRISCVIFENCEI : uint32_t {
RO_FENCE_I = MISC_MEM | (0b001 << kFunct3Shift),
};
}  // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANT_RISCV_ZIFENCEI_H_
// Copyright 2021 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#ifndef V8_CODEGEN_RISCV_CONSTANTS_RISCV_H_
#define V8_CODEGEN_RISCV_CONSTANTS_RISCV_H_
#include "src/codegen/riscv/base-constants-riscv.h"
#include "src/codegen/riscv/constant-riscv-a.h"
#include "src/codegen/riscv/constant-riscv-c.h"
#include "src/codegen/riscv/constant-riscv-d.h"
#include "src/codegen/riscv/constant-riscv-f.h"
#include "src/codegen/riscv/constant-riscv-i.h"
#include "src/codegen/riscv/constant-riscv-m.h"
#include "src/codegen/riscv/constant-riscv-v.h"
#include "src/codegen/riscv/constant-riscv-zicsr.h"
#include "src/codegen/riscv/constant-riscv-zifencei.h"
namespace v8 {
namespace internal {} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_CONSTANTS_RISCV_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/extension-riscv-a.h"
namespace v8 {
namespace internal {
// RV32A Standard Extension
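// Each emitter below forwards the aq (acquire) and rl (release) ordering
// bits; funct3 = 0b010 selects the 32-bit (W) variants.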
void AssemblerRISCVA::lr_w(bool aq, bool rl, Register rd, Register rs1) {
GenInstrRAtomic(0b00010, aq, rl, 0b010, rd, rs1, zero_reg);
}
void AssemblerRISCVA::sc_w(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b00011, aq, rl, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVA::amoswap_w(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b00001, aq, rl, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVA::amoadd_w(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b00000, aq, rl, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVA::amoxor_w(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b00100, aq, rl, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVA::amoand_w(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b01100, aq, rl, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVA::amoor_w(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b01000, aq, rl, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVA::amomin_w(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b10000, aq, rl, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVA::amomax_w(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b10100, aq, rl, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVA::amominu_w(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b11000, aq, rl, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVA::amomaxu_w(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b11100, aq, rl, 0b010, rd, rs1, rs2);
}
// RV64A Standard Extension (in addition to RV32A)
#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVA::lr_d(bool aq, bool rl, Register rd, Register rs1) {
GenInstrRAtomic(0b00010, aq, rl, 0b011, rd, rs1, zero_reg);
}
void AssemblerRISCVA::sc_d(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b00011, aq, rl, 0b011, rd, rs1, rs2);
}
void AssemblerRISCVA::amoswap_d(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b00001, aq, rl, 0b011, rd, rs1, rs2);
}
void AssemblerRISCVA::amoadd_d(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b00000, aq, rl, 0b011, rd, rs1, rs2);
}
void AssemblerRISCVA::amoxor_d(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b00100, aq, rl, 0b011, rd, rs1, rs2);
}
void AssemblerRISCVA::amoand_d(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b01100, aq, rl, 0b011, rd, rs1, rs2);
}
void AssemblerRISCVA::amoor_d(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b01000, aq, rl, 0b011, rd, rs1, rs2);
}
void AssemblerRISCVA::amomin_d(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b10000, aq, rl, 0b011, rd, rs1, rs2);
}
void AssemblerRISCVA::amomax_d(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b10100, aq, rl, 0b011, rd, rs1, rs2);
}
void AssemblerRISCVA::amominu_d(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b11000, aq, rl, 0b011, rd, rs1, rs2);
}
void AssemblerRISCVA::amomaxu_d(bool aq, bool rl, Register rd, Register rs1,
Register rs2) {
GenInstrRAtomic(0b11100, aq, rl, 0b011, rd, rs1, rs2);
}
#endif
} // namespace internal
} // namespace v8
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-a.h"
#include "src/codegen/riscv/register-riscv.h"
#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_A_H_
#define V8_CODEGEN_RISCV_EXTENSION_RISCV_A_H_
namespace v8 {
namespace internal {
class AssemblerRISCVA : public AssemblerRiscvBase {
// RV32A Standard Extension
public:
void lr_w(bool aq, bool rl, Register rd, Register rs1);
void sc_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amoswap_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amoadd_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amoxor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amoand_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amoor_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amomin_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amomax_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amominu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amomaxu_w(bool aq, bool rl, Register rd, Register rs1, Register rs2);
#ifdef V8_TARGET_ARCH_RISCV64
// RV64A Standard Extension (in addition to RV32A)
void lr_d(bool aq, bool rl, Register rd, Register rs1);
void sc_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amoswap_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amoadd_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amoxor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amoand_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amoor_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amomin_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amomax_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amominu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
void amomaxu_d(bool aq, bool rl, Register rd, Register rs1, Register rs2);
#endif
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_EXTENSION_RISCV_A_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/extension-riscv-c.h"
namespace v8 {
namespace internal {
// RVC Standard Extension (compressed instructions)
void AssemblerRISCVC::c_nop() { GenInstrCI(0b000, C1, zero_reg, 0); }
void AssemblerRISCVC::c_addi(Register rd, int8_t imm6) {
DCHECK(rd != zero_reg && imm6 != 0);
GenInstrCI(0b000, C1, rd, imm6);
}
#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_addiw(Register rd, int8_t imm6) {
DCHECK(rd != zero_reg);
GenInstrCI(0b001, C1, rd, imm6);
}
#endif
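// C.ADDI16SP adjusts sp by a multiple of 16; the six encoded bits are
// scattered as nzimm[9|4|6|8:7|5], which the shifts below assemble from
// imm10.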
void AssemblerRISCVC::c_addi16sp(int16_t imm10) {
DCHECK(is_int10(imm10) && (imm10 & 0xf) == 0);
uint8_t uimm6 = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) |
((imm10 & 0x40) >> 3) | ((imm10 & 0x180) >> 6) |
((imm10 & 0x20) >> 5);
GenInstrCIU(0b011, C1, sp, uimm6);
}
void AssemblerRISCVC::c_addi4spn(Register rd, int16_t uimm10) {
DCHECK(is_uint10(uimm10) && (uimm10 != 0));
uint8_t uimm8 = ((uimm10 & 0x4) >> 1) | ((uimm10 & 0x8) >> 3) |
((uimm10 & 0x30) << 2) | ((uimm10 & 0x3c0) >> 4);
GenInstrCIW(0b000, C0, rd, uimm8);
}
void AssemblerRISCVC::c_li(Register rd, int8_t imm6) {
DCHECK(rd != zero_reg);
GenInstrCI(0b010, C1, rd, imm6);
}
void AssemblerRISCVC::c_lui(Register rd, int8_t imm6) {
DCHECK(rd != zero_reg && rd != sp && imm6 != 0);
GenInstrCI(0b011, C1, rd, imm6);
}
void AssemblerRISCVC::c_slli(Register rd, uint8_t shamt6) {
DCHECK(rd != zero_reg && shamt6 != 0);
GenInstrCIU(0b000, C2, rd, shamt6);
}
void AssemblerRISCVC::c_fldsp(FPURegister rd, uint16_t uimm9) {
DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
GenInstrCIU(0b001, C2, rd, uimm6);
}
#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_ldsp(Register rd, uint16_t uimm9) {
DCHECK(rd != zero_reg && is_uint9(uimm9) && (uimm9 & 0x7) == 0);
uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
GenInstrCIU(0b011, C2, rd, uimm6);
}
#endif
void AssemblerRISCVC::c_lwsp(Register rd, uint16_t uimm8) {
DCHECK(rd != zero_reg && is_uint8(uimm8) && (uimm8 & 0x3) == 0);
uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
GenInstrCIU(0b010, C2, rd, uimm6);
}
void AssemblerRISCVC::c_jr(Register rs1) {
DCHECK(rs1 != zero_reg);
GenInstrCR(0b1000, C2, rs1, zero_reg);
BlockTrampolinePoolFor(1);
}
void AssemblerRISCVC::c_mv(Register rd, Register rs2) {
DCHECK(rd != zero_reg && rs2 != zero_reg);
GenInstrCR(0b1000, C2, rd, rs2);
}
void AssemblerRISCVC::c_ebreak() { GenInstrCR(0b1001, C2, zero_reg, zero_reg); }
void AssemblerRISCVC::c_jalr(Register rs1) {
DCHECK(rs1 != zero_reg);
GenInstrCR(0b1001, C2, rs1, zero_reg);
BlockTrampolinePoolFor(1);
}
void AssemblerRISCVC::c_add(Register rd, Register rs2) {
DCHECK(rd != zero_reg && rs2 != zero_reg);
GenInstrCR(0b1001, C2, rd, rs2);
}
// CA Instructions
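// CA-format instructions only address the compressed register set x8-x15;
// (code & 0b11000) == 0b01000 holds exactly for register codes 8 through 15,
// which is what the DCHECKs below verify.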
void AssemblerRISCVC::c_sub(Register rd, Register rs2) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs2.code() & 0b11000) == 0b01000));
GenInstrCA(0b100011, C1, rd, 0b00, rs2);
}
void AssemblerRISCVC::c_xor(Register rd, Register rs2) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs2.code() & 0b11000) == 0b01000));
GenInstrCA(0b100011, C1, rd, 0b01, rs2);
}
void AssemblerRISCVC::c_or(Register rd, Register rs2) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs2.code() & 0b11000) == 0b01000));
GenInstrCA(0b100011, C1, rd, 0b10, rs2);
}
void AssemblerRISCVC::c_and(Register rd, Register rs2) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs2.code() & 0b11000) == 0b01000));
GenInstrCA(0b100011, C1, rd, 0b11, rs2);
}
#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_subw(Register rd, Register rs2) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs2.code() & 0b11000) == 0b01000));
GenInstrCA(0b100111, C1, rd, 0b00, rs2);
}
void AssemblerRISCVC::c_addw(Register rd, Register rs2) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs2.code() & 0b11000) == 0b01000));
GenInstrCA(0b100111, C1, rd, 0b01, rs2);
}
#endif
void AssemblerRISCVC::c_swsp(Register rs2, uint16_t uimm8) {
DCHECK(is_uint8(uimm8) && (uimm8 & 0x3) == 0);
uint8_t uimm6 = (uimm8 & 0x3c) | ((uimm8 & 0xc0) >> 6);
GenInstrCSS(0b110, C2, rs2, uimm6);
}
#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_sdsp(Register rs2, uint16_t uimm9) {
DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
GenInstrCSS(0b111, C2, rs2, uimm6);
}
#endif
void AssemblerRISCVC::c_fsdsp(FPURegister rs2, uint16_t uimm9) {
DCHECK(is_uint9(uimm9) && (uimm9 & 0x7) == 0);
uint8_t uimm6 = (uimm9 & 0x38) | ((uimm9 & 0x1c0) >> 6);
GenInstrCSS(0b101, C2, rs2, uimm6);
}
// CL Instructions
void AssemblerRISCVC::c_lw(Register rd, Register rs1, uint16_t uimm7) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
((uimm7 & 0x3) == 0));
uint8_t uimm5 =
((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
GenInstrCL(0b010, C0, rd, rs1, uimm5);
}
#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_ld(Register rd, Register rs1, uint16_t uimm8) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCL(0b011, C0, rd, rs1, uimm5);
}
#endif
void AssemblerRISCVC::c_fld(FPURegister rd, Register rs1, uint16_t uimm8) {
DCHECK(((rd.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCL(0b001, C0, rd, rs1, uimm5);
}
// CS Instructions
void AssemblerRISCVC::c_sw(Register rs2, Register rs1, uint16_t uimm7) {
DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint7(uimm7) &&
((uimm7 & 0x3) == 0));
uint8_t uimm5 =
((uimm7 & 0x4) >> 1) | ((uimm7 & 0x40) >> 6) | ((uimm7 & 0x38) >> 1);
GenInstrCS(0b110, C0, rs2, rs1, uimm5);
}
#ifdef V8_TARGET_ARCH_RISCV64
void AssemblerRISCVC::c_sd(Register rs2, Register rs1, uint16_t uimm8) {
DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCS(0b111, C0, rs2, rs1, uimm5);
}
#endif
void AssemblerRISCVC::c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8) {
DCHECK(((rs2.code() & 0b11000) == 0b01000) &&
((rs1.code() & 0b11000) == 0b01000) && is_uint8(uimm8) &&
((uimm8 & 0x7) == 0));
uint8_t uimm5 = ((uimm8 & 0x38) >> 1) | ((uimm8 & 0xc0) >> 6);
GenInstrCS(0b101, C0, rs2, rs1, uimm5);
}
// CJ Instructions
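// The CJ-format target is scattered as offset[11|4|9:8|10|6|7|3:1|5] across
// bits [12:2] of the instruction; the shifts below produce that ordering.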
void AssemblerRISCVC::c_j(int16_t imm12) {
DCHECK(is_int12(imm12));
int16_t uimm11 = ((imm12 & 0x800) >> 1) | ((imm12 & 0x400) >> 4) |
((imm12 & 0x300) >> 1) | ((imm12 & 0x80) >> 3) |
((imm12 & 0x40) >> 1) | ((imm12 & 0x20) >> 5) |
((imm12 & 0x10) << 5) | (imm12 & 0xe);
GenInstrCJ(0b101, C1, uimm11);
BlockTrampolinePoolFor(1);
}
// CB Instructions
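// CB-format branches scatter their offset as offset[8|4:3] in bits [12:10]
// and offset[7:6|2:1|5] in bits [6:2].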
void AssemblerRISCVC::c_bnez(Register rs1, int16_t imm9) {
DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
GenInstrCB(0b111, C1, rs1, uimm8);
}
void AssemblerRISCVC::c_beqz(Register rs1, int16_t imm9) {
DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int9(imm9));
uint8_t uimm8 = ((imm9 & 0x20) >> 5) | ((imm9 & 0x6)) | ((imm9 & 0xc0) >> 3) |
((imm9 & 0x18) << 2) | ((imm9 & 0x100) >> 1);
GenInstrCB(0b110, C1, rs1, uimm8);
}
void AssemblerRISCVC::c_srli(Register rs1, int8_t shamt6) {
DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
GenInstrCBA(0b100, 0b00, C1, rs1, shamt6);
}
void AssemblerRISCVC::c_srai(Register rs1, int8_t shamt6) {
DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(shamt6));
GenInstrCBA(0b100, 0b01, C1, rs1, shamt6);
}
void AssemblerRISCVC::c_andi(Register rs1, int8_t imm6) {
DCHECK(((rs1.code() & 0b11000) == 0b01000) && is_int6(imm6));
GenInstrCBA(0b100, 0b10, C1, rs1, imm6);
}
bool AssemblerRISCVC::IsCJal(Instr instr) {
return (instr & kRvcOpcodeMask) == RO_C_J;
}
bool AssemblerRISCVC::IsCBranch(Instr instr) {
int Op = instr & kRvcOpcodeMask;
return Op == RO_C_BNEZ || Op == RO_C_BEQZ;
}
int AssemblerRISCVC::CJumpOffset(Instr instr) {
int32_t imm12 = ((instr & 0x4) << 3) | ((instr & 0x38) >> 2) |
((instr & 0x40) << 1) | ((instr & 0x80) >> 1) |
((instr & 0x100) << 2) | ((instr & 0x600) >> 1) |
((instr & 0x800) >> 7) | ((instr & 0x1000) >> 1);
imm12 = imm12 << 20 >> 20;
return imm12;
}
} // namespace internal
} // namespace v8
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-c.h"
#include "src/codegen/riscv/register-riscv.h"
#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_C_H_
#define V8_CODEGEN_RISCV_EXTENSION_RISCV_C_H_
namespace v8 {
namespace internal {
class AssemblerRISCVC : public AssemblerRiscvBase {
// RVC Standard Extension (compressed instructions)
public:
void c_nop();
void c_addi(Register rd, int8_t imm6);
void c_addi16sp(int16_t imm10);
void c_addi4spn(Register rd, int16_t uimm10);
void c_li(Register rd, int8_t imm6);
void c_lui(Register rd, int8_t imm6);
void c_slli(Register rd, uint8_t shamt6);
void c_lwsp(Register rd, uint16_t uimm8);
void c_jr(Register rs1);
void c_mv(Register rd, Register rs2);
void c_ebreak();
void c_jalr(Register rs1);
void c_j(int16_t imm12);
void c_add(Register rd, Register rs2);
void c_sub(Register rd, Register rs2);
void c_and(Register rd, Register rs2);
void c_xor(Register rd, Register rs2);
void c_or(Register rd, Register rs2);
void c_swsp(Register rs2, uint16_t uimm8);
void c_lw(Register rd, Register rs1, uint16_t uimm7);
void c_sw(Register rs2, Register rs1, uint16_t uimm7);
void c_bnez(Register rs1, int16_t imm9);
void c_beqz(Register rs1, int16_t imm9);
void c_srli(Register rs1, int8_t shamt6);
void c_srai(Register rs1, int8_t shamt6);
void c_andi(Register rs1, int8_t imm6);
void c_fld(FPURegister rd, Register rs1, uint16_t uimm8);
void c_fsd(FPURegister rs2, Register rs1, uint16_t uimm8);
void c_fldsp(FPURegister rd, uint16_t uimm9);
void c_fsdsp(FPURegister rs2, uint16_t uimm9);
#ifdef V8_TARGET_ARCH_RISCV64
void c_ld(Register rd, Register rs1, uint16_t uimm8);
void c_sd(Register rs2, Register rs1, uint16_t uimm8);
void c_subw(Register rd, Register rs2);
void c_addw(Register rd, Register rs2);
void c_addiw(Register rd, int8_t imm6);
void c_ldsp(Register rd, uint16_t uimm9);
void c_sdsp(Register rs2, uint16_t uimm9);
#endif
int CJumpOffset(Instr instr);
static bool IsCBranch(Instr instr);
static bool IsCJal(Instr instr);
inline int16_t cjump_offset(Label* L) {
return static_cast<int16_t>(branch_offset_helper(L, OffsetSize::kOffset11));
}
inline int32_t cbranch_offset(Label* L) {
return branch_offset_helper(L, OffsetSize::kOffset9);
}
void c_j(Label* L) { c_j(cjump_offset(L)); }
void c_bnez(Register rs1, Label* L) { c_bnez(rs1, cbranch_offset(L)); }
void c_beqz(Register rs1, Label* L) { c_beqz(rs1, cbranch_offset(L)); }
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_EXTENSION_RISCV_C_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/extension-riscv-d.h"
namespace v8 {
namespace internal {
// RV32D Standard Extension
void AssemblerRISCVD::fld(FPURegister rd, Register rs1, int16_t imm12) {
GenInstrLoadFP_ri(0b011, rd, rs1, imm12);
}
void AssemblerRISCVD::fsd(FPURegister source, Register base, int16_t imm12) {
GenInstrStoreFP_rri(0b011, base, source, imm12);
}
void AssemblerRISCVD::fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURegister rs3, FPURoundingMode frm) {
GenInstrR4(0b01, MADD, rd, rs1, rs2, rs3, frm);
}
void AssemblerRISCVD::fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURegister rs3, FPURoundingMode frm) {
GenInstrR4(0b01, MSUB, rd, rs1, rs2, rs3, frm);
}
void AssemblerRISCVD::fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURegister rs3, FPURoundingMode frm) {
GenInstrR4(0b01, NMSUB, rd, rs1, rs2, rs3, frm);
}
void AssemblerRISCVD::fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURegister rs3, FPURoundingMode frm) {
GenInstrR4(0b01, NMADD, rd, rs1, rs2, rs3, frm);
}
void AssemblerRISCVD::fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b0000001, frm, rd, rs1, rs2);
}
void AssemblerRISCVD::fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b0000101, frm, rd, rs1, rs2);
}
void AssemblerRISCVD::fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b0001001, frm, rd, rs1, rs2);
}
void AssemblerRISCVD::fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b0001101, frm, rd, rs1, rs2);
}
void AssemblerRISCVD::fsqrt_d(FPURegister rd, FPURegister rs1,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b0101101, frm, rd, rs1, zero_reg);
}
void AssemblerRISCVD::fsgnj_d(FPURegister rd, FPURegister rs1,
FPURegister rs2) {
GenInstrALUFP_rr(0b0010001, 0b000, rd, rs1, rs2);
}
void AssemblerRISCVD::fsgnjn_d(FPURegister rd, FPURegister rs1,
FPURegister rs2) {
GenInstrALUFP_rr(0b0010001, 0b001, rd, rs1, rs2);
}
void AssemblerRISCVD::fsgnjx_d(FPURegister rd, FPURegister rs1,
FPURegister rs2) {
GenInstrALUFP_rr(0b0010001, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVD::fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
GenInstrALUFP_rr(0b0010101, 0b000, rd, rs1, rs2);
}
void AssemblerRISCVD::fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2) {
GenInstrALUFP_rr(0b0010101, 0b001, rd, rs1, rs2);
}
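// In the FP conversion encodings the rs2 field is not a source register: it
// selects the conversion variant (0 = W, 1 = WU, 2 = L, 3 = LU), hence the
// ToRegister(n) arguments below.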
void AssemblerRISCVD::fcvt_s_d(FPURegister rd, FPURegister rs1,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b0100000, frm, rd, rs1, ToRegister(1));
}
void AssemblerRISCVD::fcvt_d_s(FPURegister rd, FPURegister rs1,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b0100001, frm, rd, rs1, zero_reg);
}
void AssemblerRISCVD::feq_d(Register rd, FPURegister rs1, FPURegister rs2) {
GenInstrALUFP_rr(0b1010001, 0b010, rd, rs1, rs2);
}
void AssemblerRISCVD::flt_d(Register rd, FPURegister rs1, FPURegister rs2) {
GenInstrALUFP_rr(0b1010001, 0b001, rd, rs1, rs2);
}
void AssemblerRISCVD::fle_d(Register rd, FPURegister rs1, FPURegister rs2) {
GenInstrALUFP_rr(0b1010001, 0b000, rd, rs1, rs2);
}
void AssemblerRISCVD::fclass_d(Register rd, FPURegister rs1) {
GenInstrALUFP_rr(0b1110001, 0b001, rd, rs1, zero_reg);
}
void AssemblerRISCVD::fcvt_w_d(Register rd, FPURegister rs1,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b1100001, frm, rd, rs1, zero_reg);
}
void AssemblerRISCVD::fcvt_wu_d(Register rd, FPURegister rs1,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(1));
}
void AssemblerRISCVD::fcvt_d_w(FPURegister rd, Register rs1,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b1101001, frm, rd, rs1, zero_reg);
}
void AssemblerRISCVD::fcvt_d_wu(FPURegister rd, Register rs1,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(1));
}
#ifdef V8_TARGET_ARCH_RISCV64
// RV64D Standard Extension (in addition to RV32D)
void AssemblerRISCVD::fcvt_l_d(Register rd, FPURegister rs1,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(2));
}
void AssemblerRISCVD::fcvt_lu_d(Register rd, FPURegister rs1,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b1100001, frm, rd, rs1, ToRegister(3));
}
void AssemblerRISCVD::fmv_x_d(Register rd, FPURegister rs1) {
GenInstrALUFP_rr(0b1110001, 0b000, rd, rs1, zero_reg);
}
void AssemblerRISCVD::fcvt_d_l(FPURegister rd, Register rs1,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(2));
}
void AssemblerRISCVD::fcvt_d_lu(FPURegister rd, Register rs1,
FPURoundingMode frm) {
GenInstrALUFP_rr(0b1101001, frm, rd, rs1, ToRegister(3));
}
void AssemblerRISCVD::fmv_d_x(FPURegister rd, Register rs1) {
GenInstrALUFP_rr(0b1111001, 0b000, rd, rs1, zero_reg);
}
#endif
} // namespace internal
} // namespace v8
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/assembler.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-d.h"
#include "src/codegen/riscv/register-riscv.h"
#ifndef V8_CODEGEN_RISCV_EXTENSION_RISCV_D_H_
#define V8_CODEGEN_RISCV_EXTENSION_RISCV_D_H_
namespace v8 {
namespace internal {
class AssemblerRISCVD : public AssemblerRiscvBase {
// RV32D Standard Extension
public:
void fld(FPURegister rd, Register rs1, int16_t imm12);
void fsd(FPURegister source, Register base, int16_t imm12);
void fmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURegister rs3, FPURoundingMode frm = RNE);
void fmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURegister rs3, FPURoundingMode frm = RNE);
void fnmsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURegister rs3, FPURoundingMode frm = RNE);
void fnmadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURegister rs3, FPURoundingMode frm = RNE);
void fadd_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURoundingMode frm = RNE);
void fsub_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURoundingMode frm = RNE);
void fmul_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURoundingMode frm = RNE);
void fdiv_d(FPURegister rd, FPURegister rs1, FPURegister rs2,
FPURoundingMode frm = RNE);
void fsqrt_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
void fsgnj_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
void fsgnjn_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
void fsgnjx_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
void fmin_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
void fmax_d(FPURegister rd, FPURegister rs1, FPURegister rs2);
void fcvt_s_d(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
void fcvt_d_s(FPURegister rd, FPURegister rs1, FPURoundingMode frm = RNE);
void feq_d(Register rd, FPURegister rs1, FPURegister rs2);
void flt_d(Register rd, FPURegister rs1, FPURegister rs2);
void fle_d(Register rd, FPURegister rs1, FPURegister rs2);
void fclass_d(Register rd, FPURegister rs1);
void fcvt_w_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
void fcvt_wu_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
void fcvt_d_w(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
void fcvt_d_wu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
#ifdef V8_TARGET_ARCH_RISCV64
// RV64D Standard Extension (in addition to RV32D)
void fcvt_l_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
void fcvt_lu_d(Register rd, FPURegister rs1, FPURoundingMode frm = RNE);
void fmv_x_d(Register rd, FPURegister rs1);
void fcvt_d_l(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
void fcvt_d_lu(FPURegister rd, Register rs1, FPURoundingMode frm = RNE);
void fmv_d_x(FPURegister rd, Register rs1);
#endif
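// Sign-injection yields the usual FP move/abs/neg pseudo-instructions: with
// rs duplicated, fsgnj copies the sign (move), fsgnjx XORs it away (abs),
// and fsgnjn flips it (neg).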
void fmv_d(FPURegister rd, FPURegister rs) { fsgnj_d(rd, rs, rs); }
void fabs_d(FPURegister rd, FPURegister rs) { fsgnjx_d(rd, rs, rs); }
void fneg_d(FPURegister rd, FPURegister rs) { fsgnjn_d(rd, rs, rs); }
};
} // namespace internal
} // namespace v8
#endif // V8_CODEGEN_RISCV_EXTENSION_RISCV_D_H_
// Copyright 2022 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
#include "src/codegen/riscv/extension-riscv-zifencei.h"
#include "src/codegen/riscv/base-assembler-riscv.h"
#include "src/codegen/riscv/constant-riscv-zifencei.h"
namespace v8 {
namespace internal {
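// FENCE.I (Zifencei) synchronizes the instruction and data streams; it is
// encoded as an I-type instruction with funct3 = 0b001 on the MISC_MEM
// opcode, matching RO_FENCE_I above.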
void AssemblerRISCVZifencei::fence_i() {
GenInstrI(0b001, MISC_MEM, ToRegister(0), ToRegister(0), 0);
}
} // namespace internal
} // namespace v8