Commit 818d73ca authored by Lu Yahan, committed by V8 LUCI CQ

[riscv64][register-alloc] Implement independent vector register allocation

The vector registers use a register file separate from the float registers in the RISC-V (riscv64) RVV extension.
So this CL adds a third FP aliasing kind, kIndependent, to allocate SIMD registers independently.

Bug: v8:11976

doc: https://docs.google.com/document/d/1UwmUwOI3eeIMYzZFRmeXmfyNXRFHNZAQ4BcN0ODdMmo/edit?usp=sharing

Change-Id: I0fb8901294b4bc44b0bee55e630b60460e42bef2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3383513
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Auto-Submit: Yahan Lu <yahan@iscas.ac.cn>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#79449}
parent 30974f92
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define V8_CODEGEN_ARM_REGISTER_ARM_H_ #define V8_CODEGEN_ARM_REGISTER_ARM_H_
#include "src/codegen/register-base.h" #include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h" #include "src/codegen/reglist.h"
namespace v8 { namespace v8 {
...@@ -125,7 +126,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) { ...@@ -125,7 +126,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0; return 0;
} }
constexpr bool kSimpleFPAliasing = false; constexpr AliasingKind kFPAliasing = AliasingKind::kCombine;
constexpr bool kSimdMaskRegisters = false; constexpr bool kSimdMaskRegisters = false;
enum SwVfpRegisterCode { enum SwVfpRegisterCode {
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include "src/codegen/arm64/utils-arm64.h" #include "src/codegen/arm64/utils-arm64.h"
#include "src/codegen/register-base.h" #include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h" #include "src/codegen/reglist.h"
#include "src/common/globals.h" #include "src/common/globals.h"
...@@ -276,7 +277,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) { ...@@ -276,7 +277,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return argument_count & alignment_mask; return argument_count & alignment_mask;
} }
constexpr bool kSimpleFPAliasing = true; constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false; constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode { enum DoubleRegisterCode {
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define V8_CODEGEN_IA32_REGISTER_IA32_H_ #define V8_CODEGEN_IA32_REGISTER_IA32_H_
#include "src/codegen/register-base.h" #include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h" #include "src/codegen/reglist.h"
namespace v8 { namespace v8 {
...@@ -82,7 +83,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) { ...@@ -82,7 +83,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0; return 0;
} }
constexpr bool kSimpleFPAliasing = true; constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false; constexpr bool kSimdMaskRegisters = false;
enum DoubleCode { enum DoubleCode {
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include "src/codegen/loong64/constants-loong64.h" #include "src/codegen/loong64/constants-loong64.h"
#include "src/codegen/register-base.h" #include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h" #include "src/codegen/reglist.h"
namespace v8 { namespace v8 {
...@@ -177,7 +178,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) { ...@@ -177,7 +178,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0; return 0;
} }
constexpr bool kSimpleFPAliasing = true; constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false; constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode { enum DoubleRegisterCode {
......
...@@ -340,6 +340,10 @@ inline bool IsFloatingPoint(MachineRepresentation rep) { ...@@ -340,6 +340,10 @@ inline bool IsFloatingPoint(MachineRepresentation rep) {
return rep >= MachineRepresentation::kFirstFPRepresentation; return rep >= MachineRepresentation::kFirstFPRepresentation;
} }
inline bool IsSimd128(MachineRepresentation rep) {
return rep == MachineRepresentation::kSimd128;
}
inline bool CanBeTaggedPointer(MachineRepresentation rep) { inline bool CanBeTaggedPointer(MachineRepresentation rep) {
return rep == MachineRepresentation::kTagged || return rep == MachineRepresentation::kTagged ||
rep == MachineRepresentation::kTaggedPointer || rep == MachineRepresentation::kTaggedPointer ||
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include "src/codegen/mips/constants-mips.h" #include "src/codegen/mips/constants-mips.h"
#include "src/codegen/register-base.h" #include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h" #include "src/codegen/reglist.h"
namespace v8 { namespace v8 {
...@@ -209,7 +210,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) { ...@@ -209,7 +210,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0; return 0;
} }
constexpr bool kSimpleFPAliasing = true; constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false; constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode { enum DoubleRegisterCode {
......
...@@ -7,6 +7,7 @@ ...@@ -7,6 +7,7 @@
#include "src/codegen/mips64/constants-mips64.h" #include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/register-base.h" #include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h" #include "src/codegen/reglist.h"
namespace v8 { namespace v8 {
...@@ -209,7 +210,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) { ...@@ -209,7 +210,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0; return 0;
} }
constexpr bool kSimpleFPAliasing = true; constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false; constexpr bool kSimdMaskRegisters = false;
enum MSARegisterCode { enum MSARegisterCode {
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define V8_CODEGEN_PPC_REGISTER_PPC_H_ #define V8_CODEGEN_PPC_REGISTER_PPC_H_
#include "src/codegen/register-base.h" #include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h" #include "src/codegen/reglist.h"
namespace v8 { namespace v8 {
...@@ -219,7 +220,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) { ...@@ -219,7 +220,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0; return 0;
} }
constexpr bool kSimpleFPAliasing = true; constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false; constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode { enum DoubleRegisterCode {
......
...@@ -19,6 +19,10 @@ static const int kMaxAllocatableGeneralRegisterCount = ...@@ -19,6 +19,10 @@ static const int kMaxAllocatableGeneralRegisterCount =
ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0; ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
static const int kMaxAllocatableDoubleRegisterCount = static const int kMaxAllocatableDoubleRegisterCount =
ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT) 0; ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT) 0;
#if V8_TARGET_ARCH_RISCV64
static const int kMaxAllocatableSIMD128RegisterCount =
ALLOCATABLE_SIMD128_REGISTERS(REGISTER_COUNT) 0;
#endif
static const int kAllocatableGeneralCodes[] = { static const int kAllocatableGeneralCodes[] = {
#define REGISTER_CODE(R) kRegCode_##R, #define REGISTER_CODE(R) kRegCode_##R,
...@@ -34,6 +38,13 @@ static const int kAllocatableNoVFP32DoubleCodes[] = { ...@@ -34,6 +38,13 @@ static const int kAllocatableNoVFP32DoubleCodes[] = {
#endif // V8_TARGET_ARCH_ARM #endif // V8_TARGET_ARCH_ARM
#undef REGISTER_CODE #undef REGISTER_CODE
#if V8_TARGET_ARCH_RISCV64
static const int kAllocatableSIMD128Codes[] = {
#define REGISTER_CODE(R) kVRCode_##R,
ALLOCATABLE_SIMD128_REGISTERS(REGISTER_CODE)};
#undef REGISTER_CODE
#endif // V8_TARGET_ARCH_RISCV64
STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >= STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
Register::kNumRegisters); Register::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >= STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
...@@ -43,6 +54,15 @@ STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >= ...@@ -43,6 +54,15 @@ STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >= STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
Simd128Register::kNumRegisters); Simd128Register::kNumRegisters);
static int get_num_simd128_registers() {
return
#if V8_TARGET_ARCH_RISCV64
Simd128Register::kNumRegisters;
#else
0;
#endif // V8_TARGET_ARCH_RISCV64
}
// Callers on architectures other than Arm expect this to be be constant // Callers on architectures other than Arm expect this to be be constant
// between build and runtime. Avoid adding variability on other platforms. // between build and runtime. Avoid adding variability on other platforms.
static int get_num_allocatable_double_registers() { static int get_num_allocatable_double_registers() {
...@@ -78,6 +98,15 @@ static int get_num_allocatable_double_registers() { ...@@ -78,6 +98,15 @@ static int get_num_allocatable_double_registers() {
#undef REGISTER_COUNT #undef REGISTER_COUNT
static int get_num_allocatable_simd128_registers() {
return
#if V8_TARGET_ARCH_RISCV64
kMaxAllocatableSIMD128RegisterCount;
#else
0;
#endif
}
// Callers on architectures other than Arm expect this to be be constant // Callers on architectures other than Arm expect this to be be constant
// between build and runtime. Avoid adding variability on other platforms. // between build and runtime. Avoid adding variability on other platforms.
static const int* get_allocatable_double_codes() { static const int* get_allocatable_double_codes() {
...@@ -90,16 +119,24 @@ static const int* get_allocatable_double_codes() { ...@@ -90,16 +119,24 @@ static const int* get_allocatable_double_codes() {
#endif #endif
} }
static const int* get_allocatable_simd128_codes() {
return
#if V8_TARGET_ARCH_RISCV64
kAllocatableSIMD128Codes;
#else
kAllocatableDoubleCodes;
#endif
}
class ArchDefaultRegisterConfiguration : public RegisterConfiguration { class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
public: public:
ArchDefaultRegisterConfiguration() ArchDefaultRegisterConfiguration()
: RegisterConfiguration( : RegisterConfiguration(
Register::kNumRegisters, DoubleRegister::kNumRegisters, kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters,
kMaxAllocatableGeneralRegisterCount, get_num_simd128_registers(), kMaxAllocatableGeneralRegisterCount,
get_num_allocatable_double_registers(), kAllocatableGeneralCodes, get_num_allocatable_double_registers(),
get_allocatable_double_codes(), get_num_allocatable_simd128_registers(), kAllocatableGeneralCodes,
kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) { get_allocatable_double_codes(), get_allocatable_simd128_codes()) {}
}
}; };
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration, DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
...@@ -115,12 +152,12 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration { ...@@ -115,12 +152,12 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration {
std::unique_ptr<int[]> allocatable_general_register_codes, std::unique_ptr<int[]> allocatable_general_register_codes,
std::unique_ptr<char const*[]> allocatable_general_register_names) std::unique_ptr<char const*[]> allocatable_general_register_names)
: RegisterConfiguration( : RegisterConfiguration(
Register::kNumRegisters, DoubleRegister::kNumRegisters, kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters,
num_allocatable_general_registers, get_num_simd128_registers(), num_allocatable_general_registers,
get_num_allocatable_double_registers(), get_num_allocatable_double_registers(),
get_num_allocatable_simd128_registers(),
allocatable_general_register_codes.get(), allocatable_general_register_codes.get(),
get_allocatable_double_codes(), get_allocatable_double_codes(), get_allocatable_simd128_codes()),
kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE),
allocatable_general_register_codes_( allocatable_general_register_codes_(
std::move(allocatable_general_register_codes)), std::move(allocatable_general_register_codes)),
allocatable_general_register_names_( allocatable_general_register_names_(
...@@ -172,18 +209,20 @@ const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters( ...@@ -172,18 +209,20 @@ const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
} }
RegisterConfiguration::RegisterConfiguration( RegisterConfiguration::RegisterConfiguration(
int num_general_registers, int num_double_registers, AliasingKind fp_aliasing_kind, int num_general_registers,
int num_double_registers, int num_simd128_registers,
int num_allocatable_general_registers, int num_allocatable_double_registers, int num_allocatable_general_registers, int num_allocatable_double_registers,
const int* allocatable_general_codes, const int* allocatable_double_codes, int num_allocatable_simd128_registers, const int* allocatable_general_codes,
AliasingKind fp_aliasing_kind) const int* allocatable_double_codes,
const int* independent_allocatable_simd128_codes)
: num_general_registers_(num_general_registers), : num_general_registers_(num_general_registers),
num_float_registers_(0), num_float_registers_(0),
num_double_registers_(num_double_registers), num_double_registers_(num_double_registers),
num_simd128_registers_(0), num_simd128_registers_(num_simd128_registers),
num_allocatable_general_registers_(num_allocatable_general_registers), num_allocatable_general_registers_(num_allocatable_general_registers),
num_allocatable_float_registers_(0), num_allocatable_float_registers_(0),
num_allocatable_double_registers_(num_allocatable_double_registers), num_allocatable_double_registers_(num_allocatable_double_registers),
num_allocatable_simd128_registers_(0), num_allocatable_simd128_registers_(num_allocatable_simd128_registers),
allocatable_general_codes_mask_(0), allocatable_general_codes_mask_(0),
allocatable_float_codes_mask_(0), allocatable_float_codes_mask_(0),
allocatable_double_codes_mask_(0), allocatable_double_codes_mask_(0),
...@@ -201,7 +240,7 @@ RegisterConfiguration::RegisterConfiguration( ...@@ -201,7 +240,7 @@ RegisterConfiguration::RegisterConfiguration(
allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]); allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
} }
if (fp_aliasing_kind_ == COMBINE) { if (fp_aliasing_kind_ == AliasingKind::kCombine) {
num_float_registers_ = num_double_registers_ * 2 <= kMaxFPRegisters num_float_registers_ = num_double_registers_ * 2 <= kMaxFPRegisters
? num_double_registers_ * 2 ? num_double_registers_ * 2
: kMaxFPRegisters; : kMaxFPRegisters;
...@@ -228,8 +267,7 @@ RegisterConfiguration::RegisterConfiguration( ...@@ -228,8 +267,7 @@ RegisterConfiguration::RegisterConfiguration(
} }
last_simd128_code = next_simd128_code; last_simd128_code = next_simd128_code;
} }
} else { } else if (fp_aliasing_kind_ == AliasingKind::kOverlap) {
DCHECK(fp_aliasing_kind_ == OVERLAP);
num_float_registers_ = num_simd128_registers_ = num_double_registers_; num_float_registers_ = num_simd128_registers_ = num_double_registers_;
num_allocatable_float_registers_ = num_allocatable_simd128_registers_ = num_allocatable_float_registers_ = num_allocatable_simd128_registers_ =
num_allocatable_double_registers_; num_allocatable_double_registers_;
...@@ -239,6 +277,21 @@ RegisterConfiguration::RegisterConfiguration( ...@@ -239,6 +277,21 @@ RegisterConfiguration::RegisterConfiguration(
} }
allocatable_float_codes_mask_ = allocatable_simd128_codes_mask_ = allocatable_float_codes_mask_ = allocatable_simd128_codes_mask_ =
allocatable_double_codes_mask_; allocatable_double_codes_mask_;
} else {
DCHECK_EQ(fp_aliasing_kind_, AliasingKind::kIndependent);
DCHECK_NE(independent_allocatable_simd128_codes, nullptr);
num_float_registers_ = num_double_registers_;
num_allocatable_float_registers_ = num_allocatable_double_registers_;
for (int i = 0; i < num_allocatable_float_registers_; ++i) {
allocatable_float_codes_[i] = allocatable_double_codes_[i];
}
allocatable_float_codes_mask_ = allocatable_double_codes_mask_;
for (int i = 0; i < num_allocatable_simd128_registers; i++) {
allocatable_simd128_codes_[i] = independent_allocatable_simd128_codes[i];
}
for (int i = 0; i < num_allocatable_simd128_registers_; ++i) {
allocatable_simd128_codes_mask_ |= (1 << allocatable_simd128_codes_[i]);
}
} }
} }
...@@ -251,7 +304,7 @@ STATIC_ASSERT(static_cast<int>(MachineRepresentation::kFloat64) == ...@@ -251,7 +304,7 @@ STATIC_ASSERT(static_cast<int>(MachineRepresentation::kFloat64) ==
int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index, int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
MachineRepresentation other_rep, MachineRepresentation other_rep,
int* alias_base_index) const { int* alias_base_index) const {
DCHECK(fp_aliasing_kind_ == COMBINE); DCHECK(fp_aliasing_kind_ == AliasingKind::kCombine);
DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep)); DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
if (rep == other_rep) { if (rep == other_rep) {
*alias_base_index = index; *alias_base_index = index;
...@@ -277,7 +330,7 @@ int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index, ...@@ -277,7 +330,7 @@ int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
bool RegisterConfiguration::AreAliases(MachineRepresentation rep, int index, bool RegisterConfiguration::AreAliases(MachineRepresentation rep, int index,
MachineRepresentation other_rep, MachineRepresentation other_rep,
int other_index) const { int other_index) const {
DCHECK(fp_aliasing_kind_ == COMBINE); DCHECK(fp_aliasing_kind_ == AliasingKind::kCombine);
DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep)); DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
if (rep == other_rep) { if (rep == other_rep) {
return index == other_index; return index == other_index;
......
...@@ -16,15 +16,17 @@ namespace internal { ...@@ -16,15 +16,17 @@ namespace internal {
// An architecture independent representation of the sets of registers available // An architecture independent representation of the sets of registers available
// for instruction creation. // for instruction creation.
enum class AliasingKind {
// Registers alias a single register of every other size (e.g. Intel).
kOverlap,
// Registers alias two registers of the next smaller size (e.g. ARM).
kCombine,
// SIMD128 Registers are independent of every other size (e.g Riscv)
kIndependent
};
class V8_EXPORT_PRIVATE RegisterConfiguration { class V8_EXPORT_PRIVATE RegisterConfiguration {
public: public:
enum AliasingKind {
// Registers alias a single register of every other size (e.g. Intel).
OVERLAP,
// Registers alias two registers of the next smaller size (e.g. ARM).
COMBINE
};
// Architecture independent maxes. // Architecture independent maxes.
static constexpr int kMaxGeneralRegisters = 32; static constexpr int kMaxGeneralRegisters = 32;
static constexpr int kMaxFPRegisters = 32; static constexpr int kMaxFPRegisters = 32;
...@@ -40,12 +42,14 @@ class V8_EXPORT_PRIVATE RegisterConfiguration { ...@@ -40,12 +42,14 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
static const RegisterConfiguration* RestrictGeneralRegisters( static const RegisterConfiguration* RestrictGeneralRegisters(
RegList registers); RegList registers);
RegisterConfiguration(int num_general_registers, int num_double_registers, RegisterConfiguration(
int num_allocatable_general_registers, AliasingKind fp_aliasing_kind, int num_general_registers,
int num_allocatable_double_registers, int num_double_registers, int num_simd128_registers,
const int* allocatable_general_codes, int num_allocatable_general_registers,
const int* allocatable_double_codes, int num_allocatable_double_registers,
AliasingKind fp_aliasing_kind); int num_allocatable_simd128_registers,
const int* allocatable_general_codes, const int* allocatable_double_codes,
const int* independent_allocatable_simd128_codes = nullptr);
int num_general_registers() const { return num_general_registers_; } int num_general_registers() const { return num_general_registers_; }
int num_float_registers() const { return num_float_registers_; } int num_float_registers() const { return num_float_registers_; }
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_ #define V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_
#include "src/codegen/register-base.h" #include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h" #include "src/codegen/reglist.h"
#include "src/codegen/riscv64/constants-riscv64.h" #include "src/codegen/riscv64/constants-riscv64.h"
...@@ -55,10 +56,11 @@ namespace internal { ...@@ -55,10 +56,11 @@ namespace internal {
V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \ V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31) V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31)
#define UNALLOACTABLE_VECTOR_REGISTERS(V) \ #define ALLOCATABLE_SIMD128_REGISTERS(V) \
V(v9) V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) \ V(v1) V(v2) V(v3) V(v4) V(v5) V(v6) V(v7) \
V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \ V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) V(v16) \
V(v24) V(v25) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v26) \
V(v27) V(v28) V(v29) V(v30) V(v31)
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \ #define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \ V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \
...@@ -253,7 +255,7 @@ int ToNumber(Register reg); ...@@ -253,7 +255,7 @@ int ToNumber(Register reg);
Register ToRegister(int num); Register ToRegister(int num);
constexpr bool kPadArguments = false; constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true; constexpr AliasingKind kFPAliasing = AliasingKind::kIndependent;
constexpr bool kSimdMaskRegisters = false; constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode { enum DoubleRegisterCode {
...@@ -299,11 +301,6 @@ class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> { ...@@ -299,11 +301,6 @@ class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
// register and floating point register are shared. // register and floating point register are shared.
VRegister toV() const { VRegister toV() const {
DCHECK(base::IsInRange(code(), 0, kVRAfterLast - 1)); DCHECK(base::IsInRange(code(), 0, kVRAfterLast - 1));
// FIXME(riscv): Because V0 is a special mask reg, so can't allocate it.
// And v8 is unallocated so we replace v0 with v8
if (code() == 0) {
return VRegister(8);
}
return VRegister(code()); return VRegister(code());
} }
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define V8_CODEGEN_S390_REGISTER_S390_H_ #define V8_CODEGEN_S390_REGISTER_S390_H_
#include "src/codegen/register-base.h" #include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h" #include "src/codegen/reglist.h"
namespace v8 { namespace v8 {
...@@ -173,7 +174,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) { ...@@ -173,7 +174,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0; return 0;
} }
constexpr bool kSimpleFPAliasing = true; constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false; constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode { enum DoubleRegisterCode {
......
...@@ -6,6 +6,7 @@ ...@@ -6,6 +6,7 @@
#define V8_CODEGEN_X64_REGISTER_X64_H_ #define V8_CODEGEN_X64_REGISTER_X64_H_
#include "src/codegen/register-base.h" #include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h" #include "src/codegen/reglist.h"
namespace v8 { namespace v8 {
...@@ -176,7 +177,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) { ...@@ -176,7 +177,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0; return 0;
} }
constexpr bool kSimpleFPAliasing = true; constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false; constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode { enum DoubleRegisterCode {
......
...@@ -22,7 +22,7 @@ namespace { ...@@ -22,7 +22,7 @@ namespace {
// aliasing, and makes swaps much easier to implement. // aliasing, and makes swaps much easier to implement.
MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep, MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
ParallelMove* moves) { ParallelMove* moves) {
DCHECK(!kSimpleFPAliasing); DCHECK(kFPAliasing == AliasingKind::kCombine);
// Splitting is only possible when the slot size is the same as float size. // Splitting is only possible when the slot size is the same as float size.
DCHECK_EQ(kSystemPointerSize, kFloatSize); DCHECK_EQ(kSystemPointerSize, kFloatSize);
const LocationOperand& src_loc = LocationOperand::cast(move->source()); const LocationOperand& src_loc = LocationOperand::cast(move->source());
...@@ -104,7 +104,8 @@ void GapResolver::Resolve(ParallelMove* moves) { ...@@ -104,7 +104,8 @@ void GapResolver::Resolve(ParallelMove* moves) {
i++; i++;
source_kinds.Add(GetKind(move->source())); source_kinds.Add(GetKind(move->source()));
destination_kinds.Add(GetKind(move->destination())); destination_kinds.Add(GetKind(move->destination()));
if (!kSimpleFPAliasing && move->destination().IsFPRegister()) { if (kFPAliasing == AliasingKind::kCombine &&
move->destination().IsFPRegister()) {
fp_reps |= RepresentationBit( fp_reps |= RepresentationBit(
LocationOperand::cast(move->destination()).representation()); LocationOperand::cast(move->destination()).representation());
} }
...@@ -119,7 +120,7 @@ void GapResolver::Resolve(ParallelMove* moves) { ...@@ -119,7 +120,7 @@ void GapResolver::Resolve(ParallelMove* moves) {
return; return;
} }
if (!kSimpleFPAliasing) { if (kFPAliasing == AliasingKind::kCombine) {
if (fp_reps && !base::bits::IsPowerOfTwo(fp_reps)) { if (fp_reps && !base::bits::IsPowerOfTwo(fp_reps)) {
// Start with the smallest FP moves, so we never encounter smaller moves // Start with the smallest FP moves, so we never encounter smaller moves
// in the middle of a cycle of larger moves. // in the middle of a cycle of larger moves.
...@@ -166,8 +167,8 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) { ...@@ -166,8 +167,8 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
move->SetPending(); move->SetPending();
// We may need to split moves between FP locations differently. // We may need to split moves between FP locations differently.
const bool is_fp_loc_move = const bool is_fp_loc_move = kFPAliasing == AliasingKind::kCombine &&
!kSimpleFPAliasing && destination.IsFPLocationOperand(); destination.IsFPLocationOperand();
// Perform a depth-first traversal of the move graph to resolve dependencies. // Perform a depth-first traversal of the move graph to resolve dependencies.
// Any unperformed, unpending move with a source the same as this one's // Any unperformed, unpending move with a source the same as this one's
......
...@@ -81,13 +81,13 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) { ...@@ -81,13 +81,13 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
} }
bool InstructionOperand::InterferesWith(const InstructionOperand& other) const { bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
const bool kComplexFPAliasing = !kSimpleFPAliasing && const bool kCombineFPAliasing = kFPAliasing == AliasingKind::kCombine &&
this->IsFPLocationOperand() && this->IsFPLocationOperand() &&
other.IsFPLocationOperand(); other.IsFPLocationOperand();
const bool kComplexS128SlotAliasing = const bool kComplexS128SlotAliasing =
(this->IsSimd128StackSlot() && other.IsAnyStackSlot()) || (this->IsSimd128StackSlot() && other.IsAnyStackSlot()) ||
(other.IsSimd128StackSlot() && this->IsAnyStackSlot()); (other.IsSimd128StackSlot() && this->IsAnyStackSlot());
if (!kComplexFPAliasing && !kComplexS128SlotAliasing) { if (!kCombineFPAliasing && !kComplexS128SlotAliasing) {
return EqualsCanonicalized(other); return EqualsCanonicalized(other);
} }
const LocationOperand& loc = *LocationOperand::cast(this); const LocationOperand& loc = *LocationOperand::cast(this);
...@@ -98,7 +98,7 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const { ...@@ -98,7 +98,7 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
MachineRepresentation rep = loc.representation(); MachineRepresentation rep = loc.representation();
MachineRepresentation other_rep = other_loc.representation(); MachineRepresentation other_rep = other_loc.representation();
if (kComplexFPAliasing && !kComplexS128SlotAliasing) { if (kCombineFPAliasing && !kComplexS128SlotAliasing) {
if (rep == other_rep) return EqualsCanonicalized(other); if (rep == other_rep) return EqualsCanonicalized(other);
if (kind == LocationOperand::REGISTER) { if (kind == LocationOperand::REGISTER) {
// FP register-register interference. // FP register-register interference.
...@@ -126,7 +126,7 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const { ...@@ -126,7 +126,7 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
bool LocationOperand::IsCompatible(LocationOperand* op) { bool LocationOperand::IsCompatible(LocationOperand* op) {
if (IsRegister() || IsStackSlot()) { if (IsRegister() || IsStackSlot()) {
return op->IsRegister() || op->IsStackSlot(); return op->IsRegister() || op->IsStackSlot();
} else if (kSimpleFPAliasing) { } else if (kFPAliasing != AliasingKind::kCombine) {
// A backend may choose to generate the same instruction sequence regardless // A backend may choose to generate the same instruction sequence regardless
// of the FP representation. As a result, we can relax the compatibility and // of the FP representation. As a result, we can relax the compatibility and
// allow a Double to be moved in a Float for example. However, this is only // allow a Double to be moved in a Float for example. However, this is only
...@@ -162,8 +162,11 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) { ...@@ -162,8 +162,11 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
<< ")"; << ")";
case UnallocatedOperand::FIXED_FP_REGISTER: case UnallocatedOperand::FIXED_FP_REGISTER:
return os << "(=" return os << "(="
<< DoubleRegister::from_code( << (unalloc->IsSimd128Register()
unalloc->fixed_register_index()) ? i::RegisterName((Simd128Register::from_code(
unalloc->fixed_register_index())))
: i::RegisterName(DoubleRegister::from_code(
unalloc->fixed_register_index())))
<< ")"; << ")";
case UnallocatedOperand::MUST_HAVE_REGISTER: case UnallocatedOperand::MUST_HAVE_REGISTER:
return os << "(R)"; return os << "(R)";
...@@ -296,8 +299,8 @@ bool ParallelMove::IsRedundant() const { ...@@ -296,8 +299,8 @@ bool ParallelMove::IsRedundant() const {
void ParallelMove::PrepareInsertAfter( void ParallelMove::PrepareInsertAfter(
MoveOperands* move, ZoneVector<MoveOperands*>* to_eliminate) const { MoveOperands* move, ZoneVector<MoveOperands*>* to_eliminate) const {
bool no_aliasing = bool no_aliasing = kFPAliasing != AliasingKind::kCombine ||
kSimpleFPAliasing || !move->destination().IsFPLocationOperand(); !move->destination().IsFPLocationOperand();
MoveOperands* replacement = nullptr; MoveOperands* replacement = nullptr;
MoveOperands* eliminated = nullptr; MoveOperands* eliminated = nullptr;
for (MoveOperands* curr : *this) { for (MoveOperands* curr : *this) {
......
...@@ -695,12 +695,19 @@ uint64_t InstructionOperand::GetCanonicalizedValue() const { ...@@ -695,12 +695,19 @@ uint64_t InstructionOperand::GetCanonicalizedValue() const {
if (IsAnyLocationOperand()) { if (IsAnyLocationOperand()) {
MachineRepresentation canonical = MachineRepresentation::kNone; MachineRepresentation canonical = MachineRepresentation::kNone;
if (IsFPRegister()) { if (IsFPRegister()) {
if (kSimpleFPAliasing) { if (kFPAliasing == AliasingKind::kOverlap) {
// We treat all FP register operands the same for simple aliasing. // We treat all FP register operands the same for simple aliasing.
canonical = MachineRepresentation::kFloat64; canonical = MachineRepresentation::kFloat64;
} else if (kFPAliasing == AliasingKind::kIndependent) {
if (IsSimd128Register()) {
canonical = MachineRepresentation::kSimd128;
} else {
canonical = MachineRepresentation::kFloat64;
}
} else { } else {
// We need to distinguish FP register operands of different reps when // We need to distinguish FP register operands of different reps when
// aliasing is not simple (e.g. ARM). // aliasing is AliasingKind::kCombine (e.g. ARM).
DCHECK_EQ(kFPAliasing, AliasingKind::kCombine);
canonical = LocationOperand::cast(this)->representation(); canonical = LocationOperand::cast(this)->representation();
} }
} }
...@@ -1696,6 +1703,12 @@ class V8_EXPORT_PRIVATE InstructionSequence final ...@@ -1696,6 +1703,12 @@ class V8_EXPORT_PRIVATE InstructionSequence final
return (representation_mask() & kFPRepMask) != 0; return (representation_mask() & kFPRepMask) != 0;
} }
bool HasSimd128VirtualRegisters() const {
constexpr int kSimd128RepMask =
RepresentationBit(MachineRepresentation::kSimd128);
return (representation_mask() & kSimd128RepMask) != 0;
}
Instruction* GetBlockStart(RpoNumber rpo) const; Instruction* GetBlockStart(RpoNumber rpo) const;
using const_iterator = InstructionDeque::const_iterator; using const_iterator = InstructionDeque::const_iterator;
......
...@@ -78,6 +78,7 @@ class BlockState final { ...@@ -78,6 +78,7 @@ class BlockState final {
private: private:
RegisterState* general_registers_in_state_; RegisterState* general_registers_in_state_;
RegisterState* double_registers_in_state_; RegisterState* double_registers_in_state_;
RegisterState* simd128_registers_in_state_;
DeferredBlocksRegion* deferred_blocks_region_; DeferredBlocksRegion* deferred_blocks_region_;
...@@ -92,6 +93,8 @@ RegisterState* BlockState::register_in_state(RegisterKind kind) { ...@@ -92,6 +93,8 @@ RegisterState* BlockState::register_in_state(RegisterKind kind) {
return general_registers_in_state_; return general_registers_in_state_;
case RegisterKind::kDouble: case RegisterKind::kDouble:
return double_registers_in_state_; return double_registers_in_state_;
case RegisterKind::kSimd128:
return simd128_registers_in_state_;
} }
} }
...@@ -106,6 +109,10 @@ void BlockState::set_register_in_state(RegisterState* register_state, ...@@ -106,6 +109,10 @@ void BlockState::set_register_in_state(RegisterState* register_state,
DCHECK_NULL(double_registers_in_state_); DCHECK_NULL(double_registers_in_state_);
double_registers_in_state_ = register_state; double_registers_in_state_ = register_state;
break; break;
case RegisterKind::kSimd128:
DCHECK_NULL(simd128_registers_in_state_);
simd128_registers_in_state_ = register_state;
break;
} }
} }
...@@ -180,7 +187,8 @@ class RegisterIndex final { ...@@ -180,7 +187,8 @@ class RegisterIndex final {
} }
uintptr_t ToBit(MachineRepresentation rep) const { uintptr_t ToBit(MachineRepresentation rep) const {
if (kSimpleFPAliasing || rep != MachineRepresentation::kSimd128) { if (kFPAliasing != AliasingKind::kCombine ||
rep != MachineRepresentation::kSimd128) {
return 1ull << ToInt(); return 1ull << ToInt();
} else { } else {
DCHECK_EQ(rep, MachineRepresentation::kSimd128); DCHECK_EQ(rep, MachineRepresentation::kSimd128);
...@@ -1526,11 +1534,11 @@ class SinglePassRegisterAllocator final { ...@@ -1526,11 +1534,11 @@ class SinglePassRegisterAllocator final {
bool VirtualRegisterIsUnallocatedOrInReg(int virtual_register, bool VirtualRegisterIsUnallocatedOrInReg(int virtual_register,
RegisterIndex reg); RegisterIndex reg);
// If {!kSimpleFPAliasing}, two FP registers alias one SIMD register. This // If {if kFPAliasing kind is COMBINE}, two FP registers alias one SIMD
// returns the index of the higher aliasing FP register from the SIMD register // register. This returns the index of the higher aliasing FP register from
// index (which is the same as the lower register index). // the SIMD register index (which is the same as the lower register index).
RegisterIndex simdSibling(RegisterIndex reg) const { RegisterIndex simdSibling(RegisterIndex reg) const {
CHECK(!kSimpleFPAliasing); // Statically evaluated. CHECK_EQ(kFPAliasing, AliasingKind::kCombine); // Statically evaluated.
RegisterIndex sibling = RegisterIndex{reg.ToInt() + 1}; RegisterIndex sibling = RegisterIndex{reg.ToInt() + 1};
#ifdef DEBUG #ifdef DEBUG
// Check that {reg} is indeed the lower SIMD half and {sibling} is the // Check that {reg} is indeed the lower SIMD half and {sibling} is the
...@@ -1581,7 +1589,7 @@ class SinglePassRegisterAllocator final { ...@@ -1581,7 +1589,7 @@ class SinglePassRegisterAllocator final {
RegisterBitVector allocated_registers_bits_; RegisterBitVector allocated_registers_bits_;
RegisterBitVector same_input_output_registers_bits_; RegisterBitVector same_input_output_registers_bits_;
// These fields are only used when kSimpleFPAliasing == false. // These fields are only used when kFPAliasing == COMBINE.
base::Optional<ZoneVector<RegisterIndex>> float32_reg_code_to_index_; base::Optional<ZoneVector<RegisterIndex>> float32_reg_code_to_index_;
base::Optional<ZoneVector<int>> index_to_float32_reg_code_; base::Optional<ZoneVector<int>> index_to_float32_reg_code_;
base::Optional<ZoneVector<RegisterIndex>> simd128_reg_code_to_index_; base::Optional<ZoneVector<RegisterIndex>> simd128_reg_code_to_index_;
...@@ -1612,9 +1620,9 @@ SinglePassRegisterAllocator::SinglePassRegisterAllocator( ...@@ -1612,9 +1620,9 @@ SinglePassRegisterAllocator::SinglePassRegisterAllocator(
reg_code_to_index_[reg_code] = RegisterIndex(i); reg_code_to_index_[reg_code] = RegisterIndex(i);
} }
// If the architecture has non-simple FP aliasing, initialize float and // If the architecture has COMBINE FP aliasing, initialize float and
// simd128 specific register details. // simd128 specific register details.
if (!kSimpleFPAliasing && kind == RegisterKind::kDouble) { if (kFPAliasing == AliasingKind::kCombine && kind == RegisterKind::kDouble) {
const RegisterConfiguration* config = data->config(); const RegisterConfiguration* config = data->config();
// Float registers. // Float registers.
...@@ -1784,15 +1792,17 @@ void SinglePassRegisterAllocator::MergeStateFrom( ...@@ -1784,15 +1792,17 @@ void SinglePassRegisterAllocator::MergeStateFrom(
processed_regs.Add(reg, rep); processed_regs.Add(reg, rep);
bool reg_in_use = register_state_->IsAllocated(reg); bool reg_in_use = register_state_->IsAllocated(reg);
// For non-simple FP aliasing, the register is also "in use" if the // For COMBINE FP aliasing, the register is also "in use" if the
// FP register for the upper half is allocated. // FP register for the upper half is allocated.
if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) { if (kFPAliasing == AliasingKind::kCombine &&
rep == MachineRepresentation::kSimd128) {
reg_in_use |= register_state_->IsAllocated(simdSibling(reg)); reg_in_use |= register_state_->IsAllocated(simdSibling(reg));
} }
// Similarly (but the other way around), the register might be the upper // Similarly (but the other way around), the register might be the upper
// half of a SIMD register that is allocated. // half of a SIMD register that is allocated.
if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat64 || if (kFPAliasing == AliasingKind::kCombine &&
rep == MachineRepresentation::kFloat32)) { (rep == MachineRepresentation::kFloat64 ||
rep == MachineRepresentation::kFloat32)) {
int simd_reg_code; int simd_reg_code;
CHECK_EQ(1, data_->config()->GetAliases( CHECK_EQ(1, data_->config()->GetAliases(
rep, ToRegCode(reg, rep), rep, ToRegCode(reg, rep),
...@@ -1881,7 +1891,8 @@ void SinglePassRegisterAllocator::SpillRegisterAtMerge( ...@@ -1881,7 +1891,8 @@ void SinglePassRegisterAllocator::SpillRegisterAtMerge(
reg_state->Spill(reg, allocated, current_block_, data_); reg_state->Spill(reg, allocated, current_block_, data_);
} }
// Also spill the "simd sibling" register if we want to use {reg} for SIMD. // Also spill the "simd sibling" register if we want to use {reg} for SIMD.
if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) { if (kFPAliasing == AliasingKind::kCombine &&
rep == MachineRepresentation::kSimd128) {
RegisterIndex sibling = simdSibling(reg); RegisterIndex sibling = simdSibling(reg);
if (reg_state->IsAllocated(sibling)) { if (reg_state->IsAllocated(sibling)) {
int virtual_register = reg_state->VirtualRegisterForRegister(sibling); int virtual_register = reg_state->VirtualRegisterForRegister(sibling);
...@@ -1893,8 +1904,9 @@ void SinglePassRegisterAllocator::SpillRegisterAtMerge( ...@@ -1893,8 +1904,9 @@ void SinglePassRegisterAllocator::SpillRegisterAtMerge(
} }
} }
// Similarly, spill the whole SIMD register if we want to use a part of it. // Similarly, spill the whole SIMD register if we want to use a part of it.
if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat64 || if (kFPAliasing == AliasingKind::kCombine &&
rep == MachineRepresentation::kFloat32)) { (rep == MachineRepresentation::kFloat64 ||
rep == MachineRepresentation::kFloat32)) {
int simd_reg_code; int simd_reg_code;
CHECK_EQ(1, data_->config()->GetAliases(rep, ToRegCode(reg, rep), CHECK_EQ(1, data_->config()->GetAliases(rep, ToRegCode(reg, rep),
MachineRepresentation::kSimd128, MachineRepresentation::kSimd128,
...@@ -1980,7 +1992,8 @@ void SinglePassRegisterAllocator::CheckConsistency() { ...@@ -1980,7 +1992,8 @@ void SinglePassRegisterAllocator::CheckConsistency() {
RegisterIndex SinglePassRegisterAllocator::FromRegCode( RegisterIndex SinglePassRegisterAllocator::FromRegCode(
int reg_code, MachineRepresentation rep) const { int reg_code, MachineRepresentation rep) const {
if (!kSimpleFPAliasing && kind() == RegisterKind::kDouble) { if (kFPAliasing == AliasingKind::kCombine &&
kind() == RegisterKind::kDouble) {
if (rep == MachineRepresentation::kFloat32) { if (rep == MachineRepresentation::kFloat32) {
return RegisterIndex(float32_reg_code_to_index_->at(reg_code)); return RegisterIndex(float32_reg_code_to_index_->at(reg_code));
} else if (rep == MachineRepresentation::kSimd128) { } else if (rep == MachineRepresentation::kSimd128) {
...@@ -1994,7 +2007,8 @@ RegisterIndex SinglePassRegisterAllocator::FromRegCode( ...@@ -1994,7 +2007,8 @@ RegisterIndex SinglePassRegisterAllocator::FromRegCode(
int SinglePassRegisterAllocator::ToRegCode(RegisterIndex reg, int SinglePassRegisterAllocator::ToRegCode(RegisterIndex reg,
MachineRepresentation rep) const { MachineRepresentation rep) const {
if (!kSimpleFPAliasing && kind() == RegisterKind::kDouble) { if (kFPAliasing == AliasingKind::kCombine &&
kind() == RegisterKind::kDouble) {
if (rep == MachineRepresentation::kFloat32) { if (rep == MachineRepresentation::kFloat32) {
DCHECK_NE(-1, index_to_float32_reg_code_->at(reg.ToInt())); DCHECK_NE(-1, index_to_float32_reg_code_->at(reg.ToInt()));
return index_to_float32_reg_code_->at(reg.ToInt()); return index_to_float32_reg_code_->at(reg.ToInt());
...@@ -2129,7 +2143,8 @@ RegisterBitVector SinglePassRegisterAllocator::InUseBitmap(UsePosition pos) { ...@@ -2129,7 +2143,8 @@ RegisterBitVector SinglePassRegisterAllocator::InUseBitmap(UsePosition pos) {
bool SinglePassRegisterAllocator::IsValidForRep(RegisterIndex reg, bool SinglePassRegisterAllocator::IsValidForRep(RegisterIndex reg,
MachineRepresentation rep) { MachineRepresentation rep) {
if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) { if (kFPAliasing != AliasingKind::kCombine ||
kind() == RegisterKind::kGeneral) {
return true; return true;
} else { } else {
switch (rep) { switch (rep) {
...@@ -2157,7 +2172,8 @@ RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister( ...@@ -2157,7 +2172,8 @@ RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister( RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
const RegisterBitVector& allocated_regs, MachineRepresentation rep) { const RegisterBitVector& allocated_regs, MachineRepresentation rep) {
RegisterIndex chosen_reg = RegisterIndex::Invalid(); RegisterIndex chosen_reg = RegisterIndex::Invalid();
if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) { if (kFPAliasing != AliasingKind::kCombine ||
kind() == RegisterKind::kGeneral) {
chosen_reg = allocated_regs.GetFirstCleared(num_allocatable_registers_); chosen_reg = allocated_regs.GetFirstCleared(num_allocatable_registers_);
} else { } else {
// If we don't have simple fp aliasing, we need to check each register // If we don't have simple fp aliasing, we need to check each register
...@@ -2195,8 +2211,11 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill( ...@@ -2195,8 +2211,11 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill(
if (!IsValidForRep(reg, rep) || in_use.Contains(reg, rep)) continue; if (!IsValidForRep(reg, rep) || in_use.Contains(reg, rep)) continue;
// With non-simple FP aliasing, a SIMD register might block more than one FP // With non-simple FP aliasing, a SIMD register might block more than one FP
// register. // register.
DCHECK_IMPLIES(kSimpleFPAliasing, register_state_->IsAllocated(reg)); DCHECK_IMPLIES(kFPAliasing != AliasingKind::kCombine,
if (!kSimpleFPAliasing && !register_state_->IsAllocated(reg)) continue; register_state_->IsAllocated(reg));
if (kFPAliasing == AliasingKind::kCombine &&
!register_state_->IsAllocated(reg))
continue;
VirtualRegisterData& vreg_data = VirtualRegisterData& vreg_data =
VirtualRegisterDataFor(VirtualRegisterForRegister(reg)); VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
...@@ -2245,7 +2264,8 @@ void SinglePassRegisterAllocator::SpillRegisterAndPotentialSimdSibling( ...@@ -2245,7 +2264,8 @@ void SinglePassRegisterAllocator::SpillRegisterAndPotentialSimdSibling(
RegisterIndex reg, MachineRepresentation rep) { RegisterIndex reg, MachineRepresentation rep) {
SpillRegister(reg); SpillRegister(reg);
if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) { if (kFPAliasing == AliasingKind::kCombine &&
rep == MachineRepresentation::kSimd128) {
SpillRegister(simdSibling(reg)); SpillRegister(simdSibling(reg));
} }
} }
...@@ -2636,7 +2656,8 @@ void SinglePassRegisterAllocator::ReserveFixedRegister( ...@@ -2636,7 +2656,8 @@ void SinglePassRegisterAllocator::ReserveFixedRegister(
} }
// Also potentially spill the "sibling SIMD register" on architectures where a // Also potentially spill the "sibling SIMD register" on architectures where a
// SIMD register aliases two FP registers. // SIMD register aliases two FP registers.
if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) { if (kFPAliasing == AliasingKind::kCombine &&
rep == MachineRepresentation::kSimd128) {
if (register_state_->IsAllocated(simdSibling(reg)) && if (register_state_->IsAllocated(simdSibling(reg)) &&
!DefinedAfter(virtual_register, instr_index, pos)) { !DefinedAfter(virtual_register, instr_index, pos)) {
SpillRegister(simdSibling(reg)); SpillRegister(simdSibling(reg));
...@@ -2644,8 +2665,9 @@ void SinglePassRegisterAllocator::ReserveFixedRegister( ...@@ -2644,8 +2665,9 @@ void SinglePassRegisterAllocator::ReserveFixedRegister(
} }
// Similarly (but the other way around), spill a SIMD register that (partly) // Similarly (but the other way around), spill a SIMD register that (partly)
// overlaps with a fixed FP register. // overlaps with a fixed FP register.
if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat64 || if (kFPAliasing == AliasingKind::kCombine &&
rep == MachineRepresentation::kFloat32)) { (rep == MachineRepresentation::kFloat64 ||
rep == MachineRepresentation::kFloat32)) {
int simd_reg_code; int simd_reg_code;
CHECK_EQ( CHECK_EQ(
1, data_->config()->GetAliases( 1, data_->config()->GetAliases(
......
...@@ -38,7 +38,7 @@ class OperandSet { ...@@ -38,7 +38,7 @@ class OperandSet {
void InsertOp(const InstructionOperand& op) { void InsertOp(const InstructionOperand& op) {
set_->push_back(op); set_->push_back(op);
if (!kSimpleFPAliasing && op.IsFPRegister()) if (kFPAliasing == AliasingKind::kCombine && op.IsFPRegister())
fp_reps_ |= RepresentationBit(LocationOperand::cast(op).representation()); fp_reps_ |= RepresentationBit(LocationOperand::cast(op).representation());
} }
...@@ -52,7 +52,7 @@ class OperandSet { ...@@ -52,7 +52,7 @@ class OperandSet {
bool ContainsOpOrAlias(const InstructionOperand& op) const { bool ContainsOpOrAlias(const InstructionOperand& op) const {
if (Contains(op)) return true; if (Contains(op)) return true;
if (!kSimpleFPAliasing && op.IsFPRegister()) { if (kFPAliasing == AliasingKind::kCombine && op.IsFPRegister()) {
// Platforms where FP registers have complex aliasing need extra checks. // Platforms where FP registers have complex aliasing need extra checks.
const LocationOperand& loc = LocationOperand::cast(op); const LocationOperand& loc = LocationOperand::cast(op);
MachineRepresentation rep = loc.representation(); MachineRepresentation rep = loc.representation();
......
...@@ -12,7 +12,7 @@ namespace v8 { ...@@ -12,7 +12,7 @@ namespace v8 {
namespace internal { namespace internal {
namespace compiler { namespace compiler {
enum class RegisterKind { kGeneral, kDouble }; enum class RegisterKind { kGeneral, kDouble, kSimd128 };
inline int GetRegisterCount(const RegisterConfiguration* config, inline int GetRegisterCount(const RegisterConfiguration* config,
RegisterKind kind) { RegisterKind kind) {
...@@ -21,6 +21,8 @@ inline int GetRegisterCount(const RegisterConfiguration* config, ...@@ -21,6 +21,8 @@ inline int GetRegisterCount(const RegisterConfiguration* config,
return config->num_general_registers(); return config->num_general_registers();
case RegisterKind::kDouble: case RegisterKind::kDouble:
return config->num_double_registers(); return config->num_double_registers();
case RegisterKind::kSimd128:
return config->num_simd128_registers();
} }
} }
...@@ -31,6 +33,8 @@ inline int GetAllocatableRegisterCount(const RegisterConfiguration* config, ...@@ -31,6 +33,8 @@ inline int GetAllocatableRegisterCount(const RegisterConfiguration* config,
return config->num_allocatable_general_registers(); return config->num_allocatable_general_registers();
case RegisterKind::kDouble: case RegisterKind::kDouble:
return config->num_allocatable_double_registers(); return config->num_allocatable_double_registers();
case RegisterKind::kSimd128:
return config->num_allocatable_simd128_registers();
} }
} }
...@@ -41,6 +45,8 @@ inline const int* GetAllocatableRegisterCodes( ...@@ -41,6 +45,8 @@ inline const int* GetAllocatableRegisterCodes(
return config->allocatable_general_codes(); return config->allocatable_general_codes();
case RegisterKind::kDouble: case RegisterKind::kDouble:
return config->allocatable_double_codes(); return config->allocatable_double_codes();
case RegisterKind::kSimd128:
return config->allocatable_simd128_codes();
} }
} }
......
This diff is collapsed.
...@@ -372,8 +372,10 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData { ...@@ -372,8 +372,10 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
DelayedReferences delayed_references_; DelayedReferences delayed_references_;
BitVector* assigned_registers_; BitVector* assigned_registers_;
BitVector* assigned_double_registers_; BitVector* assigned_double_registers_;
BitVector* assigned_simd128_registers_;
BitVector* fixed_register_use_; BitVector* fixed_register_use_;
BitVector* fixed_fp_register_use_; BitVector* fixed_fp_register_use_;
BitVector* fixed_simd128_register_use_;
int virtual_register_count_; int virtual_register_count_;
RangesWithPreassignedSlots preassigned_slot_ranges_; RangesWithPreassignedSlots preassigned_slot_ranges_;
ZoneVector<ZoneVector<LiveRange*>> spill_state_; ZoneVector<ZoneVector<LiveRange*>> spill_state_;
...@@ -1244,6 +1246,7 @@ class LiveRangeBuilder final : public ZoneObject { ...@@ -1244,6 +1246,7 @@ class LiveRangeBuilder final : public ZoneObject {
TopLevelLiveRange* FixedLiveRangeFor(int index, SpillMode spill_mode); TopLevelLiveRange* FixedLiveRangeFor(int index, SpillMode spill_mode);
TopLevelLiveRange* FixedFPLiveRangeFor(int index, MachineRepresentation rep, TopLevelLiveRange* FixedFPLiveRangeFor(int index, MachineRepresentation rep,
SpillMode spill_mode); SpillMode spill_mode);
TopLevelLiveRange* FixedSIMD128LiveRangeFor(int index, SpillMode spill_mode);
void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos); void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos); void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
...@@ -1484,6 +1487,8 @@ class LinearScanAllocator final : public RegisterAllocator { ...@@ -1484,6 +1487,8 @@ class LinearScanAllocator final : public RegisterAllocator {
LiveRange* range, const base::Vector<LifetimePosition>& free_until_pos); LiveRange* range, const base::Vector<LifetimePosition>& free_until_pos);
void GetFPRegisterSet(MachineRepresentation rep, int* num_regs, void GetFPRegisterSet(MachineRepresentation rep, int* num_regs,
int* num_codes, const int** codes) const; int* num_codes, const int** codes) const;
void GetSIMD128RegisterSet(int* num_regs, int* num_codes,
const int** codes) const;
void FindFreeRegistersForRange(LiveRange* range, void FindFreeRegistersForRange(LiveRange* range,
base::Vector<LifetimePosition> free_until_pos); base::Vector<LifetimePosition> free_until_pos);
void ProcessCurrentRange(LiveRange* current, SpillMode spill_mode); void ProcessCurrentRange(LiveRange* current, SpillMode spill_mode);
......
...@@ -3073,7 +3073,7 @@ VISIT_SIMD_QFMOP(F32x4Qfms, kRiscvF32x4Qfms) ...@@ -3073,7 +3073,7 @@ VISIT_SIMD_QFMOP(F32x4Qfms, kRiscvF32x4Qfms)
void InstructionSelector::VisitI32x4DotI16x8S(Node* node) { void InstructionSelector::VisitI32x4DotI16x8S(Node* node) {
RiscvOperandGenerator g(this); RiscvOperandGenerator g(this);
InstructionOperand temp = g.TempFpRegister(v16); InstructionOperand temp = g.TempFpRegister(v16);
InstructionOperand temp1 = g.TempFpRegister(v17); InstructionOperand temp1 = g.TempFpRegister(v14);
InstructionOperand temp2 = g.TempFpRegister(v30); InstructionOperand temp2 = g.TempFpRegister(v30);
InstructionOperand dst = g.DefineAsRegister(node); InstructionOperand dst = g.DefineAsRegister(node);
this->Emit(kRiscvVwmul, temp, g.UseRegister(node->InputAt(0)), this->Emit(kRiscvVwmul, temp, g.UseRegister(node->InputAt(0)),
......
...@@ -2262,6 +2262,17 @@ struct AllocateFPRegistersPhase { ...@@ -2262,6 +2262,17 @@ struct AllocateFPRegistersPhase {
} }
}; };
template <typename RegAllocator>
struct AllocateSimd128RegistersPhase {
DECL_PIPELINE_PHASE_CONSTANTS(AllocateSIMD128Registers)
void Run(PipelineData* data, Zone* temp_zone) {
RegAllocator allocator(data->top_tier_register_allocation_data(),
RegisterKind::kSimd128, temp_zone);
allocator.AllocateRegisters();
}
};
struct DecideSpillingModePhase { struct DecideSpillingModePhase {
DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode) DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)
...@@ -3734,6 +3745,11 @@ void PipelineImpl::AllocateRegistersForTopTier( ...@@ -3734,6 +3745,11 @@ void PipelineImpl::AllocateRegistersForTopTier(
Run<AllocateFPRegistersPhase<LinearScanAllocator>>(); Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
} }
if (data->sequence()->HasSimd128VirtualRegisters() &&
(kFPAliasing == AliasingKind::kIndependent)) {
Run<AllocateSimd128RegistersPhase<LinearScanAllocator>>();
}
Run<DecideSpillingModePhase>(); Run<DecideSpillingModePhase>();
Run<AssignSpillSlotsPhase>(); Run<AssignSpillSlotsPhase>();
Run<CommitAssignmentPhase>(); Run<CommitAssignmentPhase>();
......
...@@ -317,8 +317,8 @@ class RuntimeCallTimer final { ...@@ -317,8 +317,8 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, ScopeAnalysis) \ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, ScopeAnalysis) \
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Script) \ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Script) \
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, CompileTask) \ ADD_THREAD_SPECIFIC_COUNTER(V, Compile, CompileTask) \
\
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateFPRegisters) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateFPRegisters) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateSIMD128Registers) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateGeneralRegisters) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateGeneralRegisters) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssembleCode) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssembleCode) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \ ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \
......
...@@ -17,7 +17,7 @@ namespace internal { ...@@ -17,7 +17,7 @@ namespace internal {
namespace wasm { namespace wasm {
static constexpr bool kNeedI64RegPair = kSystemPointerSize == 4; static constexpr bool kNeedI64RegPair = kSystemPointerSize == 4;
static constexpr bool kNeedS128RegPair = !kSimpleFPAliasing; static constexpr bool kNeedS128RegPair = kFPAliasing == AliasingKind::kCombine;
enum RegClass : uint8_t { enum RegClass : uint8_t {
kGpReg, kGpReg,
...@@ -190,7 +190,7 @@ class LiftoffRegister { ...@@ -190,7 +190,7 @@ class LiftoffRegister {
// LiftoffRegister. // LiftoffRegister.
static LiftoffRegister from_external_code(RegClass rc, ValueKind kind, static LiftoffRegister from_external_code(RegClass rc, ValueKind kind,
int code) { int code) {
if (!kSimpleFPAliasing && kind == kF32) { if (kFPAliasing == AliasingKind::kCombine && kind == kF32) {
// Liftoff assumes a one-to-one mapping between float registers and // Liftoff assumes a one-to-one mapping between float registers and
// double registers, and so does not distinguish between f32 and f64 // double registers, and so does not distinguish between f32 and f64
// registers. The f32 register code must therefore be halved in order // registers. The f32 register code must therefore be halved in order
......
...@@ -428,11 +428,6 @@ ...@@ -428,11 +428,6 @@
'test-cpu-profiler/CrossScriptInliningCallerLineNumbers2': [SKIP], 'test-cpu-profiler/CrossScriptInliningCallerLineNumbers2': [SKIP],
# SIMD not fully implemented yet. # SIMD not fully implemented yet.
'test-run-wasm-relaxed-simd/*': [SKIP],
'test-run-wasm-simd/RunWasm_F64x2ExtractLaneWithI64x2_liftoff': [SKIP],
'test-run-wasm-simd/RunWasm_I64x2ExtractWithF64x2_liftoff': [SKIP],
'test-run-wasm-simd-liftoff/*': [SKIP],
'test-run-wasm-simd/*':[SKIP],
'test-gc/RunWasmLiftoff_RefTrivialCasts': [SKIP], 'test-gc/RunWasmLiftoff_RefTrivialCasts': [SKIP],
'test-gc/RunWasmTurbofan_RefTrivialCasts': [SKIP], 'test-gc/RunWasmTurbofan_RefTrivialCasts': [SKIP],
......
...@@ -460,7 +460,7 @@ class TestEnvironment : public HandleAndZoneScope { ...@@ -460,7 +460,7 @@ class TestEnvironment : public HandleAndZoneScope {
((kDoubleRegisterCount % 2) == 0) && ((kDoubleRegisterCount % 3) == 0), ((kDoubleRegisterCount % 2) == 0) && ((kDoubleRegisterCount % 3) == 0),
"kDoubleRegisterCount should be a multiple of two and three."); "kDoubleRegisterCount should be a multiple of two and three.");
for (int i = 0; i < kDoubleRegisterCount; i += 2) { for (int i = 0; i < kDoubleRegisterCount; i += 2) {
if (kSimpleFPAliasing) { if (kFPAliasing != AliasingKind::kCombine) {
// Allocate three registers at once if kSimd128 is supported, else // Allocate three registers at once if kSimd128 is supported, else
// allocate in pairs. // allocate in pairs.
AddRegister(&test_signature, MachineRepresentation::kFloat32, AddRegister(&test_signature, MachineRepresentation::kFloat32,
......
...@@ -17,7 +17,7 @@ const auto GetRegConfig = RegisterConfiguration::Default; ...@@ -17,7 +17,7 @@ const auto GetRegConfig = RegisterConfiguration::Default;
// simplify ParallelMove equivalence testing. // simplify ParallelMove equivalence testing.
void GetCanonicalOperands(const InstructionOperand& op, void GetCanonicalOperands(const InstructionOperand& op,
std::vector<InstructionOperand>* fragments) { std::vector<InstructionOperand>* fragments) {
CHECK(!kSimpleFPAliasing); CHECK_EQ(kFPAliasing, AliasingKind::kCombine);
CHECK(op.IsFPLocationOperand()); CHECK(op.IsFPLocationOperand());
const LocationOperand& loc = LocationOperand::cast(op); const LocationOperand& loc = LocationOperand::cast(op);
MachineRepresentation rep = loc.representation(); MachineRepresentation rep = loc.representation();
...@@ -51,7 +51,7 @@ class InterpreterState { ...@@ -51,7 +51,7 @@ class InterpreterState {
CHECK(!m->IsRedundant()); CHECK(!m->IsRedundant());
const InstructionOperand& src = m->source(); const InstructionOperand& src = m->source();
const InstructionOperand& dst = m->destination(); const InstructionOperand& dst = m->destination();
if (!kSimpleFPAliasing && src.IsFPLocationOperand() && if (kFPAliasing == AliasingKind::kCombine && src.IsFPLocationOperand() &&
dst.IsFPLocationOperand()) { dst.IsFPLocationOperand()) {
// Canonicalize FP location-location moves by fragmenting them into // Canonicalize FP location-location moves by fragmenting them into
// an equivalent sequence of float32 moves, to simplify state // an equivalent sequence of float32 moves, to simplify state
...@@ -137,8 +137,15 @@ class InterpreterState { ...@@ -137,8 +137,15 @@ class InterpreterState {
// Preserve FP representation when FP register aliasing is complex. // Preserve FP representation when FP register aliasing is complex.
// Otherwise, canonicalize to kFloat64. // Otherwise, canonicalize to kFloat64.
if (IsFloatingPoint(loc_op.representation())) { if (IsFloatingPoint(loc_op.representation())) {
rep = kSimpleFPAliasing ? MachineRepresentation::kFloat64 if (kFPAliasing == AliasingKind::kIndependent) {
: loc_op.representation(); rep = IsSimd128(loc_op.representation())
? MachineRepresentation::kSimd128
: MachineRepresentation::kFloat64;
} else if (kFPAliasing == AliasingKind::kOverlap) {
rep = MachineRepresentation::kFloat64;
} else {
rep = loc_op.representation();
}
} }
if (loc_op.IsAnyRegister()) { if (loc_op.IsAnyRegister()) {
index = loc_op.register_code(); index = loc_op.register_code();
...@@ -234,7 +241,8 @@ class ParallelMoveCreator : public HandleAndZoneScope { ...@@ -234,7 +241,8 @@ class ParallelMoveCreator : public HandleAndZoneScope {
// On architectures where FP register aliasing is non-simple, update the // On architectures where FP register aliasing is non-simple, update the
// destinations set with the float equivalents of the operand and check // destinations set with the float equivalents of the operand and check
// that all destinations are unique and do not alias each other. // that all destinations are unique and do not alias each other.
if (!kSimpleFPAliasing && mo.destination().IsFPLocationOperand()) { if (kFPAliasing == AliasingKind::kCombine &&
mo.destination().IsFPLocationOperand()) {
std::vector<InstructionOperand> dst_fragments; std::vector<InstructionOperand> dst_fragments;
GetCanonicalOperands(dst, &dst_fragments); GetCanonicalOperands(dst, &dst_fragments);
CHECK(!dst_fragments.empty()); CHECK(!dst_fragments.empty());
...@@ -383,7 +391,7 @@ void RunTest(ParallelMove* pm, Zone* zone) { ...@@ -383,7 +391,7 @@ void RunTest(ParallelMove* pm, Zone* zone) {
TEST(Aliasing) { TEST(Aliasing) {
// On platforms with simple aliasing, these parallel moves are ill-formed. // On platforms with simple aliasing, these parallel moves are ill-formed.
if (kSimpleFPAliasing) return; if (kFPAliasing != AliasingKind::kCombine) return;
ParallelMoveCreator pmc; ParallelMoveCreator pmc;
Zone* zone = pmc.main_zone(); Zone* zone = pmc.main_zone();
......
...@@ -26,10 +26,10 @@ TEST_F(RegisterConfigurationUnitTest, BasicProperties) { ...@@ -26,10 +26,10 @@ TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
int general_codes[kNumAllocatableGeneralRegs] = {1, 2}; int general_codes[kNumAllocatableGeneralRegs] = {1, 2};
int double_codes[kNumAllocatableDoubleRegs] = {2, 3}; int double_codes[kNumAllocatableDoubleRegs] = {2, 3};
RegisterConfiguration test(kNumGeneralRegs, kNumDoubleRegs, RegisterConfiguration test(AliasingKind::kOverlap, kNumGeneralRegs,
kNumAllocatableGeneralRegs, kNumDoubleRegs, 0, kNumAllocatableGeneralRegs,
kNumAllocatableDoubleRegs, general_codes, kNumAllocatableDoubleRegs, 0, general_codes,
double_codes, RegisterConfiguration::OVERLAP); double_codes);
EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs); EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs);
EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs); EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs);
...@@ -62,10 +62,10 @@ TEST_F(RegisterConfigurationUnitTest, CombineAliasing) { ...@@ -62,10 +62,10 @@ TEST_F(RegisterConfigurationUnitTest, CombineAliasing) {
int general_codes[] = {1, 2}; int general_codes[] = {1, 2};
int double_codes[] = {2, 3, 16}; // reg 16 should not alias registers 32, 33. int double_codes[] = {2, 3, 16}; // reg 16 should not alias registers 32, 33.
RegisterConfiguration test(kNumGeneralRegs, kNumDoubleRegs, RegisterConfiguration test(AliasingKind::kCombine, kNumGeneralRegs,
kNumAllocatableGeneralRegs, kNumDoubleRegs, 0, kNumAllocatableGeneralRegs,
kNumAllocatableDoubleRegs, general_codes, kNumAllocatableDoubleRegs, 0, general_codes,
double_codes, RegisterConfiguration::COMBINE); double_codes);
// There are 3 allocatable double regs, but only 2 can alias float regs. // There are 3 allocatable double regs, but only 2 can alias float regs.
EXPECT_EQ(test.num_allocatable_float_registers(), 4); EXPECT_EQ(test.num_allocatable_float_registers(), 4);
......
...@@ -24,6 +24,7 @@ InstructionSequenceTest::InstructionSequenceTest() ...@@ -24,6 +24,7 @@ InstructionSequenceTest::InstructionSequenceTest()
: sequence_(nullptr), : sequence_(nullptr),
num_general_registers_(Register::kNumRegisters), num_general_registers_(Register::kNumRegisters),
num_double_registers_(DoubleRegister::kNumRegisters), num_double_registers_(DoubleRegister::kNumRegisters),
num_simd128_registers_(Simd128Register::kNumRegisters),
instruction_blocks_(zone()), instruction_blocks_(zone()),
current_block_(nullptr), current_block_(nullptr),
block_returns_(false) {} block_returns_(false) {}
...@@ -69,11 +70,10 @@ int InstructionSequenceTest::GetAllocatableCode(int index, ...@@ -69,11 +70,10 @@ int InstructionSequenceTest::GetAllocatableCode(int index,
const RegisterConfiguration* InstructionSequenceTest::config() { const RegisterConfiguration* InstructionSequenceTest::config() {
if (!config_) { if (!config_) {
config_.reset(new RegisterConfiguration( config_.reset(new RegisterConfiguration(
num_general_registers_, num_double_registers_, num_general_registers_, kFPAliasing, num_general_registers_, num_double_registers_,
num_double_registers_, kAllocatableCodes.data(), num_simd128_registers_, num_general_registers_, num_double_registers_,
kAllocatableCodes.data(), num_simd128_registers_, kAllocatableCodes.data(),
kSimpleFPAliasing ? RegisterConfiguration::OVERLAP kAllocatableCodes.data(), kAllocatableCodes.data()));
: RegisterConfiguration::COMBINE));
} }
return config_.get(); return config_.get();
} }
......
...@@ -279,6 +279,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone { ...@@ -279,6 +279,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
InstructionSequence* sequence_; InstructionSequence* sequence_;
int num_general_registers_; int num_general_registers_;
int num_double_registers_; int num_double_registers_;
int num_simd128_registers_;
// Block building state. // Block building state.
InstructionBlocks instruction_blocks_; InstructionBlocks instruction_blocks_;
......
...@@ -85,7 +85,7 @@ TEST_F(InstructionTest, OperandInterference) { ...@@ -85,7 +85,7 @@ TEST_F(InstructionTest, OperandInterference) {
EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT, kDouble, i, kDouble, i)); EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT, kDouble, i, kDouble, i));
} }
if (kSimpleFPAliasing) { if (kFPAliasing != AliasingKind::kCombine) {
// Simple FP aliasing: interfering registers of different reps have the same // Simple FP aliasing: interfering registers of different reps have the same
// index. // index.
for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) { for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
...@@ -162,7 +162,7 @@ TEST_F(InstructionTest, PrepareInsertAfter) { ...@@ -162,7 +162,7 @@ TEST_F(InstructionTest, PrepareInsertAfter) {
CHECK(Contains(&to_eliminate, d2, d0)); CHECK(Contains(&to_eliminate, d2, d0));
} }
if (!kSimpleFPAliasing) { if (kFPAliasing == AliasingKind::kCombine) {
// Moves inserted after should cause all interfering moves to be eliminated. // Moves inserted after should cause all interfering moves to be eliminated.
auto s0 = AllocatedOperand(LocationOperand::REGISTER, auto s0 = AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kFloat32, 0); MachineRepresentation::kFloat32, 0);
......
...@@ -360,7 +360,7 @@ TEST_F(MoveOptimizerTest, ClobberedFPDestinationsAreEliminated) { ...@@ -360,7 +360,7 @@ TEST_F(MoveOptimizerTest, ClobberedFPDestinationsAreEliminated) {
EmitNop(); EmitNop();
Instruction* first_instr = LastInstruction(); Instruction* first_instr = LastInstruction();
AddMove(first_instr, FPReg(4, kFloat64), FPReg(1, kFloat64)); AddMove(first_instr, FPReg(4, kFloat64), FPReg(1, kFloat64));
if (!kSimpleFPAliasing) { if (kFPAliasing == AliasingKind::kCombine) {
// We clobber q0 below. This is aliased by d0, d1, s0, s1, s2, and s3. // We clobber q0 below. This is aliased by d0, d1, s0, s1, s2, and s3.
// Add moves to registers s2 and s3. // Add moves to registers s2 and s3.
AddMove(first_instr, FPReg(10, kFloat32), FPReg(0, kFloat32)); AddMove(first_instr, FPReg(10, kFloat32), FPReg(0, kFloat32));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment