Commit 818d73ca authored by Lu Yahan's avatar Lu Yahan Committed by V8 LUCI CQ

[riscv64][register-alloc] Implement vector register independently allocating

Vector registers use a different register file from the float registers in the RISC-V 64 RVV extension.
So this CL adds a third FP aliasing kind, INDEPENDENT (kIndependent), to allocate SIMD registers independently.

Bug: v8:11976

doc: https://docs.google.com/document/d/1UwmUwOI3eeIMYzZFRmeXmfyNXRFHNZAQ4BcN0ODdMmo/edit?usp=sharing

Change-Id: I0fb8901294b4bc44b0bee55e630b60460e42bef2
Reviewed-on: https://chromium-review.googlesource.com/c/v8/v8/+/3383513
Reviewed-by: Nico Hartmann <nicohartmann@chromium.org>
Reviewed-by: Clemens Backes <clemensb@chromium.org>
Auto-Submit: Yahan Lu <yahan@iscas.ac.cn>
Commit-Queue: Yahan Lu <yahan@iscas.ac.cn>
Cr-Commit-Position: refs/heads/main@{#79449}
parent 30974f92
......@@ -6,6 +6,7 @@
#define V8_CODEGEN_ARM_REGISTER_ARM_H_
#include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h"
namespace v8 {
......@@ -125,7 +126,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
constexpr bool kSimpleFPAliasing = false;
constexpr AliasingKind kFPAliasing = AliasingKind::kCombine;
constexpr bool kSimdMaskRegisters = false;
enum SwVfpRegisterCode {
......
......@@ -7,6 +7,7 @@
#include "src/codegen/arm64/utils-arm64.h"
#include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h"
#include "src/common/globals.h"
......@@ -276,7 +277,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return argument_count & alignment_mask;
}
constexpr bool kSimpleFPAliasing = true;
constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
......
......@@ -6,6 +6,7 @@
#define V8_CODEGEN_IA32_REGISTER_IA32_H_
#include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h"
namespace v8 {
......@@ -82,7 +83,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
constexpr bool kSimpleFPAliasing = true;
constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleCode {
......
......@@ -7,6 +7,7 @@
#include "src/codegen/loong64/constants-loong64.h"
#include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h"
namespace v8 {
......@@ -177,7 +178,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
constexpr bool kSimpleFPAliasing = true;
constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
......
......@@ -340,6 +340,10 @@ inline bool IsFloatingPoint(MachineRepresentation rep) {
return rep >= MachineRepresentation::kFirstFPRepresentation;
}
// True exactly when {rep} is the 128-bit SIMD representation.
inline bool IsSimd128(MachineRepresentation rep) {
  switch (rep) {
    case MachineRepresentation::kSimd128:
      return true;
    default:
      return false;
  }
}
inline bool CanBeTaggedPointer(MachineRepresentation rep) {
return rep == MachineRepresentation::kTagged ||
rep == MachineRepresentation::kTaggedPointer ||
......
......@@ -7,6 +7,7 @@
#include "src/codegen/mips/constants-mips.h"
#include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h"
namespace v8 {
......@@ -209,7 +210,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
constexpr bool kSimpleFPAliasing = true;
constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
......
......@@ -7,6 +7,7 @@
#include "src/codegen/mips64/constants-mips64.h"
#include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h"
namespace v8 {
......@@ -209,7 +210,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
constexpr bool kSimpleFPAliasing = true;
constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum MSARegisterCode {
......
......@@ -6,6 +6,7 @@
#define V8_CODEGEN_PPC_REGISTER_PPC_H_
#include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h"
namespace v8 {
......@@ -219,7 +220,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
constexpr bool kSimpleFPAliasing = true;
constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
......
......@@ -19,6 +19,10 @@ static const int kMaxAllocatableGeneralRegisterCount =
ALLOCATABLE_GENERAL_REGISTERS(REGISTER_COUNT) 0;
static const int kMaxAllocatableDoubleRegisterCount =
ALLOCATABLE_DOUBLE_REGISTERS(REGISTER_COUNT) 0;
#if V8_TARGET_ARCH_RISCV64
static const int kMaxAllocatableSIMD128RegisterCount =
ALLOCATABLE_SIMD128_REGISTERS(REGISTER_COUNT) 0;
#endif
static const int kAllocatableGeneralCodes[] = {
#define REGISTER_CODE(R) kRegCode_##R,
......@@ -34,6 +38,13 @@ static const int kAllocatableNoVFP32DoubleCodes[] = {
#endif // V8_TARGET_ARCH_ARM
#undef REGISTER_CODE
#if V8_TARGET_ARCH_RISCV64
static const int kAllocatableSIMD128Codes[] = {
#define REGISTER_CODE(R) kVRCode_##R,
ALLOCATABLE_SIMD128_REGISTERS(REGISTER_CODE)};
#undef REGISTER_CODE
#endif // V8_TARGET_ARCH_RISCV64
STATIC_ASSERT(RegisterConfiguration::kMaxGeneralRegisters >=
Register::kNumRegisters);
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
......@@ -43,6 +54,15 @@ STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
STATIC_ASSERT(RegisterConfiguration::kMaxFPRegisters >=
Simd128Register::kNumRegisters);
// Total number of SIMD128 registers on the target. Only RISC-V 64 models a
// separate vector register file here; every other target reports zero.
static int get_num_simd128_registers() {
#if V8_TARGET_ARCH_RISCV64
  return Simd128Register::kNumRegisters;
#else
  return 0;
#endif  // V8_TARGET_ARCH_RISCV64
}
// Callers on architectures other than Arm expect this to be constant
// between build and runtime. Avoid adding variability on other platforms.
static int get_num_allocatable_double_registers() {
......@@ -78,6 +98,15 @@ static int get_num_allocatable_double_registers() {
#undef REGISTER_COUNT
// Number of allocatable SIMD128 registers. Non-zero only on RISC-V 64,
// where SIMD registers are allocated independently of the FP registers.
static int get_num_allocatable_simd128_registers() {
#if V8_TARGET_ARCH_RISCV64
  return kMaxAllocatableSIMD128RegisterCount;
#else
  return 0;
#endif
}
// Callers on architectures other than Arm expect this to be constant
// between build and runtime. Avoid adding variability on other platforms.
static const int* get_allocatable_double_codes() {
......@@ -90,16 +119,24 @@ static const int* get_allocatable_double_codes() {
#endif
}
// Codes of the allocatable SIMD128 registers. On RISC-V 64 these come from
// the dedicated vector register file; on all other targets SIMD registers
// overlap the double registers, so the double codes are reused.
static const int* get_allocatable_simd128_codes() {
#if V8_TARGET_ARCH_RISCV64
  return kAllocatableSIMD128Codes;
#else
  return kAllocatableDoubleCodes;
#endif
}
class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
public:
ArchDefaultRegisterConfiguration()
: RegisterConfiguration(
Register::kNumRegisters, DoubleRegister::kNumRegisters,
kMaxAllocatableGeneralRegisterCount,
get_num_allocatable_double_registers(), kAllocatableGeneralCodes,
get_allocatable_double_codes(),
kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE) {
}
kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters,
get_num_simd128_registers(), kMaxAllocatableGeneralRegisterCount,
get_num_allocatable_double_registers(),
get_num_allocatable_simd128_registers(), kAllocatableGeneralCodes,
get_allocatable_double_codes(), get_allocatable_simd128_codes()) {}
};
DEFINE_LAZY_LEAKY_OBJECT_GETTER(ArchDefaultRegisterConfiguration,
......@@ -115,12 +152,12 @@ class RestrictedRegisterConfiguration : public RegisterConfiguration {
std::unique_ptr<int[]> allocatable_general_register_codes,
std::unique_ptr<char const*[]> allocatable_general_register_names)
: RegisterConfiguration(
Register::kNumRegisters, DoubleRegister::kNumRegisters,
num_allocatable_general_registers,
kFPAliasing, Register::kNumRegisters, DoubleRegister::kNumRegisters,
get_num_simd128_registers(), num_allocatable_general_registers,
get_num_allocatable_double_registers(),
get_num_allocatable_simd128_registers(),
allocatable_general_register_codes.get(),
get_allocatable_double_codes(),
kSimpleFPAliasing ? AliasingKind::OVERLAP : AliasingKind::COMBINE),
get_allocatable_double_codes(), get_allocatable_simd128_codes()),
allocatable_general_register_codes_(
std::move(allocatable_general_register_codes)),
allocatable_general_register_names_(
......@@ -172,18 +209,20 @@ const RegisterConfiguration* RegisterConfiguration::RestrictGeneralRegisters(
}
RegisterConfiguration::RegisterConfiguration(
int num_general_registers, int num_double_registers,
AliasingKind fp_aliasing_kind, int num_general_registers,
int num_double_registers, int num_simd128_registers,
int num_allocatable_general_registers, int num_allocatable_double_registers,
const int* allocatable_general_codes, const int* allocatable_double_codes,
AliasingKind fp_aliasing_kind)
int num_allocatable_simd128_registers, const int* allocatable_general_codes,
const int* allocatable_double_codes,
const int* independent_allocatable_simd128_codes)
: num_general_registers_(num_general_registers),
num_float_registers_(0),
num_double_registers_(num_double_registers),
num_simd128_registers_(0),
num_simd128_registers_(num_simd128_registers),
num_allocatable_general_registers_(num_allocatable_general_registers),
num_allocatable_float_registers_(0),
num_allocatable_double_registers_(num_allocatable_double_registers),
num_allocatable_simd128_registers_(0),
num_allocatable_simd128_registers_(num_allocatable_simd128_registers),
allocatable_general_codes_mask_(0),
allocatable_float_codes_mask_(0),
allocatable_double_codes_mask_(0),
......@@ -201,7 +240,7 @@ RegisterConfiguration::RegisterConfiguration(
allocatable_double_codes_mask_ |= (1 << allocatable_double_codes_[i]);
}
if (fp_aliasing_kind_ == COMBINE) {
if (fp_aliasing_kind_ == AliasingKind::kCombine) {
num_float_registers_ = num_double_registers_ * 2 <= kMaxFPRegisters
? num_double_registers_ * 2
: kMaxFPRegisters;
......@@ -228,8 +267,7 @@ RegisterConfiguration::RegisterConfiguration(
}
last_simd128_code = next_simd128_code;
}
} else {
DCHECK(fp_aliasing_kind_ == OVERLAP);
} else if (fp_aliasing_kind_ == AliasingKind::kOverlap) {
num_float_registers_ = num_simd128_registers_ = num_double_registers_;
num_allocatable_float_registers_ = num_allocatable_simd128_registers_ =
num_allocatable_double_registers_;
......@@ -239,6 +277,21 @@ RegisterConfiguration::RegisterConfiguration(
}
allocatable_float_codes_mask_ = allocatable_simd128_codes_mask_ =
allocatable_double_codes_mask_;
} else {
DCHECK_EQ(fp_aliasing_kind_, AliasingKind::kIndependent);
DCHECK_NE(independent_allocatable_simd128_codes, nullptr);
num_float_registers_ = num_double_registers_;
num_allocatable_float_registers_ = num_allocatable_double_registers_;
for (int i = 0; i < num_allocatable_float_registers_; ++i) {
allocatable_float_codes_[i] = allocatable_double_codes_[i];
}
allocatable_float_codes_mask_ = allocatable_double_codes_mask_;
for (int i = 0; i < num_allocatable_simd128_registers; i++) {
allocatable_simd128_codes_[i] = independent_allocatable_simd128_codes[i];
}
for (int i = 0; i < num_allocatable_simd128_registers_; ++i) {
allocatable_simd128_codes_mask_ |= (1 << allocatable_simd128_codes_[i]);
}
}
}
......@@ -251,7 +304,7 @@ STATIC_ASSERT(static_cast<int>(MachineRepresentation::kFloat64) ==
int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
MachineRepresentation other_rep,
int* alias_base_index) const {
DCHECK(fp_aliasing_kind_ == COMBINE);
DCHECK(fp_aliasing_kind_ == AliasingKind::kCombine);
DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
if (rep == other_rep) {
*alias_base_index = index;
......@@ -277,7 +330,7 @@ int RegisterConfiguration::GetAliases(MachineRepresentation rep, int index,
bool RegisterConfiguration::AreAliases(MachineRepresentation rep, int index,
MachineRepresentation other_rep,
int other_index) const {
DCHECK(fp_aliasing_kind_ == COMBINE);
DCHECK(fp_aliasing_kind_ == AliasingKind::kCombine);
DCHECK(IsFloatingPoint(rep) && IsFloatingPoint(other_rep));
if (rep == other_rep) {
return index == other_index;
......
......@@ -16,15 +16,17 @@ namespace internal {
// An architecture independent representation of the sets of registers available
// for instruction creation.
// Describes how floating-point/SIMD registers of different widths alias
// each other on the target architecture.
enum class AliasingKind {
  // Registers alias a single register of every other size (e.g. Intel).
  kOverlap,
  // Registers alias two registers of the next smaller size (e.g. ARM).
  kCombine,
  // SIMD128 registers are independent of every other size (e.g. RISC-V RVV,
  // where vector registers form a separate register file).
  kIndependent
};
class V8_EXPORT_PRIVATE RegisterConfiguration {
public:
enum AliasingKind {
// Registers alias a single register of every other size (e.g. Intel).
OVERLAP,
// Registers alias two registers of the next smaller size (e.g. ARM).
COMBINE
};
// Architecture independent maxes.
static constexpr int kMaxGeneralRegisters = 32;
static constexpr int kMaxFPRegisters = 32;
......@@ -40,12 +42,14 @@ class V8_EXPORT_PRIVATE RegisterConfiguration {
static const RegisterConfiguration* RestrictGeneralRegisters(
RegList registers);
RegisterConfiguration(int num_general_registers, int num_double_registers,
int num_allocatable_general_registers,
int num_allocatable_double_registers,
const int* allocatable_general_codes,
const int* allocatable_double_codes,
AliasingKind fp_aliasing_kind);
RegisterConfiguration(
AliasingKind fp_aliasing_kind, int num_general_registers,
int num_double_registers, int num_simd128_registers,
int num_allocatable_general_registers,
int num_allocatable_double_registers,
int num_allocatable_simd128_registers,
const int* allocatable_general_codes, const int* allocatable_double_codes,
const int* independent_allocatable_simd128_codes = nullptr);
int num_general_registers() const { return num_general_registers_; }
int num_float_registers() const { return num_float_registers_; }
......
......@@ -6,6 +6,7 @@
#define V8_CODEGEN_RISCV64_REGISTER_RISCV64_H_
#include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h"
#include "src/codegen/riscv64/constants-riscv64.h"
......@@ -55,10 +56,11 @@ namespace internal {
V(v16) V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
V(v24) V(v25) V(v26) V(v27) V(v28) V(v29) V(v30) V(v31)
#define UNALLOACTABLE_VECTOR_REGISTERS(V) \
V(v9) V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) \
V(v18) V(v19) V(v20) V(v21) V(v22) V(v23) \
V(v24) V(v25)
#define ALLOCATABLE_SIMD128_REGISTERS(V) \
V(v1) V(v2) V(v3) V(v4) V(v5) V(v6) V(v7) \
V(v10) V(v11) V(v12) V(v13) V(v14) V(v15) V(v16) \
V(v17) V(v18) V(v19) V(v20) V(v21) V(v22) V(v26) \
V(v27) V(v28) V(v29) V(v30) V(v31)
#define ALLOCATABLE_DOUBLE_REGISTERS(V) \
V(ft1) V(ft2) V(ft3) V(ft4) V(ft5) V(ft6) V(ft7) V(ft8) \
......@@ -253,7 +255,7 @@ int ToNumber(Register reg);
Register ToRegister(int num);
constexpr bool kPadArguments = false;
constexpr bool kSimpleFPAliasing = true;
constexpr AliasingKind kFPAliasing = AliasingKind::kIndependent;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
......@@ -299,11 +301,6 @@ class FPURegister : public RegisterBase<FPURegister, kDoubleAfterLast> {
// register and floating point register are shared.
VRegister toV() const {
DCHECK(base::IsInRange(code(), 0, kVRAfterLast - 1));
// FIXME(riscv): Because V0 is a special mask reg, so can't allocate it.
// And v8 is unallocated so we replace v0 with v8
if (code() == 0) {
return VRegister(8);
}
return VRegister(code());
}
......
......@@ -6,6 +6,7 @@
#define V8_CODEGEN_S390_REGISTER_S390_H_
#include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h"
namespace v8 {
......@@ -173,7 +174,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
constexpr bool kSimpleFPAliasing = true;
constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
......
......@@ -6,6 +6,7 @@
#define V8_CODEGEN_X64_REGISTER_X64_H_
#include "src/codegen/register-base.h"
#include "src/codegen/register-configuration.h"
#include "src/codegen/reglist.h"
namespace v8 {
......@@ -176,7 +177,7 @@ constexpr int ArgumentPaddingSlots(int argument_count) {
return 0;
}
constexpr bool kSimpleFPAliasing = true;
constexpr AliasingKind kFPAliasing = AliasingKind::kOverlap;
constexpr bool kSimdMaskRegisters = false;
enum DoubleRegisterCode {
......
......@@ -22,7 +22,7 @@ namespace {
// aliasing, and makes swaps much easier to implement.
MoveOperands* Split(MoveOperands* move, MachineRepresentation smaller_rep,
ParallelMove* moves) {
DCHECK(!kSimpleFPAliasing);
DCHECK(kFPAliasing == AliasingKind::kCombine);
// Splitting is only possible when the slot size is the same as float size.
DCHECK_EQ(kSystemPointerSize, kFloatSize);
const LocationOperand& src_loc = LocationOperand::cast(move->source());
......@@ -104,7 +104,8 @@ void GapResolver::Resolve(ParallelMove* moves) {
i++;
source_kinds.Add(GetKind(move->source()));
destination_kinds.Add(GetKind(move->destination()));
if (!kSimpleFPAliasing && move->destination().IsFPRegister()) {
if (kFPAliasing == AliasingKind::kCombine &&
move->destination().IsFPRegister()) {
fp_reps |= RepresentationBit(
LocationOperand::cast(move->destination()).representation());
}
......@@ -119,7 +120,7 @@ void GapResolver::Resolve(ParallelMove* moves) {
return;
}
if (!kSimpleFPAliasing) {
if (kFPAliasing == AliasingKind::kCombine) {
if (fp_reps && !base::bits::IsPowerOfTwo(fp_reps)) {
// Start with the smallest FP moves, so we never encounter smaller moves
// in the middle of a cycle of larger moves.
......@@ -166,8 +167,8 @@ void GapResolver::PerformMove(ParallelMove* moves, MoveOperands* move) {
move->SetPending();
// We may need to split moves between FP locations differently.
const bool is_fp_loc_move =
!kSimpleFPAliasing && destination.IsFPLocationOperand();
const bool is_fp_loc_move = kFPAliasing == AliasingKind::kCombine &&
destination.IsFPLocationOperand();
// Perform a depth-first traversal of the move graph to resolve dependencies.
// Any unperformed, unpending move with a source the same as this one's
......
......@@ -81,13 +81,13 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
}
bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
const bool kComplexFPAliasing = !kSimpleFPAliasing &&
const bool kCombineFPAliasing = kFPAliasing == AliasingKind::kCombine &&
this->IsFPLocationOperand() &&
other.IsFPLocationOperand();
const bool kComplexS128SlotAliasing =
(this->IsSimd128StackSlot() && other.IsAnyStackSlot()) ||
(other.IsSimd128StackSlot() && this->IsAnyStackSlot());
if (!kComplexFPAliasing && !kComplexS128SlotAliasing) {
if (!kCombineFPAliasing && !kComplexS128SlotAliasing) {
return EqualsCanonicalized(other);
}
const LocationOperand& loc = *LocationOperand::cast(this);
......@@ -98,7 +98,7 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
MachineRepresentation rep = loc.representation();
MachineRepresentation other_rep = other_loc.representation();
if (kComplexFPAliasing && !kComplexS128SlotAliasing) {
if (kCombineFPAliasing && !kComplexS128SlotAliasing) {
if (rep == other_rep) return EqualsCanonicalized(other);
if (kind == LocationOperand::REGISTER) {
// FP register-register interference.
......@@ -126,7 +126,7 @@ bool InstructionOperand::InterferesWith(const InstructionOperand& other) const {
bool LocationOperand::IsCompatible(LocationOperand* op) {
if (IsRegister() || IsStackSlot()) {
return op->IsRegister() || op->IsStackSlot();
} else if (kSimpleFPAliasing) {
} else if (kFPAliasing != AliasingKind::kCombine) {
// A backend may choose to generate the same instruction sequence regardless
// of the FP representation. As a result, we can relax the compatibility and
// allow a Double to be moved in a Float for example. However, this is only
......@@ -162,8 +162,11 @@ std::ostream& operator<<(std::ostream& os, const InstructionOperand& op) {
<< ")";
case UnallocatedOperand::FIXED_FP_REGISTER:
return os << "(="
<< DoubleRegister::from_code(
unalloc->fixed_register_index())
<< (unalloc->IsSimd128Register()
? i::RegisterName((Simd128Register::from_code(
unalloc->fixed_register_index())))
: i::RegisterName(DoubleRegister::from_code(
unalloc->fixed_register_index())))
<< ")";
case UnallocatedOperand::MUST_HAVE_REGISTER:
return os << "(R)";
......@@ -296,8 +299,8 @@ bool ParallelMove::IsRedundant() const {
void ParallelMove::PrepareInsertAfter(
MoveOperands* move, ZoneVector<MoveOperands*>* to_eliminate) const {
bool no_aliasing =
kSimpleFPAliasing || !move->destination().IsFPLocationOperand();
bool no_aliasing = kFPAliasing != AliasingKind::kCombine ||
!move->destination().IsFPLocationOperand();
MoveOperands* replacement = nullptr;
MoveOperands* eliminated = nullptr;
for (MoveOperands* curr : *this) {
......
......@@ -695,12 +695,19 @@ uint64_t InstructionOperand::GetCanonicalizedValue() const {
if (IsAnyLocationOperand()) {
MachineRepresentation canonical = MachineRepresentation::kNone;
if (IsFPRegister()) {
if (kSimpleFPAliasing) {
if (kFPAliasing == AliasingKind::kOverlap) {
// We treat all FP register operands the same for simple aliasing.
canonical = MachineRepresentation::kFloat64;
} else if (kFPAliasing == AliasingKind::kIndependent) {
if (IsSimd128Register()) {
canonical = MachineRepresentation::kSimd128;
} else {
canonical = MachineRepresentation::kFloat64;
}
} else {
// We need to distinguish FP register operands of different reps when
// aliasing is not simple (e.g. ARM).
// aliasing is AliasingKind::kCombine (e.g. ARM).
DCHECK_EQ(kFPAliasing, AliasingKind::kCombine);
canonical = LocationOperand::cast(this)->representation();
}
}
......@@ -1696,6 +1703,12 @@ class V8_EXPORT_PRIVATE InstructionSequence final
return (representation_mask() & kFPRepMask) != 0;
}
// Whether any virtual register in this sequence uses the kSimd128
// machine representation.
bool HasSimd128VirtualRegisters() const {
  return (representation_mask() &
          RepresentationBit(MachineRepresentation::kSimd128)) != 0;
}
Instruction* GetBlockStart(RpoNumber rpo) const;
using const_iterator = InstructionDeque::const_iterator;
......
......@@ -78,6 +78,7 @@ class BlockState final {
private:
RegisterState* general_registers_in_state_;
RegisterState* double_registers_in_state_;
RegisterState* simd128_registers_in_state_;
DeferredBlocksRegion* deferred_blocks_region_;
......@@ -92,6 +93,8 @@ RegisterState* BlockState::register_in_state(RegisterKind kind) {
return general_registers_in_state_;
case RegisterKind::kDouble:
return double_registers_in_state_;
case RegisterKind::kSimd128:
return simd128_registers_in_state_;
}
}
......@@ -106,6 +109,10 @@ void BlockState::set_register_in_state(RegisterState* register_state,
DCHECK_NULL(double_registers_in_state_);
double_registers_in_state_ = register_state;
break;
case RegisterKind::kSimd128:
DCHECK_NULL(simd128_registers_in_state_);
simd128_registers_in_state_ = register_state;
break;
}
}
......@@ -180,7 +187,8 @@ class RegisterIndex final {
}
uintptr_t ToBit(MachineRepresentation rep) const {
if (kSimpleFPAliasing || rep != MachineRepresentation::kSimd128) {
if (kFPAliasing != AliasingKind::kCombine ||
rep != MachineRepresentation::kSimd128) {
return 1ull << ToInt();
} else {
DCHECK_EQ(rep, MachineRepresentation::kSimd128);
......@@ -1526,11 +1534,11 @@ class SinglePassRegisterAllocator final {
bool VirtualRegisterIsUnallocatedOrInReg(int virtual_register,
RegisterIndex reg);
// If {!kSimpleFPAliasing}, two FP registers alias one SIMD register. This
// returns the index of the higher aliasing FP register from the SIMD register
// index (which is the same as the lower register index).
// If {kFPAliasing} is {AliasingKind::kCombine}, two FP registers alias one
// SIMD register. This returns the index of the higher aliasing FP register
// from the SIMD register index (which is the same as the lower register
// index).
RegisterIndex simdSibling(RegisterIndex reg) const {
CHECK(!kSimpleFPAliasing); // Statically evaluated.
CHECK_EQ(kFPAliasing, AliasingKind::kCombine); // Statically evaluated.
RegisterIndex sibling = RegisterIndex{reg.ToInt() + 1};
#ifdef DEBUG
// Check that {reg} is indeed the lower SIMD half and {sibling} is the
......@@ -1581,7 +1589,7 @@ class SinglePassRegisterAllocator final {
RegisterBitVector allocated_registers_bits_;
RegisterBitVector same_input_output_registers_bits_;
// These fields are only used when kSimpleFPAliasing == false.
// These fields are only used when kFPAliasing == COMBINE.
base::Optional<ZoneVector<RegisterIndex>> float32_reg_code_to_index_;
base::Optional<ZoneVector<int>> index_to_float32_reg_code_;
base::Optional<ZoneVector<RegisterIndex>> simd128_reg_code_to_index_;
......@@ -1612,9 +1620,9 @@ SinglePassRegisterAllocator::SinglePassRegisterAllocator(
reg_code_to_index_[reg_code] = RegisterIndex(i);
}
// If the architecture has non-simple FP aliasing, initialize float and
// If the architecture has COMBINE FP aliasing, initialize float and
// simd128 specific register details.
if (!kSimpleFPAliasing && kind == RegisterKind::kDouble) {
if (kFPAliasing == AliasingKind::kCombine && kind == RegisterKind::kDouble) {
const RegisterConfiguration* config = data->config();
// Float registers.
......@@ -1784,15 +1792,17 @@ void SinglePassRegisterAllocator::MergeStateFrom(
processed_regs.Add(reg, rep);
bool reg_in_use = register_state_->IsAllocated(reg);
// For non-simple FP aliasing, the register is also "in use" if the
// For COMBINE FP aliasing, the register is also "in use" if the
// FP register for the upper half is allocated.
if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) {
if (kFPAliasing == AliasingKind::kCombine &&
rep == MachineRepresentation::kSimd128) {
reg_in_use |= register_state_->IsAllocated(simdSibling(reg));
}
// Similarly (but the other way around), the register might be the upper
// half of a SIMD register that is allocated.
if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat64 ||
rep == MachineRepresentation::kFloat32)) {
if (kFPAliasing == AliasingKind::kCombine &&
(rep == MachineRepresentation::kFloat64 ||
rep == MachineRepresentation::kFloat32)) {
int simd_reg_code;
CHECK_EQ(1, data_->config()->GetAliases(
rep, ToRegCode(reg, rep),
......@@ -1881,7 +1891,8 @@ void SinglePassRegisterAllocator::SpillRegisterAtMerge(
reg_state->Spill(reg, allocated, current_block_, data_);
}
// Also spill the "simd sibling" register if we want to use {reg} for SIMD.
if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) {
if (kFPAliasing == AliasingKind::kCombine &&
rep == MachineRepresentation::kSimd128) {
RegisterIndex sibling = simdSibling(reg);
if (reg_state->IsAllocated(sibling)) {
int virtual_register = reg_state->VirtualRegisterForRegister(sibling);
......@@ -1893,8 +1904,9 @@ void SinglePassRegisterAllocator::SpillRegisterAtMerge(
}
}
// Similarly, spill the whole SIMD register if we want to use a part of it.
if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat64 ||
rep == MachineRepresentation::kFloat32)) {
if (kFPAliasing == AliasingKind::kCombine &&
(rep == MachineRepresentation::kFloat64 ||
rep == MachineRepresentation::kFloat32)) {
int simd_reg_code;
CHECK_EQ(1, data_->config()->GetAliases(rep, ToRegCode(reg, rep),
MachineRepresentation::kSimd128,
......@@ -1980,7 +1992,8 @@ void SinglePassRegisterAllocator::CheckConsistency() {
RegisterIndex SinglePassRegisterAllocator::FromRegCode(
int reg_code, MachineRepresentation rep) const {
if (!kSimpleFPAliasing && kind() == RegisterKind::kDouble) {
if (kFPAliasing == AliasingKind::kCombine &&
kind() == RegisterKind::kDouble) {
if (rep == MachineRepresentation::kFloat32) {
return RegisterIndex(float32_reg_code_to_index_->at(reg_code));
} else if (rep == MachineRepresentation::kSimd128) {
......@@ -1994,7 +2007,8 @@ RegisterIndex SinglePassRegisterAllocator::FromRegCode(
int SinglePassRegisterAllocator::ToRegCode(RegisterIndex reg,
MachineRepresentation rep) const {
if (!kSimpleFPAliasing && kind() == RegisterKind::kDouble) {
if (kFPAliasing == AliasingKind::kCombine &&
kind() == RegisterKind::kDouble) {
if (rep == MachineRepresentation::kFloat32) {
DCHECK_NE(-1, index_to_float32_reg_code_->at(reg.ToInt()));
return index_to_float32_reg_code_->at(reg.ToInt());
......@@ -2129,7 +2143,8 @@ RegisterBitVector SinglePassRegisterAllocator::InUseBitmap(UsePosition pos) {
bool SinglePassRegisterAllocator::IsValidForRep(RegisterIndex reg,
MachineRepresentation rep) {
if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) {
if (kFPAliasing != AliasingKind::kCombine ||
kind() == RegisterKind::kGeneral) {
return true;
} else {
switch (rep) {
......@@ -2157,7 +2172,8 @@ RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
RegisterIndex SinglePassRegisterAllocator::ChooseFreeRegister(
const RegisterBitVector& allocated_regs, MachineRepresentation rep) {
RegisterIndex chosen_reg = RegisterIndex::Invalid();
if (kSimpleFPAliasing || kind() == RegisterKind::kGeneral) {
if (kFPAliasing != AliasingKind::kCombine ||
kind() == RegisterKind::kGeneral) {
chosen_reg = allocated_regs.GetFirstCleared(num_allocatable_registers_);
} else {
// If we don't have simple fp aliasing, we need to check each register
......@@ -2195,8 +2211,11 @@ RegisterIndex SinglePassRegisterAllocator::ChooseRegisterToSpill(
if (!IsValidForRep(reg, rep) || in_use.Contains(reg, rep)) continue;
// With non-simple FP aliasing, a SIMD register might block more than one FP
// register.
DCHECK_IMPLIES(kSimpleFPAliasing, register_state_->IsAllocated(reg));
if (!kSimpleFPAliasing && !register_state_->IsAllocated(reg)) continue;
DCHECK_IMPLIES(kFPAliasing != AliasingKind::kCombine,
register_state_->IsAllocated(reg));
if (kFPAliasing == AliasingKind::kCombine &&
!register_state_->IsAllocated(reg))
continue;
VirtualRegisterData& vreg_data =
VirtualRegisterDataFor(VirtualRegisterForRegister(reg));
......@@ -2245,7 +2264,8 @@ void SinglePassRegisterAllocator::SpillRegisterAndPotentialSimdSibling(
RegisterIndex reg, MachineRepresentation rep) {
SpillRegister(reg);
if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) {
if (kFPAliasing == AliasingKind::kCombine &&
rep == MachineRepresentation::kSimd128) {
SpillRegister(simdSibling(reg));
}
}
......@@ -2636,7 +2656,8 @@ void SinglePassRegisterAllocator::ReserveFixedRegister(
}
// Also potentially spill the "sibling SIMD register" on architectures where a
// SIMD register aliases two FP registers.
if (!kSimpleFPAliasing && rep == MachineRepresentation::kSimd128) {
if (kFPAliasing == AliasingKind::kCombine &&
rep == MachineRepresentation::kSimd128) {
if (register_state_->IsAllocated(simdSibling(reg)) &&
!DefinedAfter(virtual_register, instr_index, pos)) {
SpillRegister(simdSibling(reg));
......@@ -2644,8 +2665,9 @@ void SinglePassRegisterAllocator::ReserveFixedRegister(
}
// Similarly (but the other way around), spill a SIMD register that (partly)
// overlaps with a fixed FP register.
if (!kSimpleFPAliasing && (rep == MachineRepresentation::kFloat64 ||
rep == MachineRepresentation::kFloat32)) {
if (kFPAliasing == AliasingKind::kCombine &&
(rep == MachineRepresentation::kFloat64 ||
rep == MachineRepresentation::kFloat32)) {
int simd_reg_code;
CHECK_EQ(
1, data_->config()->GetAliases(
......
......@@ -38,7 +38,7 @@ class OperandSet {
void InsertOp(const InstructionOperand& op) {
set_->push_back(op);
if (!kSimpleFPAliasing && op.IsFPRegister())
if (kFPAliasing == AliasingKind::kCombine && op.IsFPRegister())
fp_reps_ |= RepresentationBit(LocationOperand::cast(op).representation());
}
......@@ -52,7 +52,7 @@ class OperandSet {
bool ContainsOpOrAlias(const InstructionOperand& op) const {
if (Contains(op)) return true;
if (!kSimpleFPAliasing && op.IsFPRegister()) {
if (kFPAliasing == AliasingKind::kCombine && op.IsFPRegister()) {
// Platforms where FP registers have complex aliasing need extra checks.
const LocationOperand& loc = LocationOperand::cast(op);
MachineRepresentation rep = loc.representation();
......
......@@ -12,7 +12,7 @@ namespace v8 {
namespace internal {
namespace compiler {
enum class RegisterKind { kGeneral, kDouble };
enum class RegisterKind { kGeneral, kDouble, kSimd128 };
inline int GetRegisterCount(const RegisterConfiguration* config,
RegisterKind kind) {
......@@ -21,6 +21,8 @@ inline int GetRegisterCount(const RegisterConfiguration* config,
return config->num_general_registers();
case RegisterKind::kDouble:
return config->num_double_registers();
case RegisterKind::kSimd128:
return config->num_simd128_registers();
}
}
......@@ -31,6 +33,8 @@ inline int GetAllocatableRegisterCount(const RegisterConfiguration* config,
return config->num_allocatable_general_registers();
case RegisterKind::kDouble:
return config->num_allocatable_double_registers();
case RegisterKind::kSimd128:
return config->num_allocatable_simd128_registers();
}
}
......@@ -41,6 +45,8 @@ inline const int* GetAllocatableRegisterCodes(
return config->allocatable_general_codes();
case RegisterKind::kDouble:
return config->allocatable_double_codes();
case RegisterKind::kSimd128:
return config->allocatable_simd128_codes();
}
}
......
This diff is collapsed.
......@@ -372,8 +372,10 @@ class TopTierRegisterAllocationData final : public RegisterAllocationData {
DelayedReferences delayed_references_;
BitVector* assigned_registers_;
BitVector* assigned_double_registers_;
BitVector* assigned_simd128_registers_;
BitVector* fixed_register_use_;
BitVector* fixed_fp_register_use_;
BitVector* fixed_simd128_register_use_;
int virtual_register_count_;
RangesWithPreassignedSlots preassigned_slot_ranges_;
ZoneVector<ZoneVector<LiveRange*>> spill_state_;
......@@ -1244,6 +1246,7 @@ class LiveRangeBuilder final : public ZoneObject {
TopLevelLiveRange* FixedLiveRangeFor(int index, SpillMode spill_mode);
TopLevelLiveRange* FixedFPLiveRangeFor(int index, MachineRepresentation rep,
SpillMode spill_mode);
TopLevelLiveRange* FixedSIMD128LiveRangeFor(int index, SpillMode spill_mode);
void MapPhiHint(InstructionOperand* operand, UsePosition* use_pos);
void ResolvePhiHint(InstructionOperand* operand, UsePosition* use_pos);
......@@ -1484,6 +1487,8 @@ class LinearScanAllocator final : public RegisterAllocator {
LiveRange* range, const base::Vector<LifetimePosition>& free_until_pos);
void GetFPRegisterSet(MachineRepresentation rep, int* num_regs,
int* num_codes, const int** codes) const;
void GetSIMD128RegisterSet(int* num_regs, int* num_codes,
const int** codes) const;
void FindFreeRegistersForRange(LiveRange* range,
base::Vector<LifetimePosition> free_until_pos);
void ProcessCurrentRange(LiveRange* current, SpillMode spill_mode);
......
......@@ -3073,7 +3073,7 @@ VISIT_SIMD_QFMOP(F32x4Qfms, kRiscvF32x4Qfms)
void InstructionSelector::VisitI32x4DotI16x8S(Node* node) {
RiscvOperandGenerator g(this);
InstructionOperand temp = g.TempFpRegister(v16);
InstructionOperand temp1 = g.TempFpRegister(v17);
InstructionOperand temp1 = g.TempFpRegister(v14);
InstructionOperand temp2 = g.TempFpRegister(v30);
InstructionOperand dst = g.DefineAsRegister(node);
this->Emit(kRiscvVwmul, temp, g.UseRegister(node->InputAt(0)),
......
......@@ -2262,6 +2262,17 @@ struct AllocateFPRegistersPhase {
}
};
template <typename RegAllocator>
struct AllocateSimd128RegistersPhase {
DECL_PIPELINE_PHASE_CONSTANTS(AllocateSIMD128Registers)
void Run(PipelineData* data, Zone* temp_zone) {
RegAllocator allocator(data->top_tier_register_allocation_data(),
RegisterKind::kSimd128, temp_zone);
allocator.AllocateRegisters();
}
};
struct DecideSpillingModePhase {
DECL_PIPELINE_PHASE_CONSTANTS(DecideSpillingMode)
......@@ -3734,6 +3745,11 @@ void PipelineImpl::AllocateRegistersForTopTier(
Run<AllocateFPRegistersPhase<LinearScanAllocator>>();
}
if (data->sequence()->HasSimd128VirtualRegisters() &&
(kFPAliasing == AliasingKind::kIndependent)) {
Run<AllocateSimd128RegistersPhase<LinearScanAllocator>>();
}
Run<DecideSpillingModePhase>();
Run<AssignSpillSlotsPhase>();
Run<CommitAssignmentPhase>();
......
......@@ -317,8 +317,8 @@ class RuntimeCallTimer final {
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, ScopeAnalysis) \
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, Script) \
ADD_THREAD_SPECIFIC_COUNTER(V, Compile, CompileTask) \
\
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateFPRegisters) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateSIMD128Registers) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AllocateGeneralRegisters) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssembleCode) \
ADD_THREAD_SPECIFIC_COUNTER(V, Optimize, AssignSpillSlots) \
......
......@@ -17,7 +17,7 @@ namespace internal {
namespace wasm {
static constexpr bool kNeedI64RegPair = kSystemPointerSize == 4;
static constexpr bool kNeedS128RegPair = !kSimpleFPAliasing;
static constexpr bool kNeedS128RegPair = kFPAliasing == AliasingKind::kCombine;
enum RegClass : uint8_t {
kGpReg,
......@@ -190,7 +190,7 @@ class LiftoffRegister {
// LiftoffRegister.
static LiftoffRegister from_external_code(RegClass rc, ValueKind kind,
int code) {
if (!kSimpleFPAliasing && kind == kF32) {
if (kFPAliasing == AliasingKind::kCombine && kind == kF32) {
// Liftoff assumes a one-to-one mapping between float registers and
// double registers, and so does not distinguish between f32 and f64
// registers. The f32 register code must therefore be halved in order
......
......@@ -428,11 +428,6 @@
'test-cpu-profiler/CrossScriptInliningCallerLineNumbers2': [SKIP],
# SIMD not fully implemented yet.
'test-run-wasm-relaxed-simd/*': [SKIP],
'test-run-wasm-simd/RunWasm_F64x2ExtractLaneWithI64x2_liftoff': [SKIP],
'test-run-wasm-simd/RunWasm_I64x2ExtractWithF64x2_liftoff': [SKIP],
'test-run-wasm-simd-liftoff/*': [SKIP],
'test-run-wasm-simd/*':[SKIP],
'test-gc/RunWasmLiftoff_RefTrivialCasts': [SKIP],
'test-gc/RunWasmTurbofan_RefTrivialCasts': [SKIP],
......
......@@ -460,7 +460,7 @@ class TestEnvironment : public HandleAndZoneScope {
((kDoubleRegisterCount % 2) == 0) && ((kDoubleRegisterCount % 3) == 0),
"kDoubleRegisterCount should be a multiple of two and three.");
for (int i = 0; i < kDoubleRegisterCount; i += 2) {
if (kSimpleFPAliasing) {
if (kFPAliasing != AliasingKind::kCombine) {
// Allocate three registers at once if kSimd128 is supported, else
// allocate in pairs.
AddRegister(&test_signature, MachineRepresentation::kFloat32,
......
......@@ -17,7 +17,7 @@ const auto GetRegConfig = RegisterConfiguration::Default;
// simplify ParallelMove equivalence testing.
void GetCanonicalOperands(const InstructionOperand& op,
std::vector<InstructionOperand>* fragments) {
CHECK(!kSimpleFPAliasing);
CHECK_EQ(kFPAliasing, AliasingKind::kCombine);
CHECK(op.IsFPLocationOperand());
const LocationOperand& loc = LocationOperand::cast(op);
MachineRepresentation rep = loc.representation();
......@@ -51,7 +51,7 @@ class InterpreterState {
CHECK(!m->IsRedundant());
const InstructionOperand& src = m->source();
const InstructionOperand& dst = m->destination();
if (!kSimpleFPAliasing && src.IsFPLocationOperand() &&
if (kFPAliasing == AliasingKind::kCombine && src.IsFPLocationOperand() &&
dst.IsFPLocationOperand()) {
// Canonicalize FP location-location moves by fragmenting them into
// an equivalent sequence of float32 moves, to simplify state
......@@ -137,8 +137,15 @@ class InterpreterState {
// Preserve FP representation when FP register aliasing is complex.
// Otherwise, canonicalize to kFloat64.
if (IsFloatingPoint(loc_op.representation())) {
rep = kSimpleFPAliasing ? MachineRepresentation::kFloat64
: loc_op.representation();
if (kFPAliasing == AliasingKind::kIndependent) {
rep = IsSimd128(loc_op.representation())
? MachineRepresentation::kSimd128
: MachineRepresentation::kFloat64;
} else if (kFPAliasing == AliasingKind::kOverlap) {
rep = MachineRepresentation::kFloat64;
} else {
rep = loc_op.representation();
}
}
if (loc_op.IsAnyRegister()) {
index = loc_op.register_code();
......@@ -234,7 +241,8 @@ class ParallelMoveCreator : public HandleAndZoneScope {
// On architectures where FP register aliasing is non-simple, update the
// destinations set with the float equivalents of the operand and check
// that all destinations are unique and do not alias each other.
if (!kSimpleFPAliasing && mo.destination().IsFPLocationOperand()) {
if (kFPAliasing == AliasingKind::kCombine &&
mo.destination().IsFPLocationOperand()) {
std::vector<InstructionOperand> dst_fragments;
GetCanonicalOperands(dst, &dst_fragments);
CHECK(!dst_fragments.empty());
......@@ -383,7 +391,7 @@ void RunTest(ParallelMove* pm, Zone* zone) {
TEST(Aliasing) {
// On platforms with simple aliasing, these parallel moves are ill-formed.
if (kSimpleFPAliasing) return;
if (kFPAliasing != AliasingKind::kCombine) return;
ParallelMoveCreator pmc;
Zone* zone = pmc.main_zone();
......
......@@ -26,10 +26,10 @@ TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
int general_codes[kNumAllocatableGeneralRegs] = {1, 2};
int double_codes[kNumAllocatableDoubleRegs] = {2, 3};
RegisterConfiguration test(kNumGeneralRegs, kNumDoubleRegs,
kNumAllocatableGeneralRegs,
kNumAllocatableDoubleRegs, general_codes,
double_codes, RegisterConfiguration::OVERLAP);
RegisterConfiguration test(AliasingKind::kOverlap, kNumGeneralRegs,
kNumDoubleRegs, 0, kNumAllocatableGeneralRegs,
kNumAllocatableDoubleRegs, 0, general_codes,
double_codes);
EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs);
EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs);
......@@ -62,10 +62,10 @@ TEST_F(RegisterConfigurationUnitTest, CombineAliasing) {
int general_codes[] = {1, 2};
int double_codes[] = {2, 3, 16}; // reg 16 should not alias registers 32, 33.
RegisterConfiguration test(kNumGeneralRegs, kNumDoubleRegs,
kNumAllocatableGeneralRegs,
kNumAllocatableDoubleRegs, general_codes,
double_codes, RegisterConfiguration::COMBINE);
RegisterConfiguration test(AliasingKind::kCombine, kNumGeneralRegs,
kNumDoubleRegs, 0, kNumAllocatableGeneralRegs,
kNumAllocatableDoubleRegs, 0, general_codes,
double_codes);
// There are 3 allocatable double regs, but only 2 can alias float regs.
EXPECT_EQ(test.num_allocatable_float_registers(), 4);
......
......@@ -24,6 +24,7 @@ InstructionSequenceTest::InstructionSequenceTest()
: sequence_(nullptr),
num_general_registers_(Register::kNumRegisters),
num_double_registers_(DoubleRegister::kNumRegisters),
num_simd128_registers_(Simd128Register::kNumRegisters),
instruction_blocks_(zone()),
current_block_(nullptr),
block_returns_(false) {}
......@@ -69,11 +70,10 @@ int InstructionSequenceTest::GetAllocatableCode(int index,
const RegisterConfiguration* InstructionSequenceTest::config() {
if (!config_) {
config_.reset(new RegisterConfiguration(
num_general_registers_, num_double_registers_, num_general_registers_,
num_double_registers_, kAllocatableCodes.data(),
kAllocatableCodes.data(),
kSimpleFPAliasing ? RegisterConfiguration::OVERLAP
: RegisterConfiguration::COMBINE));
kFPAliasing, num_general_registers_, num_double_registers_,
num_simd128_registers_, num_general_registers_, num_double_registers_,
num_simd128_registers_, kAllocatableCodes.data(),
kAllocatableCodes.data(), kAllocatableCodes.data()));
}
return config_.get();
}
......
......@@ -279,6 +279,7 @@ class InstructionSequenceTest : public TestWithIsolateAndZone {
InstructionSequence* sequence_;
int num_general_registers_;
int num_double_registers_;
int num_simd128_registers_;
// Block building state.
InstructionBlocks instruction_blocks_;
......
......@@ -85,7 +85,7 @@ TEST_F(InstructionTest, OperandInterference) {
EXPECT_TRUE(Interfere(LocationOperand::STACK_SLOT, kDouble, i, kDouble, i));
}
if (kSimpleFPAliasing) {
if (kFPAliasing != AliasingKind::kCombine) {
// Simple FP aliasing: interfering registers of different reps have the same
// index.
for (int i = 0; i < RegisterConfiguration::kMaxFPRegisters; ++i) {
......@@ -162,7 +162,7 @@ TEST_F(InstructionTest, PrepareInsertAfter) {
CHECK(Contains(&to_eliminate, d2, d0));
}
if (!kSimpleFPAliasing) {
if (kFPAliasing == AliasingKind::kCombine) {
// Moves inserted after should cause all interfering moves to be eliminated.
auto s0 = AllocatedOperand(LocationOperand::REGISTER,
MachineRepresentation::kFloat32, 0);
......
......@@ -360,7 +360,7 @@ TEST_F(MoveOptimizerTest, ClobberedFPDestinationsAreEliminated) {
EmitNop();
Instruction* first_instr = LastInstruction();
AddMove(first_instr, FPReg(4, kFloat64), FPReg(1, kFloat64));
if (!kSimpleFPAliasing) {
if (kFPAliasing == AliasingKind::kCombine) {
// We clobber q0 below. This is aliased by d0, d1, s0, s1, s2, and s3.
// Add moves to registers s2 and s3.
AddMove(first_instr, FPReg(10, kFloat32), FPReg(0, kFloat32));
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment