Commit 55b01ccb authored by bbudge, committed by Commit bot

[Turbofan] Revert FP register aliasing support on Arm.

- Changes register allocation to use only even-numbered float registers on Arm.
- Turns on float32 testing in test-gap-resolver.cc.

This is effectively a revert of:
https://codereview.chromium.org/2086653003/

LOG=N
BUG=v8:4124,v8:5202

Review-Url: https://codereview.chromium.org/2176173003
Cr-Commit-Position: refs/heads/master@{#38151}
parent c07c675e
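
For background: on ARM VFP, each double-precision register d<n> overlaps the single-precision pair s<2n> and s<2n+1>. By handing out only the even-numbered S register of each allocated D register (the diff below does this via LowDwVfpRegister::low() in the code generator and the "code * 2" mapping in the tests), every float32 value occupies a distinct double register, so the allocator can again treat all FP operands as doubles. A minimal C++ sketch of that mapping follows, assuming illustrative helper names that are not part of V8:

// Sketch of the ARM VFP register aliasing this commit sidesteps.
// These helpers are illustrative only; they are not V8 APIs.
#include <cassert>

// d<n> aliases the single-precision pair s<2n> (low) and s<2n+1> (high).
int LowSCodeForD(int d_code) { return d_code * 2; }
int HighSCodeForD(int d_code) { return d_code * 2 + 1; }

// With this change a float32 value only ever lives in the low (even) S
// register of its D register, so the owning D register is code / 2.
int DCodeForAllocatedS(int s_code) {
  assert(s_code % 2 == 0);  // odd S registers are never allocated here
  return s_code / 2;
}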
......@@ -136,14 +136,25 @@ class ArmOperandConverter final : public InstructionOperandConverter {
FrameOffset offset = frame_access_state()->GetFrameOffset(slot);
return MemOperand(offset.from_stack_pointer() ? sp : fp, offset.offset());
}
};
FloatRegister InputFloat32Register(size_t index) {
return ToFloat32Register(instr_->InputAt(index));
}
FloatRegister OutputFloat32Register() {
return ToFloat32Register(instr_->Output());
}
FloatRegister ToFloat32Register(InstructionOperand* op) {
return LowDwVfpRegister::from_code(ToDoubleRegister(op).code()).low();
}
};
namespace {
class OutOfLineLoadFloat final : public OutOfLineCode {
class OutOfLineLoadFloat32 final : public OutOfLineCode {
public:
OutOfLineLoadFloat(CodeGenerator* gen, SwVfpRegister result)
OutOfLineLoadFloat32(CodeGenerator* gen, SwVfpRegister result)
: OutOfLineCode(gen), result_(result) {}
void Generate() final {
......@@ -1074,54 +1085,54 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
case kArmVcmpF32:
if (instr->InputAt(1)->IsFPRegister()) {
__ VFPCompareAndSetFlags(i.InputFloatRegister(0),
i.InputFloatRegister(1));
__ VFPCompareAndSetFlags(i.InputFloat32Register(0),
i.InputFloat32Register(1));
} else {
DCHECK(instr->InputAt(1)->IsImmediate());
// 0.0 is the only immediate supported by vcmp instructions.
DCHECK(i.InputFloat32(1) == 0.0f);
__ VFPCompareAndSetFlags(i.InputFloatRegister(0), i.InputFloat32(1));
__ VFPCompareAndSetFlags(i.InputFloat32Register(0), i.InputFloat32(1));
}
DCHECK_EQ(SetCC, i.OutputSBit());
break;
case kArmVaddF32:
__ vadd(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1));
__ vadd(i.OutputFloat32Register(), i.InputFloat32Register(0),
i.InputFloat32Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVsubF32:
__ vsub(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1));
__ vsub(i.OutputFloat32Register(), i.InputFloat32Register(0),
i.InputFloat32Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmulF32:
__ vmul(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1));
__ vmul(i.OutputFloat32Register(), i.InputFloat32Register(0),
i.InputFloat32Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlaF32:
__ vmla(i.OutputFloatRegister(), i.InputFloatRegister(1),
i.InputFloatRegister(2));
__ vmla(i.OutputFloat32Register(), i.InputFloat32Register(1),
i.InputFloat32Register(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmlsF32:
__ vmls(i.OutputFloatRegister(), i.InputFloatRegister(1),
i.InputFloatRegister(2));
__ vmls(i.OutputFloat32Register(), i.InputFloat32Register(1),
i.InputFloat32Register(2));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVdivF32:
__ vdiv(i.OutputFloatRegister(), i.InputFloatRegister(0),
i.InputFloatRegister(1));
__ vdiv(i.OutputFloat32Register(), i.InputFloat32Register(0),
i.InputFloat32Register(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVsqrtF32:
__ vsqrt(i.OutputFloatRegister(), i.InputFloatRegister(0));
__ vsqrt(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVabsF32:
__ vabs(i.OutputFloatRegister(), i.InputFloatRegister(0));
__ vabs(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVnegF32:
__ vneg(i.OutputFloatRegister(), i.InputFloatRegister(0));
__ vneg(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVcmpF64:
if (instr->InputAt(1)->IsFPRegister()) {
......@@ -1189,19 +1200,19 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vneg(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintmF32:
__ vrintm(i.OutputFloatRegister(), i.InputFloatRegister(0));
__ vrintm(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVrintmF64:
__ vrintm(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintpF32:
__ vrintp(i.OutputFloatRegister(), i.InputFloatRegister(0));
__ vrintp(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVrintpF64:
__ vrintp(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintzF32:
__ vrintz(i.OutputFloatRegister(), i.InputFloatRegister(0));
__ vrintz(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVrintzF64:
__ vrintz(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
......@@ -1210,32 +1221,32 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
__ vrinta(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVrintnF32:
__ vrintn(i.OutputFloatRegister(), i.InputFloatRegister(0));
__ vrintn(i.OutputFloat32Register(), i.InputFloat32Register(0));
break;
case kArmVrintnF64:
__ vrintn(i.OutputDoubleRegister(), i.InputDoubleRegister(0));
break;
case kArmVcvtF32F64: {
__ vcvt_f32_f64(i.OutputFloatRegister(), i.InputDoubleRegister(0));
__ vcvt_f32_f64(i.OutputFloat32Register(), i.InputDoubleRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF64F32: {
__ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloatRegister(0));
__ vcvt_f64_f32(i.OutputDoubleRegister(), i.InputFloat32Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF32S32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_s32(i.OutputFloatRegister(), scratch);
__ vcvt_f32_s32(i.OutputFloat32Register(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVcvtF32U32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vmov(scratch, i.InputRegister(0));
__ vcvt_f32_u32(i.OutputFloatRegister(), scratch);
__ vcvt_f32_u32(i.OutputFloat32Register(), scratch);
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
......@@ -1255,7 +1266,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmVcvtS32F32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vcvt_s32_f32(scratch, i.InputFloatRegister(0));
__ vcvt_s32_f32(scratch, i.InputFloat32Register(0));
__ vmov(i.OutputRegister(), scratch);
// Avoid INT32_MAX as an overflow indicator and use INT32_MIN instead,
// because INT32_MIN allows easier out-of-bounds detection.
......@@ -1266,7 +1277,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
}
case kArmVcvtU32F32: {
SwVfpRegister scratch = kScratchDoubleReg.low();
__ vcvt_u32_f32(scratch, i.InputFloatRegister(0));
__ vcvt_u32_f32(scratch, i.InputFloat32Register(0));
__ vmov(i.OutputRegister(), scratch);
// Avoid UINT32_MAX as an overflow indicator and use 0 instead,
// because 0 allows easier out-of-bounds detection.
......@@ -1290,11 +1301,11 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
break;
}
case kArmVmovU32F32:
__ vmov(i.OutputRegister(), i.InputFloatRegister(0));
__ vmov(i.OutputRegister(), i.InputFloat32Register(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovF32U32:
__ vmov(i.OutputFloatRegister(), i.InputRegister(0));
__ vmov(i.OutputFloat32Register(), i.InputRegister(0));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVmovLowU32F64:
......@@ -1352,12 +1363,12 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF32: {
__ vldr(i.OutputFloatRegister(), i.InputOffset());
__ vldr(i.OutputFloat32Register(), i.InputOffset());
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
}
case kArmVstrF32:
__ vstr(i.InputFloatRegister(0), i.InputOffset(1));
__ vstr(i.InputFloat32Register(0), i.InputOffset(1));
DCHECK_EQ(LeaveCC, i.OutputSBit());
break;
case kArmVldrF64:
......@@ -1453,7 +1464,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
frame_access_state()->IncreaseSPDelta(kDoubleSize / kPointerSize);
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, op->representation());
__ vpush(i.InputFloatRegister(0));
__ vpush(i.InputFloat32Register(0));
frame_access_state()->IncreaseSPDelta(1);
}
} else {
......@@ -1484,7 +1495,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_LOAD_INTEGER(ldr);
break;
case kCheckedLoadFloat32:
ASSEMBLE_CHECKED_LOAD_FP(Float);
ASSEMBLE_CHECKED_LOAD_FP(Float32);
break;
case kCheckedLoadFloat64:
ASSEMBLE_CHECKED_LOAD_FP(Double);
......@@ -1499,7 +1510,7 @@ CodeGenerator::CodeGenResult CodeGenerator::AssembleArchInstruction(
ASSEMBLE_CHECKED_STORE_INTEGER(str);
break;
case kCheckedStoreFloat32:
ASSEMBLE_CHECKED_STORE_FP(Float);
ASSEMBLE_CHECKED_STORE_FP(Float32);
break;
case kCheckedStoreFloat64:
ASSEMBLE_CHECKED_STORE_FP(Double);
......@@ -1817,7 +1828,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
__ mov(ip, Operand(bit_cast<int32_t>(src.ToFloat32())));
__ str(ip, dst);
} else {
SwVfpRegister dst = g.ToFloatRegister(destination);
SwVfpRegister dst = g.ToFloat32Register(destination);
__ vmov(dst, src.ToFloat32());
}
} else {
......@@ -1831,50 +1842,23 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
}
}
} else if (source->IsFPRegister()) {
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
__ vstr(src, g.ToMemOperand(destination));
}
DwVfpRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
__ Move(dst, src);
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, rep);
SwVfpRegister src = g.ToFloatRegister(source);
if (destination->IsFPRegister()) {
SwVfpRegister dst = g.ToFloatRegister(destination);
__ Move(dst, src);
} else {
DCHECK(destination->IsFPStackSlot());
__ vstr(src, g.ToMemOperand(destination));
}
DCHECK(destination->IsFPStackSlot());
__ vstr(src, g.ToMemOperand(destination));
}
} else if (source->IsFPStackSlot()) {
MemOperand src = g.ToMemOperand(source);
MachineRepresentation rep =
LocationOperand::cast(destination)->representation();
if (destination->IsFPRegister()) {
if (rep == MachineRepresentation::kFloat64) {
__ vldr(g.ToDoubleRegister(destination), src);
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, rep);
__ vldr(g.ToFloatRegister(destination), src);
}
} else {
DCHECK(destination->IsFPStackSlot());
if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister temp = kScratchDoubleReg;
__ vldr(temp, src);
__ vstr(temp, g.ToMemOperand(destination));
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, rep);
SwVfpRegister temp = kScratchDoubleReg.low();
__ vldr(temp, src);
__ vstr(temp, g.ToMemOperand(destination));
}
}
} else {
UNREACHABLE();
......@@ -1914,9 +1898,7 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ str(temp_0, dst);
__ vstr(temp_1, src);
} else if (source->IsFPRegister()) {
MachineRepresentation rep = LocationOperand::cast(source)->representation();
LowDwVfpRegister temp = kScratchDoubleReg;
if (rep == MachineRepresentation::kFloat64) {
DwVfpRegister src = g.ToDoubleRegister(source);
if (destination->IsFPRegister()) {
DwVfpRegister dst = g.ToDoubleRegister(destination);
......@@ -1930,30 +1912,12 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ vldr(src, dst);
__ vstr(temp, dst);
}
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, rep);
SwVfpRegister src = g.ToFloatRegister(source);
if (destination->IsFPRegister()) {
SwVfpRegister dst = g.ToFloatRegister(destination);
__ Move(temp.low(), src);
__ Move(src, dst);
__ Move(dst, temp.low());
} else {
DCHECK(destination->IsFPStackSlot());
MemOperand dst = g.ToMemOperand(destination);
__ Move(temp.low(), src);
__ vldr(src, dst);
__ vstr(temp.low(), dst);
}
}
} else if (source->IsFPStackSlot()) {
DCHECK(destination->IsFPStackSlot());
Register temp_0 = kScratchReg;
LowDwVfpRegister temp_1 = kScratchDoubleReg;
MemOperand src0 = g.ToMemOperand(source);
MemOperand dst0 = g.ToMemOperand(destination);
MachineRepresentation rep = LocationOperand::cast(source)->representation();
if (rep == MachineRepresentation::kFloat64) {
MemOperand src1(src0.rn(), src0.offset() + kPointerSize);
MemOperand dst1(dst0.rn(), dst0.offset() + kPointerSize);
__ vldr(temp_1, dst0); // Save destination in temp_1.
......@@ -1962,13 +1926,6 @@ void CodeGenerator::AssembleSwap(InstructionOperand* source,
__ ldr(temp_0, src1);
__ str(temp_0, dst1);
__ vstr(temp_1, src0);
} else {
DCHECK_EQ(MachineRepresentation::kFloat32, rep);
__ vldr(temp_1.low(), dst0); // Save destination in temp_1.
__ ldr(temp_0, src0); // Then use temp_0 to copy source to destination.
__ str(temp_0, dst0);
__ vstr(temp_1.low(), src0);
}
} else {
// No other combinations are possible.
UNREACHABLE();
......
......@@ -65,14 +65,7 @@ FlagsCondition CommuteFlagsCondition(FlagsCondition condition) {
}
bool InstructionOperand::InterferesWith(const InstructionOperand& that) const {
if (!IsFPRegister() || !that.IsFPRegister() || kSimpleFPAliasing)
return EqualsCanonicalized(that);
// Both operands are fp registers and aliasing is non-simple.
const LocationOperand& loc1 = *LocationOperand::cast(this);
const LocationOperand& loc2 = LocationOperand::cast(that);
return GetRegConfig()->AreAliases(loc1.representation(), loc1.register_code(),
loc2.representation(),
loc2.register_code());
return EqualsCanonicalized(that);
}
void InstructionOperand::Print(const RegisterConfiguration* config) const {
......
......@@ -607,14 +607,8 @@ uint64_t InstructionOperand::GetCanonicalizedValue() const {
if (IsAllocated() || IsExplicit()) {
MachineRepresentation canonical = MachineRepresentation::kNone;
if (IsFPRegister()) {
if (kSimpleFPAliasing) {
// We treat all FP register operands the same for simple aliasing.
canonical = MachineRepresentation::kFloat64;
} else {
// We need to distinguish FP register operands of different reps when
// aliasing is not simple (e.g. ARM).
canonical = LocationOperand::cast(this)->representation();
}
// We treat all FP register operands the same for simple aliasing.
canonical = MachineRepresentation::kFloat64;
}
return InstructionOperand::KindField::update(
LocationOperand::RepresentationField::update(this->value_, canonical),
......
......@@ -28,48 +28,20 @@ typedef ZoneMap<MoveKey, unsigned, MoveKeyCompare> MoveMap;
typedef ZoneSet<InstructionOperand, CompareOperandModuloType> OperandSet;
bool Blocks(const OperandSet& set, const InstructionOperand& operand) {
if (set.find(operand) != set.end()) return true;
// Only FP registers on archs with non-simple aliasing need extra checks.
if (!operand.IsFPRegister() || kSimpleFPAliasing) return false;
if (!operand.IsFPRegister()) return set.find(operand) != set.end();
// Check operand against operands of other FP types for interference.
const LocationOperand& loc = LocationOperand::cast(operand);
MachineRepresentation rep = loc.representation();
MachineRepresentation other_rep1, other_rep2;
switch (rep) {
case MachineRepresentation::kFloat32:
other_rep1 = MachineRepresentation::kFloat64;
other_rep2 = MachineRepresentation::kSimd128;
break;
case MachineRepresentation::kFloat64:
other_rep1 = MachineRepresentation::kFloat32;
other_rep2 = MachineRepresentation::kSimd128;
break;
case MachineRepresentation::kSimd128:
other_rep1 = MachineRepresentation::kFloat32;
other_rep2 = MachineRepresentation::kFloat64;
break;
default:
UNREACHABLE();
break;
}
const RegisterConfiguration* config = RegisterConfiguration::Turbofan();
int base = -1;
int aliases = config->GetAliases(rep, loc.register_code(), other_rep1, &base);
DCHECK(aliases > 0 || (aliases == 0 && base == -1));
while (aliases--) {
if (set.find(LocationOperand(loc.kind(), loc.location_kind(), other_rep1,
base + aliases)) != set.end())
return true;
}
aliases = config->GetAliases(rep, loc.register_code(), other_rep2, &base);
DCHECK(aliases > 0 || (aliases == 0 && base == -1));
while (aliases--) {
if (set.find(LocationOperand(loc.kind(), loc.location_kind(), other_rep2,
base + aliases)) != set.end())
return true;
}
return false;
if (loc.representation() == MachineRepresentation::kFloat64) {
return set.find(operand) != set.end() ||
set.find(LocationOperand(loc.kind(), loc.location_kind(),
MachineRepresentation::kFloat32,
loc.register_code())) != set.end();
}
DCHECK_EQ(MachineRepresentation::kFloat32, loc.representation());
return set.find(operand) != set.end() ||
set.find(LocationOperand(loc.kind(), loc.location_kind(),
MachineRepresentation::kFloat64,
loc.register_code())) != set.end();
}
int FindFirstNonEmptySlot(const Instruction* instr) {
......
......@@ -33,7 +33,7 @@ int GetRegisterCount(const RegisterConfiguration* cfg, RegisterKind kind) {
int GetAllocatableRegisterCount(const RegisterConfiguration* cfg,
RegisterKind kind) {
return kind == FP_REGISTERS ? cfg->num_allocatable_double_registers()
return kind == FP_REGISTERS ? cfg->num_allocatable_aliased_double_registers()
: cfg->num_allocatable_general_registers();
}
......@@ -72,13 +72,19 @@ int GetByteWidth(MachineRepresentation rep) {
case MachineRepresentation::kWord16:
case MachineRepresentation::kWord32:
case MachineRepresentation::kTagged:
return kPointerSize;
case MachineRepresentation::kFloat32:
// TODO(bbudge) Eliminate this when FP register aliasing works.
#if V8_TARGET_ARCH_ARM
return kDoubleSize;
#else
return kPointerSize;
#endif
case MachineRepresentation::kWord64:
case MachineRepresentation::kFloat64:
return 8;
return kDoubleSize;
case MachineRepresentation::kSimd128:
return 16;
return kSimd128Size;
case MachineRepresentation::kNone:
break;
}
......@@ -1335,12 +1341,8 @@ RegisterAllocationData::RegisterAllocationData(
allocation_zone()),
fixed_live_ranges_(this->config()->num_general_registers(), nullptr,
allocation_zone()),
fixed_float_live_ranges_(this->config()->num_float_registers(), nullptr,
allocation_zone()),
fixed_double_live_ranges_(this->config()->num_double_registers(), nullptr,
allocation_zone()),
fixed_simd128_live_ranges_(this->config()->num_simd128_registers(),
nullptr, allocation_zone()),
spill_ranges_(code->VirtualRegisterCount(), nullptr, allocation_zone()),
delayed_references_(allocation_zone()),
assigned_registers_(nullptr),
......@@ -1518,21 +1520,8 @@ void RegisterAllocationData::MarkAllocated(MachineRepresentation rep,
int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
case MachineRepresentation::kSimd128:
if (kSimpleFPAliasing) {
assigned_double_registers_->Add(index);
} else {
int alias_base_index = -1;
int aliases = config()->GetAliases(
rep, index, MachineRepresentation::kFloat64, &alias_base_index);
DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
assigned_double_registers_->Add(aliased_reg);
}
}
break;
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
assigned_double_registers_->Add(index);
break;
default:
......@@ -1859,11 +1848,7 @@ int LiveRangeBuilder::FixedFPLiveRangeID(int index, MachineRepresentation rep) {
int result = -index - 1;
switch (rep) {
case MachineRepresentation::kSimd128:
result -= config()->num_float_registers();
// Fall through.
case MachineRepresentation::kFloat32:
result -= config()->num_double_registers();
// Fall through.
case MachineRepresentation::kFloat64:
result -= config()->num_general_registers();
break;
......@@ -1893,18 +1878,8 @@ TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
TopLevelLiveRange* result = nullptr;
switch (rep) {
case MachineRepresentation::kFloat32:
DCHECK(rep == MachineRepresentation::kFloat32);
DCHECK(index < config()->num_float_registers());
result = data()->fixed_float_live_ranges()[index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
data()->MarkAllocated(rep, index);
data()->fixed_float_live_ranges()[index] = result;
}
break;
case MachineRepresentation::kFloat64:
case MachineRepresentation::kSimd128:
DCHECK(index < config()->num_double_registers());
result = data()->fixed_double_live_ranges()[index];
if (result == nullptr) {
......@@ -1915,17 +1890,6 @@ TopLevelLiveRange* LiveRangeBuilder::FixedFPLiveRangeFor(
data()->fixed_double_live_ranges()[index] = result;
}
break;
case MachineRepresentation::kSimd128:
DCHECK(index < config()->num_simd128_registers());
result = data()->fixed_simd128_live_ranges()[index];
if (result == nullptr) {
result = data()->NewLiveRange(FixedFPLiveRangeID(index, rep), rep);
DCHECK(result->IsFixed());
result->set_assigned_register(index);
data()->MarkAllocated(rep, index);
data()->fixed_simd128_live_ranges()[index] = result;
}
break;
default:
UNREACHABLE();
break;
......@@ -2052,7 +2016,8 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
}
if (instr->ClobbersDoubleRegisters()) {
for (int i = 0; i < config()->num_allocatable_double_registers(); ++i) {
for (int i = 0; i < config()->num_allocatable_aliased_double_registers();
++i) {
// Add a UseInterval for all DoubleRegisters. See comment above for
// general registers.
int code = config()->GetAllocatableDoubleCode(i);
......@@ -2061,26 +2026,6 @@ void LiveRangeBuilder::ProcessInstructions(const InstructionBlock* block,
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
// Preserve fixed float registers on archs with non-simple aliasing.
if (!kSimpleFPAliasing) {
for (int i = 0; i < config()->num_allocatable_float_registers(); ++i) {
// Add a UseInterval for all FloatRegisters. See comment above for
// general registers.
int code = config()->GetAllocatableFloatCode(i);
TopLevelLiveRange* range =
FixedFPLiveRangeFor(code, MachineRepresentation::kFloat32);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
for (int i = 0; i < config()->num_allocatable_simd128_registers();
++i) {
int code = config()->GetAllocatableSimd128Code(i);
TopLevelLiveRange* range =
FixedFPLiveRangeFor(code, MachineRepresentation::kSimd128);
range->AddUseInterval(curr_position, curr_position.End(),
allocation_zone());
}
}
}
for (size_t i = 0; i < instr->InputCount(); i++) {
......@@ -2644,15 +2589,9 @@ void LinearScanAllocator::AllocateRegisters() {
if (current != nullptr) AddToInactive(current);
}
} else {
for (TopLevelLiveRange* current : data()->fixed_float_live_ranges()) {
if (current != nullptr) AddToInactive(current);
}
for (TopLevelLiveRange* current : data()->fixed_double_live_ranges()) {
if (current != nullptr) AddToInactive(current);
}
for (TopLevelLiveRange* current : data()->fixed_simd128_live_ranges()) {
if (current != nullptr) AddToInactive(current);
}
}
while (!unhandled_live_ranges().empty()) {
......@@ -2819,21 +2758,9 @@ void LinearScanAllocator::InactiveToActive(LiveRange* range) {
bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
MachineRepresentation rep = current->representation();
int num_regs = num_registers();
int num_codes = num_allocatable_registers();
const int* codes = allocatable_register_codes();
if (!kSimpleFPAliasing) {
if (rep == MachineRepresentation::kFloat32) {
num_regs = data()->config()->num_float_registers();
num_codes = data()->config()->num_allocatable_float_registers();
codes = data()->config()->allocatable_float_codes();
} else if (rep == MachineRepresentation::kSimd128) {
num_regs = data()->config()->num_simd128_registers();
num_codes = data()->config()->num_allocatable_simd128_registers();
codes = data()->config()->allocatable_simd128_codes();
}
}
LifetimePosition free_until_pos[RegisterConfiguration::kMaxFPRegisters];
for (int i = 0; i < num_regs; i++) {
......@@ -2842,21 +2769,9 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
for (LiveRange* cur_active : active_live_ranges()) {
int cur_reg = cur_active->assigned_register();
if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
LifetimePosition::GapFromInstructionIndex(0).value());
} else {
int alias_base_index = -1;
int aliases = data()->config()->GetAliases(
cur_active->representation(), cur_reg, rep, &alias_base_index);
DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
free_until_pos[aliased_reg] =
LifetimePosition::GapFromInstructionIndex(0);
}
}
free_until_pos[cur_reg] = LifetimePosition::GapFromInstructionIndex(0);
TRACE("Register %s is free until pos %d (1)\n", RegisterName(cur_reg),
LifetimePosition::GapFromInstructionIndex(0).value());
}
for (LiveRange* cur_inactive : inactive_live_ranges()) {
......@@ -2865,21 +2780,9 @@ bool LinearScanAllocator::TryAllocateFreeReg(LiveRange* current) {
cur_inactive->FirstIntersection(current);
if (!next_intersection.IsValid()) continue;
int cur_reg = cur_inactive->assigned_register();
if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
Min(free_until_pos[cur_reg], next_intersection).value());
} else {
int alias_base_index = -1;
int aliases = data()->config()->GetAliases(
cur_inactive->representation(), cur_reg, rep, &alias_base_index);
DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
free_until_pos[aliased_reg] =
Min(free_until_pos[aliased_reg], next_intersection);
}
}
free_until_pos[cur_reg] = Min(free_until_pos[cur_reg], next_intersection);
TRACE("Register %s is free until pos %d (2)\n", RegisterName(cur_reg),
Min(free_until_pos[cur_reg], next_intersection).value());
}
int hint_register;
......@@ -2943,21 +2846,9 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
return;
}
MachineRepresentation rep = current->representation();
int num_regs = num_registers();
int num_codes = num_allocatable_registers();
const int* codes = allocatable_register_codes();
if (!kSimpleFPAliasing) {
if (rep == MachineRepresentation::kFloat32) {
num_regs = data()->config()->num_float_registers();
num_codes = data()->config()->num_allocatable_float_registers();
codes = data()->config()->allocatable_float_codes();
} else if (rep == MachineRepresentation::kSimd128) {
num_regs = data()->config()->num_simd128_registers();
num_codes = data()->config()->num_allocatable_simd128_registers();
codes = data()->config()->allocatable_simd128_codes();
}
}
LifetimePosition use_pos[RegisterConfiguration::kMaxFPRegisters];
LifetimePosition block_pos[RegisterConfiguration::kMaxFPRegisters];
......@@ -2969,38 +2860,16 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
int cur_reg = range->assigned_register();
bool is_fixed_or_cant_spill =
range->TopLevel()->IsFixed() || !range->CanBeSpilled(current->Start());
if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
if (is_fixed_or_cant_spill) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
UsePosition* next_use =
range->NextUsePositionRegisterIsBeneficial(current->Start());
if (next_use == nullptr) {
use_pos[cur_reg] = range->End();
} else {
use_pos[cur_reg] = next_use->pos();
}
}
if (is_fixed_or_cant_spill) {
block_pos[cur_reg] = use_pos[cur_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
int alias_base_index = -1;
int aliases = data()->config()->GetAliases(
range->representation(), cur_reg, rep, &alias_base_index);
DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
if (is_fixed_or_cant_spill) {
block_pos[aliased_reg] = use_pos[aliased_reg] =
LifetimePosition::GapFromInstructionIndex(0);
} else {
UsePosition* next_use =
range->NextUsePositionRegisterIsBeneficial(current->Start());
if (next_use == nullptr) {
use_pos[aliased_reg] = range->End();
} else {
use_pos[aliased_reg] = next_use->pos();
}
}
UsePosition* next_use =
range->NextUsePositionRegisterIsBeneficial(current->Start());
if (next_use == nullptr) {
use_pos[cur_reg] = range->End();
} else {
use_pos[cur_reg] = next_use->pos();
}
}
}
......@@ -3011,29 +2880,11 @@ void LinearScanAllocator::AllocateBlockedReg(LiveRange* current) {
if (!next_intersection.IsValid()) continue;
int cur_reg = range->assigned_register();
bool is_fixed = range->TopLevel()->IsFixed();
if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
if (is_fixed) {
block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
} else {
use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
}
if (is_fixed) {
block_pos[cur_reg] = Min(block_pos[cur_reg], next_intersection);
use_pos[cur_reg] = Min(block_pos[cur_reg], use_pos[cur_reg]);
} else {
int alias_base_index = -1;
int aliases = data()->config()->GetAliases(
range->representation(), cur_reg, rep, &alias_base_index);
DCHECK(aliases > 0 || (aliases == 0 && alias_base_index == -1));
while (aliases--) {
int aliased_reg = alias_base_index + aliases;
if (is_fixed) {
block_pos[aliased_reg] =
Min(block_pos[aliased_reg], next_intersection);
use_pos[aliased_reg] =
Min(block_pos[aliased_reg], use_pos[aliased_reg]);
} else {
use_pos[aliased_reg] = Min(use_pos[aliased_reg], next_intersection);
}
}
use_pos[cur_reg] = Min(use_pos[cur_reg], next_intersection);
}
}
......@@ -3085,15 +2936,7 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
LifetimePosition split_pos = current->Start();
for (size_t i = 0; i < active_live_ranges().size(); ++i) {
LiveRange* range = active_live_ranges()[i];
if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
if (range->assigned_register() != reg) continue;
} else {
if (!data()->config()->AreAliases(current->representation(), reg,
range->representation(),
range->assigned_register())) {
continue;
}
}
if (range->assigned_register() != reg) continue;
UsePosition* next_pos = range->NextRegisterPosition(current->Start());
LifetimePosition spill_pos = FindOptimalSpillingPos(range, split_pos);
......@@ -3120,14 +2963,7 @@ void LinearScanAllocator::SplitAndSpillIntersecting(LiveRange* current) {
LiveRange* range = inactive_live_ranges()[i];
DCHECK(range->End() > current->Start());
if (range->TopLevel()->IsFixed()) continue;
if (kSimpleFPAliasing || mode() == GENERAL_REGISTERS) {
if (range->assigned_register() != reg) continue;
} else {
if (!data()->config()->AreAliases(current->representation(), reg,
range->representation(),
range->assigned_register()))
continue;
}
if (range->assigned_register() != reg) continue;
LifetimePosition next_intersection = range->FirstIntersection(current);
if (next_intersection.IsValid()) {
......
......@@ -766,24 +766,12 @@ class RegisterAllocationData final : public ZoneObject {
ZoneVector<TopLevelLiveRange*>& fixed_live_ranges() {
return fixed_live_ranges_;
}
ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() {
return fixed_float_live_ranges_;
}
const ZoneVector<TopLevelLiveRange*>& fixed_float_live_ranges() const {
return fixed_float_live_ranges_;
}
ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() {
return fixed_double_live_ranges_;
}
const ZoneVector<TopLevelLiveRange*>& fixed_double_live_ranges() const {
return fixed_double_live_ranges_;
}
ZoneVector<TopLevelLiveRange*>& fixed_simd128_live_ranges() {
return fixed_simd128_live_ranges_;
}
const ZoneVector<TopLevelLiveRange*>& fixed_simd128_live_ranges() const {
return fixed_simd128_live_ranges_;
}
ZoneVector<BitVector*>& live_in_sets() { return live_in_sets_; }
ZoneVector<BitVector*>& live_out_sets() { return live_out_sets_; }
ZoneVector<SpillRange*>& spill_ranges() { return spill_ranges_; }
......@@ -845,9 +833,7 @@ class RegisterAllocationData final : public ZoneObject {
ZoneVector<BitVector*> live_out_sets_;
ZoneVector<TopLevelLiveRange*> live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_float_live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_double_live_ranges_;
ZoneVector<TopLevelLiveRange*> fixed_simd128_live_ranges_;
ZoneVector<SpillRange*> spill_ranges_;
DelayedReferences delayed_references_;
BitVector* assigned_registers_;
......
......@@ -177,17 +177,6 @@ struct Allocator {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
DoubleRegister reg = fp_regs[fp_offset++];
#if V8_TARGET_ARCH_ARM
// Allocate floats using a double register, but modify the code to
// reflect how ARM FP registers alias.
// TODO(bbudge) Modify wasm linkage to allow use of all float regs.
if (type == kAstF32) {
int float_reg_code = reg.code() * 2;
DCHECK(float_reg_code < RegisterConfiguration::kMaxFPRegisters);
return regloc(DoubleRegister::from_code(float_reg_code),
MachineTypeFor(type));
}
#endif
return regloc(reg, MachineTypeFor(type));
} else {
int offset = -1 - stack_offset;
......
......@@ -70,12 +70,15 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
#if V8_TARGET_ARCH_IA32
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X87
kMaxAllocatableGeneralRegisterCount,
compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
compiler == TURBOFAN ? 1 : kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_X64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_ARM
FLAG_enable_embedded_constant_pool
? (kMaxAllocatableGeneralRegisterCount - 1)
......@@ -83,21 +86,27 @@ class ArchDefaultRegisterConfiguration : public RegisterConfiguration {
CpuFeatures::IsSupported(VFP32DREGS)
? kMaxAllocatableDoubleRegisterCount
: (ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0),
ALLOCATABLE_NO_VFP32_DOUBLE_REGISTERS(REGISTER_COUNT) 0,
#elif V8_TARGET_ARCH_ARM64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_MIPS
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_MIPS64
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_PPC
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
#elif V8_TARGET_ARCH_S390
kMaxAllocatableGeneralRegisterCount,
kMaxAllocatableDoubleRegisterCount,
kMaxAllocatableDoubleRegisterCount,
#else
#error Unsupported target architecture.
#endif
......@@ -136,6 +145,7 @@ const RegisterConfiguration* RegisterConfiguration::Turbofan() {
RegisterConfiguration::RegisterConfiguration(
int num_general_registers, int num_double_registers,
int num_allocatable_general_registers, int num_allocatable_double_registers,
int num_allocatable_aliased_double_registers,
const int* allocatable_general_codes, const int* allocatable_double_codes,
AliasingKind fp_aliasing_kind, const char* const* general_register_names,
const char* const* float_register_names,
......@@ -148,6 +158,8 @@ RegisterConfiguration::RegisterConfiguration(
num_allocatable_general_registers_(num_allocatable_general_registers),
num_allocatable_float_registers_(0),
num_allocatable_double_registers_(num_allocatable_double_registers),
num_allocatable_aliased_double_registers_(
num_allocatable_aliased_double_registers),
num_allocatable_simd128_registers_(0),
allocatable_general_codes_mask_(0),
allocatable_float_codes_mask_(0),
......
......@@ -35,6 +35,7 @@ class RegisterConfiguration {
RegisterConfiguration(int num_general_registers, int num_double_registers,
int num_allocatable_general_registers,
int num_allocatable_double_registers,
int num_allocatable_aliased_double_registers,
const int* allocatable_general_codes,
const int* allocatable_double_codes,
AliasingKind fp_aliasing_kind,
......@@ -56,6 +57,12 @@ class RegisterConfiguration {
int num_allocatable_double_registers() const {
return num_allocatable_double_registers_;
}
// TODO(bbudge): This is a temporary work-around required because our
// register allocator does not yet support the aliasing of single/double
// registers on ARM.
int num_allocatable_aliased_double_registers() const {
return num_allocatable_aliased_double_registers_;
}
int num_allocatable_simd128_registers() const {
return num_allocatable_simd128_registers_;
}
......@@ -135,6 +142,7 @@ class RegisterConfiguration {
int num_allocatable_general_registers_;
int num_allocatable_float_registers_;
int num_allocatable_double_registers_;
int num_allocatable_aliased_double_registers_;
int num_allocatable_simd128_registers_;
int32_t allocatable_general_codes_mask_;
int32_t allocatable_float_codes_mask_;
......
......@@ -83,8 +83,7 @@ class InterpreterState {
const LocationOperand& loc_op = LocationOperand::cast(op);
if (loc_op.IsAnyRegister()) {
if (loc_op.IsFPRegister()) {
rep = kSimpleFPAliasing ? MachineRepresentation::kFloat64
: loc_op.representation();
rep = MachineRepresentation::kFloat64;
}
index = loc_op.register_code();
} else {
......@@ -186,10 +185,7 @@ class ParallelMoveCreator : public HandleAndZoneScope {
case 1:
return MachineRepresentation::kWord64;
case 2:
// TODO(bbudge) Re-enable float operands when GapResolver correctly
// handles FP aliasing.
return kSimpleFPAliasing ? MachineRepresentation::kFloat32
: MachineRepresentation::kFloat64;
return MachineRepresentation::kFloat32;
case 3:
return MachineRepresentation::kFloat64;
case 4:
......@@ -205,13 +201,18 @@ class ParallelMoveCreator : public HandleAndZoneScope {
auto GetRegisterCode = [&conf](MachineRepresentation rep, int index) {
switch (rep) {
case MachineRepresentation::kFloat32:
#if V8_TARGET_ARCH_ARM
// Only even number float registers are used on Arm.
// TODO(bbudge) Eliminate this when FP register aliasing works.
return conf->RegisterConfiguration::GetAllocatableDoubleCode(index) *
2;
#endif
// Fall through on non-Arm targets.
case MachineRepresentation::kFloat64:
return conf->RegisterConfiguration::GetAllocatableDoubleCode(index);
break;
default:
return conf->RegisterConfiguration::GetAllocatableGeneralCode(index);
break;
}
UNREACHABLE();
return static_cast<int>(Register::kCode_no_reg);
......
......@@ -87,16 +87,8 @@ class RegisterPairs : public Pairs {
class Float32RegisterPairs : public Pairs {
public:
Float32RegisterPairs()
: Pairs(
100,
#if V8_TARGET_ARCH_ARM
// TODO(bbudge) Modify wasm linkage to allow use of all float regs.
GetRegConfig()->num_allocatable_double_registers() / 2 - 2,
#else
GetRegConfig()->num_allocatable_double_registers(),
#endif
GetRegConfig()->allocatable_double_codes()) {
}
: Pairs(100, GetRegConfig()->num_allocatable_aliased_double_registers(),
GetRegConfig()->allocatable_double_codes()) {}
};
......@@ -135,10 +127,6 @@ struct Allocator {
// Allocate a floating point register/stack location.
if (fp_offset < fp_count) {
int code = fp_regs[fp_offset++];
#if V8_TARGET_ARCH_ARM
// TODO(bbudge) Modify wasm linkage to allow use of all float regs.
if (type.representation() == MachineRepresentation::kFloat32) code *= 2;
#endif
return LinkageLocation::ForRegister(code, type);
} else {
int offset = -1 - stack_offset;
......
......@@ -67,7 +67,8 @@ RegisterConfiguration* InstructionSequenceTest::config() {
if (!config_) {
config_.reset(new RegisterConfiguration(
num_general_registers_, num_double_registers_, num_general_registers_,
num_double_registers_, allocatable_codes, allocatable_double_codes,
num_double_registers_, num_double_registers_, allocatable_codes,
allocatable_double_codes,
kSimpleFPAliasing ? RegisterConfiguration::OVERLAP
: RegisterConfiguration::COMBINE,
general_register_names_,
......
......@@ -30,8 +30,9 @@ TEST_F(RegisterConfigurationUnitTest, BasicProperties) {
RegisterConfiguration test(
kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
kNumAllocatableDoubleRegs, general_codes, double_codes,
RegisterConfiguration::OVERLAP, nullptr, nullptr, nullptr, nullptr);
kNumAllocatableDoubleRegs, kNumAllocatableDoubleRegs, general_codes,
double_codes, RegisterConfiguration::OVERLAP, nullptr, nullptr, nullptr,
nullptr);
EXPECT_EQ(test.num_general_registers(), kNumGeneralRegs);
EXPECT_EQ(test.num_double_registers(), kNumDoubleRegs);
......@@ -66,8 +67,9 @@ TEST_F(RegisterConfigurationUnitTest, CombineAliasing) {
RegisterConfiguration test(
kNumGeneralRegs, kNumDoubleRegs, kNumAllocatableGeneralRegs,
kNumAllocatableDoubleRegs, general_codes, double_codes,
RegisterConfiguration::COMBINE, nullptr, nullptr, nullptr, nullptr);
kNumAllocatableDoubleRegs, kNumAllocatableDoubleRegs, general_codes,
double_codes, RegisterConfiguration::COMBINE, nullptr, nullptr, nullptr,
nullptr);
// There are 3 allocatable double regs, but only 2 can alias float regs.
EXPECT_EQ(test.num_allocatable_float_registers(), 4);
......