Commit 00f4b33a authored by Andreas Haas, committed by Commit Bot

[arm][turbofan] Pass double immediate to vmov as uint64_t

On x86, signalling NaNs get converted to quiet NaNs when they get pushed
on the stack and popped again. This happens in the code generation for
arm, specifically for the vmov instruction with the immediate parameter.
This CL replaces the vmov function in assembler-arm to take the
immediate as a uint64_t instead of a double, to guarantee that the bit
pattern does not change even if the parameter is a signalling NaN.

BUG=v8:6564

Change-Id: I062559f9a7ba8b0f560628e5c39621ca578c3e7d
Reviewed-on: https://chromium-review.googlesource.com/558964
Commit-Queue: Andreas Haas <ahaas@chromium.org>
Reviewed-by: Ben Titzer <titzer@chromium.org>
Reviewed-by: Martyn Capewell <martyn.capewell@arm.com>
Cr-Commit-Position: refs/heads/master@{#46418}
parent c633282d
......@@ -2677,19 +2677,16 @@ void Assembler::vstm(BlockAddrMode am, Register base, SwVfpRegister first,
0xA*B8 | count);
}
static void DoubleAsTwoUInt32(double d, uint32_t* lo, uint32_t* hi) {
uint64_t i;
memcpy(&i, &d, 8);
static void DoubleAsTwoUInt32(Double d, uint32_t* lo, uint32_t* hi) {
uint64_t i = d.AsUint64();
*lo = i & 0xffffffff;
*hi = i >> 32;
}
// Only works for little endian floating point formats.
// We don't support VFP on the mixed endian floating point platform.
static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
static bool FitsVmovFPImmediate(Double d, uint32_t* encoding) {
// VMOV can accept an immediate of the form:
//
// +/- m * 2^(-n) where 16 <= m <= 31 and 0 <= n <= 7
......@@ -2735,10 +2732,10 @@ static bool FitsVmovFPImmediate(double d, uint32_t* encoding) {
return true;
}
void Assembler::vmov(const SwVfpRegister dst, float imm) {
uint32_t enc;
if (CpuFeatures::IsSupported(VFPv3) && FitsVmovFPImmediate(imm, &enc)) {
if (CpuFeatures::IsSupported(VFPv3) &&
FitsVmovFPImmediate(Double(imm), &enc)) {
CpuFeatureScope scope(this, VFPv3);
// The float can be encoded in the instruction.
//
......@@ -2755,9 +2752,7 @@ void Assembler::vmov(const SwVfpRegister dst, float imm) {
}
}
void Assembler::vmov(const DwVfpRegister dst,
double imm,
void Assembler::vmov(const DwVfpRegister dst, Double imm,
const Register scratch) {
DCHECK(VfpRegisterIsAvailable(dst));
DCHECK(!scratch.is(ip));
......@@ -2823,7 +2818,6 @@ void Assembler::vmov(const DwVfpRegister dst,
}
}
void Assembler::vmov(const SwVfpRegister dst,
const SwVfpRegister src,
const Condition cond) {
......@@ -5150,7 +5144,7 @@ void Assembler::ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
}
}
void Assembler::ConstantPoolAddEntry(int position, double value) {
void Assembler::ConstantPoolAddEntry(int position, Double value) {
DCHECK(pending_64_bit_constants_.size() < kMaxNumPending64Constants);
if (pending_64_bit_constants_.empty()) {
first_const_pool_64_use_ = position;
......
......@@ -45,6 +45,7 @@
#include "src/arm/constants-arm.h"
#include "src/assembler.h"
#include "src/double.h"
namespace v8 {
namespace internal {
......@@ -1166,7 +1167,7 @@ class Assembler : public AssemblerBase {
void vmov(const SwVfpRegister dst, float imm);
void vmov(const DwVfpRegister dst,
double imm,
Double imm,
const Register scratch = no_reg);
void vmov(const SwVfpRegister dst,
const SwVfpRegister src,
......@@ -1857,7 +1858,7 @@ class Assembler : public AssemblerBase {
void RecordRelocInfo(RelocInfo::Mode rmode, intptr_t data = 0);
void ConstantPoolAddEntry(int position, RelocInfo::Mode rmode,
intptr_t value);
void ConstantPoolAddEntry(int position, double value);
void ConstantPoolAddEntry(int position, Double value);
friend class RelocInfo;
friend class CodePatcher;
......
......@@ -12,6 +12,7 @@
#include "src/bootstrapper.h"
#include "src/codegen.h"
#include "src/counters.h"
#include "src/double.h"
#include "src/heap/heap-inl.h"
#include "src/ic/handler-compiler.h"
#include "src/ic/ic.h"
......@@ -708,7 +709,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ mov(exponent, scratch);
}
__ vmov(double_scratch, double_base); // Back up base.
__ vmov(double_result, 1.0, scratch2);
__ vmov(double_result, Double(1.0), scratch2);
// Get absolute value of exponent.
__ cmp(scratch, Operand::Zero());
......@@ -723,7 +724,7 @@ void MathPowStub::Generate(MacroAssembler* masm) {
__ cmp(exponent, Operand::Zero());
__ b(ge, &done);
__ vmov(double_scratch, 1.0, scratch);
__ vmov(double_scratch, Double(1.0), scratch);
__ vdiv(double_result, double_scratch, double_result);
// Test whether result is zero. Bail out to check for subnormal result.
// Due to subnormals, x^-y == (1/x)^y does not hold in all cases.
......@@ -980,7 +981,7 @@ void JSEntryStub::Generate(MacroAssembler* masm) {
// Save callee-saved vfp registers.
__ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg);
// Set up the reserved register for 0.0.
__ vmov(kDoubleRegZero, 0.0);
__ vmov(kDoubleRegZero, Double(0.0));
// Get address of argv, see stm above.
// r0: code entry
......
......@@ -14,6 +14,7 @@
#include "src/codegen.h"
#include "src/counters.h"
#include "src/debug/debug.h"
#include "src/double.h"
#include "src/objects-inl.h"
#include "src/register-configuration.h"
#include "src/runtime/runtime.h"
......@@ -992,15 +993,13 @@ void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
vmrs(fpscr_flags, cond);
}
void MacroAssembler::Vmov(const DwVfpRegister dst,
const double imm,
void MacroAssembler::Vmov(const DwVfpRegister dst, Double imm,
const Register scratch) {
int64_t imm_bits = bit_cast<int64_t>(imm);
uint64_t imm_bits = imm.AsUint64();
// Handle special values first.
if (imm_bits == bit_cast<int64_t>(0.0)) {
if (imm_bits == Double(0.0).AsUint64()) {
vmov(dst, kDoubleRegZero);
} else if (imm_bits == bit_cast<int64_t>(-0.0)) {
} else if (imm_bits == Double(-0.0).AsUint64()) {
vneg(dst, kDoubleRegZero);
} else {
vmov(dst, imm, scratch);
......@@ -3379,7 +3378,7 @@ void MacroAssembler::ClampDoubleToUint8(Register result_reg,
Label done;
// Handle inputs >= 255 (including +infinity).
Vmov(double_scratch, 255.0, result_reg);
Vmov(double_scratch, Double(255.0), result_reg);
mov(result_reg, Operand(255));
VFPCompareAndSetFlags(input_reg, double_scratch);
b(ge, &done);
......
......@@ -543,8 +543,7 @@ class MacroAssembler: public Assembler {
const Register fpscr_flags,
const Condition cond = al);
void Vmov(const DwVfpRegister dst,
const double imm,
void Vmov(const DwVfpRegister dst, Double imm,
const Register scratch = no_reg);
void VmovHigh(Register dst, DwVfpRegister src);
......
......@@ -3225,7 +3225,6 @@ void Simulator::DecodeType7(Instruction* instr) {
void Simulator::DecodeTypeVFP(Instruction* instr) {
DCHECK((instr->TypeValue() == 7) && (instr->Bit(24) == 0x0) );
DCHECK(instr->Bits(11, 9) == 0x5);
// Obtain single precision register codes.
int m = instr->VFPMRegValue(kSinglePrecision);
int d = instr->VFPDRegValue(kSinglePrecision);
......
......@@ -40,6 +40,7 @@
#include "src/allocation.h"
#include "src/builtins/builtins.h"
#include "src/deoptimize-reason.h"
#include "src/double.h"
#include "src/globals.h"
#include "src/label.h"
#include "src/log.h"
......@@ -1150,8 +1151,10 @@ class ConstantPoolEntry {
: position_(position),
merged_index_(sharing_ok ? SHARING_ALLOWED : SHARING_PROHIBITED),
value_(value) {}
ConstantPoolEntry(int position, double value)
: position_(position), merged_index_(SHARING_ALLOWED), value64_(value) {}
ConstantPoolEntry(int position, Double value)
: position_(position),
merged_index_(SHARING_ALLOWED),
value64_(value.AsUint64()) {}
int position() const { return position_; }
bool sharing_ok() const { return merged_index_ != SHARING_PROHIBITED; }
......@@ -1174,7 +1177,7 @@ class ConstantPoolEntry {
merged_index_ = offset;
}
intptr_t value() const { return value_; }
uint64_t value64() const { return bit_cast<uint64_t>(value64_); }
uint64_t value64() const { return value64_; }
enum Type { INTPTR, DOUBLE, NUMBER_OF_TYPES };
......@@ -1189,7 +1192,7 @@ class ConstantPoolEntry {
int merged_index_;
union {
intptr_t value_;
double value64_;
uint64_t value64_;
};
enum { SHARING_PROHIBITED = -2, SHARING_ALLOWED = -1 };
};
......@@ -1211,7 +1214,7 @@ class ConstantPoolBuilder BASE_EMBEDDED {
// Add double constant to the embedded constant pool
ConstantPoolEntry::Access AddEntry(int position, double value) {
ConstantPoolEntry entry(position, value);
ConstantPoolEntry entry(position, Double(value));
return AddEntry(entry, ConstantPoolEntry::DOUBLE);
}
......
......@@ -11,6 +11,7 @@
#include "src/compiler/gap-resolver.h"
#include "src/compiler/node-matchers.h"
#include "src/compiler/osr.h"
#include "src/double.h"
#include "src/heap/heap-inl.h"
namespace v8 {
......@@ -159,7 +160,7 @@ class OutOfLineLoadDouble final : public OutOfLineCode {
void Generate() final {
// Compute sqrt(-1.0), which results in a quiet double-precision NaN.
__ vmov(result_, -1.0);
__ vmov(result_, Double(-1.0));
__ vsqrt(result_, result_);
}
......@@ -2983,7 +2984,7 @@ void CodeGenerator::AssembleMove(InstructionOperand* source,
DwVfpRegister dst = destination->IsFPRegister()
? g.ToDoubleRegister(destination)
: kScratchDoubleReg;
__ vmov(dst, src.ToFloat64(), kScratchReg);
__ vmov(dst, Double(src.ToFloat64AsInt()), kScratchReg);
if (destination->IsDoubleStackSlot()) {
__ vstr(dst, g.ToMemOperand(destination));
}
......
......@@ -1083,6 +1083,7 @@ class V8_EXPORT_PRIVATE Constant final {
return bit_cast<double>(value_);
}
// TODO(ahaas) Use the Double class instead of uint64_t.
uint64_t ToFloat64AsInt() const {
if (type() == kInt32) return ToInt32();
DCHECK_EQ(kFloat64, type());
......
......@@ -31,6 +31,7 @@
#include "src/assembler-inl.h"
#include "src/base/utils/random-number-generator.h"
#include "src/disassembler.h"
#include "src/double.h"
#include "src/factory.h"
#include "src/macro-assembler.h"
#include "src/ostreams.h"
......@@ -274,16 +275,16 @@ TEST(4) {
__ vstr(s1, r4, offsetof(T, y));
// Move a literal into a register that can be encoded in the instruction.
__ vmov(d4, 1.0);
__ vmov(d4, Double(1.0));
__ vstr(d4, r4, offsetof(T, e));
// Move a literal into a register that requires 64 bits to encode.
// 0x3ff0000010000000 = 1.000000059604644775390625
__ vmov(d4, 1.000000059604644775390625);
__ vmov(d4, Double(1.000000059604644775390625));
__ vstr(d4, r4, offsetof(T, d));
// Convert from floating point to integer.
__ vmov(d4, 2.0);
__ vmov(d4, Double(2.0));
__ vcvt_s32_f64(s1, d4);
__ vstr(s1, r4, offsetof(T, i));
......@@ -458,7 +459,7 @@ static void TestRoundingMode(VCVTTypes types,
__ vmsr(r2);
// Load value, convert, and move back result to r0 if everything went well.
__ vmov(d1, value);
__ vmov(d1, Double(value));
switch (types) {
case s32_f64:
__ vcvt_s32_f64(s0, d1, kFPSCRRounding);
......@@ -1092,8 +1093,8 @@ TEST(13) {
__ vstm(ia_w, r4, d29, d31);
// Move constants into d20, d21, d22 and store into i, j, k.
__ vmov(d20, 14.7610017472335499);
__ vmov(d21, 16.0);
__ vmov(d20, Double(14.7610017472335499));
__ vmov(d21, Double(16.0));
__ mov(r1, Operand(372106121));
__ mov(r2, Operand(1079146608));
__ vmov(d22, VmovIndexLo, r1);
......@@ -1379,12 +1380,12 @@ TEST(15) {
// ARM core register to scalar.
__ mov(r4, Operand(0xfffffff8));
__ vmov(d0, 0);
__ vmov(d0, Double(0.0));
__ vmov(NeonS8, d0, 1, r4);
__ vmov(NeonS16, d0, 1, r4);
__ vmov(NeonS32, d0, 1, r4);
__ vstr(d0, r0, offsetof(T, vmov_to_scalar1));
__ vmov(d0, 0);
__ vmov(d0, Double(0.0));
__ vmov(NeonS8, d0, 3, r4);
__ vmov(NeonS16, d0, 3, r4);
__ vstr(d0, r0, offsetof(T, vmov_to_scalar2));
......@@ -3236,8 +3237,8 @@ TEST(ARMv8_vsel) {
__ vsel(vc, s0, s1, s2);
__ vstr(s0, r1, offsetof(ResultsF32, vselvc_));
__ vmov(d1, kResultPass);
__ vmov(d2, kResultFail);
__ vmov(d1, Double(kResultPass));
__ vmov(d2, Double(kResultFail));
__ vsel(eq, d0, d1, d2);
__ vstr(d0, r2, offsetof(ResultsF64, vseleq_));
......
......@@ -32,6 +32,7 @@
#include "src/debug/debug.h"
#include "src/disasm.h"
#include "src/disassembler.h"
#include "src/double.h"
#include "src/macro-assembler.h"
#include "src/objects-inl.h"
#include "src/v8.h"
......@@ -610,14 +611,14 @@ TEST(Vfp) {
COMPARE(vsqrt(s2, s3, ne),
"1eb11ae1 vsqrtne.f32 s2, s3");
COMPARE(vmov(d0, 1.0),
COMPARE(vmov(d0, Double(1.0)),
"eeb70b00 vmov.f64 d0, #1");
COMPARE(vmov(d2, -13.0),
COMPARE(vmov(d2, Double(-13.0)),
"eeba2b0a vmov.f64 d2, #-13");
COMPARE(vmov(s1, -1.0),
COMPARE(vmov(s1, -1.0f),
"eeff0a00 vmov.f32 s1, #-1");
COMPARE(vmov(s3, 13.0),
COMPARE(vmov(s3, 13.0f),
"eef21a0a vmov.f32 s3, #13");
COMPARE(vmov(d0, VmovIndexLo, r0),
......@@ -776,7 +777,7 @@ TEST(Vfp) {
COMPARE(vsqrt(d16, d17),
"eef10be1 vsqrt.f64 d16, d17");
COMPARE(vmov(d30, 16.0),
COMPARE(vmov(d30, Double(16.0)),
"eef3eb00 vmov.f64 d30, #16");
COMPARE(vmov(d31, VmovIndexLo, r7),
......
......@@ -1321,9 +1321,6 @@ WASM_EXEC_TEST(I64ReinterpretF64) {
}
}
// Do not run this test in a simulator because of signalling NaN issues on ia32.
#ifndef USE_SIMULATOR
WASM_EXEC_TEST(SignallingNanSurvivesI64ReinterpretF64) {
REQUIRE(I64ReinterpretF64);
WasmRunner<int64_t> r(execution_mode);
......@@ -1333,7 +1330,6 @@ WASM_EXEC_TEST(SignallingNanSurvivesI64ReinterpretF64) {
// This is a signalling nan.
CHECK_EQ(0x7ff4000000000000, r.Call());
}
#endif
WASM_EXEC_TEST(F64ReinterpretI64) {
REQUIRE(F64ReinterpretI64);
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment